panel.py
|
from tkinter import *
from tkinter import messagebox
import socket
import threading
import os
import time
path = os.path.expanduser("~/")
host = 'localhost'
port = 0
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
class panel:
def __init__(self, name="Dev", master=None):
try:
file = open(path + "clientConfig.config", "r")
file.readline()
host = file.readline().replace("ip:", "").replace("\n", "")
port = int(file.readline().replace("port:", ""))
file.close()
print(host)
print(port)
except:
host = 'localhost'
port = 5000
self.name = name
#self.name = "Teste"
self.th = False
        #Declaring the window parameters
self.window = master
self.window.title("Internal Messages v1.5")
self.window.geometry("800x630+50+50")
#self.window.minsize(width=800, height=600)
#self.window.maxsize(width=800, height=660)
self.window.resizable(width=False, height=False)
try:
s.connect((host, port))
s.send(str.encode(str("00-00-00-00-00-00")))
time.sleep(1.5)
s.send(str.encode(str("00-00-6e-61-6d-65" + self.name)))
except:
            messagebox.showwarning(title="Internal Message Error", message="Error Code: 503\nServer not found")
self.window.destroy()
        #Master frame of the window
self.container = Frame(master, pady="20")
self.container.pack(side=TOP)
        #Secondary container frames
self.title = LabelFrame(self.container, text="Logado como")
self.title.pack(side=TOP, fill="both", expand="yes", padx=20, pady=10)
self.chatA = LabelFrame(self.container, text="Chat")
self.chatA.pack(side=TOP, fill="both", expand="yes", padx=20, pady=10)
self.chat = Frame(self.chatA)
self.chat.pack(padx=10, pady=10)
self.msg = LabelFrame(self.container, text="Digite sua mensagem")
self.msg.pack(side=TOP, fill="both", expand="yes", padx=20, pady=10)
self.quit = Frame(self.container)
self.quit.pack(side=TOP, fill="both", expand="yes", padx=20, pady=10)
        #Name of the logged-in user
self.lblLogado = Label(self.title, text=self.name, font="Verdana 12 bold")
self.lblLogado.pack(side=LEFT, padx=40)
#Scrollbar/chat
self.cs = Scrollbar(self.chat, orient="vertical")
self.cs.pack(side=RIGHT, fill="y")
        self.txtChat = Text(self.chat, width="80", height="20", font="Verdana 10 bold", state="disabled", relief="raised", yscrollcommand=self.cs.set)
self.txtChat.bind("<Escape>", self.quiting)
self.txtChat.pack(side=RIGHT)
self.cs.config(command=self.txtChat.yview)
        #Entry used to type the message to send
self.txtMsg = Entry(self.msg, width="70")
self.txtMsg.bind("<Return>", self.send)
self.txtMsg.bind("<Escape>", self.quiting)
self.txtMsg.pack(side=LEFT)
        #Buttons
self.btnSend = Button(self.msg, text="Enviar", width="10", height="2", font="Verdana 12 bold")
self.btnSend.bind("<Button>", self.send)
self.btnSend.pack(side=RIGHT)
self.btnExit = Button(self.quit, text="Sair", width="10", font="Verdana 10 bold")
self.btnExit.bind("<Button>", self.quiting)
self.btnExit.grid(row = 3, column = 0, columnspan = 3)
def request():
while True:
data = s.recv(1024)
msg = data.decode()
if msg == "73-63-6c-6f-73-65":
                    self.th = True
s.send(str.encode(str("63-63-6c-6f-73-65" + self.name)))
s.shutdown(0)
s.close()
self.window.destroy()
break
#print(msg)
self.txtChat["state"] = "normal"
self.txtChat.insert(END, msg)
self.txtChat.yview_moveto(1.0)
self.txtChat["state"] = "disabled"
        threading.Thread(target=request, daemon=True).start()
def quiting(self, event):
        self.th = True
s.send(str.encode(str("63-63-6c-6f-73-65" + self.name)))
s.shutdown(0)
s.close()
self.window.destroy()
def send(self, event):
msg = str(self.txtMsg.get())
if msg != "":
msg = "00-00-00-00-00-00" + self.name + ":\n " + msg + "\n\n"
s.send(str.encode(str(msg)))
self.txtMsg.delete(0, END)
#window = Tk()
#panel(window)
#window.title("Internal Messages v1.5")
#window.geometry("800x600")
#window.resizable(width=False, height=False)
#window.mainloop()
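# Hedged launch sketch (added; not part of the original file). The commented-out block
# above passes the Tk root as the first positional argument, but panel.__init__ expects
# (name, master), so the root has to be passed as `master`. A minimal way to run it:
if __name__ == "__main__":
    window = Tk()
    app = panel(name="Dev", master=window)
    window.mainloop()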
|
cameratimerbackend.py
|
from threading import Timer, Thread
from time import time
class RepeatedTimer():
def __init__(self, interval, function, timelimit = None, countlimit = None, callback = None):
# announce interval to class
self.interval = interval
# announce target function to class
self.function = function
        # flag tracking whether a timer is currently scheduled
self.is_running = False
# error catching
assert not ((timelimit is not None) and (countlimit is not None)), 'Cannot use both time limit and count limit'
assert not ((timelimit is None) and (countlimit is None)), 'Time limit xor count limit must be defined'
if timelimit is not None:
# announce timelimit
self.timelimit = timelimit
elif countlimit is not None:
# convert countlimit to timelimit
self.timelimit = self.interval*countlimit - self.interval/2
# recalibrate time limit to take into account first run at time t=0
self.timelimit = self.timelimit - self.interval
# announce callback function
self.callback = callback
def __run(self):
self.is_running = False
self.start_it()
self.function()
def start_it(self):
if not self.is_running and (time() - self.time_init) < self.timelimit:
self.next_call += self.interval
self._timer = Timer(self.next_call - time(), self.__run)
self._timer.start()
self.is_running = True
else:
self.stop()
def start_all(self):
# set state as running
self.is_active = 1
# re-initialise timer interrupt
self.is_running = False
# get starting time for time limit
self.time_init = time()
# start 0th instance
initial_thread = Thread(target=self.function)
initial_thread.start()
# get starting time for 0th timed call
self.next_call = time()
self.start_it()
def stop(self):
self._timer.cancel()
self.is_running = True
if self.callback is not None:
self.callback()
# set state as not running
self.is_active = 0
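# Hedged usage sketch (added; not part of the original file): run a callable once per
# second, five times in total, then fire a completion callback. The names tick/done are
# illustrative only.
if __name__ == "__main__":
    def tick():
        print("tick at", time())

    def done():
        print("timer finished")

    rt = RepeatedTimer(1.0, tick, countlimit=5, callback=done)
    rt.start_all()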
|
EFScheduler.py
|
#!/bin/env python
#coding:utf-8
import subprocess
import datetime
import time
import os
import sys
import signal
import getopt
import threading
import select
SHUTDOWN = False
def shutdown(sigNum, frame):
global SHUTDOWN
SHUTDOWN = True
sys.stderr.write('Catch Signal : %s \n\n' % sigNum)
sys.stderr.flush()
signal.signal(signal.SIGTERM, shutdown) #sigNum 15 : Terminate
signal.signal(signal.SIGINT, shutdown) #sigNum 2 : Interrupt
def usage():
print >> sys.stderr, """Usage : python [this_filename][file][,timeoption(default: -m * -h * -d * -o * -w *)]
[this_filename] : EFScheduler.py
[file] : -p ( file://%Y/%m/%d-%H:%M:%S ) formatting
-e (filepath) exec file
[timeoption] : -m ( number[0-59] | (number)-(number) | 0,2,3,...,59 ) minute
-h ( number[0-24] | (number)-(number) | 0,2,3,...,24 ) hour
-d ( number[1-31] | (number)-(number) | 1,2,3,...,31 ) day
-o ( number[1-12] | (number)-(number) | 1,2,3,...,12 ) month
               -w ( number[0-6] | (number)-(number) | 0(mon),2,3,...,6(sun)) weekday
 Condition : 1. this script waits only 10 seconds when [file] prints nothing to stdout or stderr
             2. even when [file] prints nothing, this script still writes '' to stdout and '' to stderr
"""
print >> sys.stderr, """
Exam : 1. python EFScheduler.py -e "test.sh" -m */2
2. python EFScheduler.py -e "test.sh" -p "test.sh://%Y/%m/%d-%H:%M:%S -m 10-20" -h 0 \n\n"""
class EFScheduler(object) :
def __init__(self, options,args):
super(EFScheduler, self).__init__()
self.options = options
self.args = args
ft = datetime.datetime.today()
self.delta_input = [ft.minute,ft.hour,ft.day,ft.month,ft.weekday(),ft.second]
self.console_input = ['*','*','*','*','*']
self.slashlist =[[],[],[],[],[]]
self.fdinput=[]
self.formatinput=[]
self.token=0
def preprocessing(self):
epflag =True
for op, p in self.options:
if p == sys.argv[0]:
p='*'
if op in '-m':
self.console_input[0]=p
elif op in '-h':
self.console_input[1]=p
elif op in '-d':
self.console_input[2]=p
elif op in '-o':
self.console_input[3]=p
elif op in '-w':
self.console_input[4]=p
elif op in '-e':
epflag = False
if p.lower() == 'python':
raise Exception(" mark '' is needed. exam : 'python xxx.py' ")
else:
self.fdinput.append(p)
elif op in '-p':
epflag = False
self.formatinput.append(p)
elif op in '--help':
usage()
os._exit(1)
else:
raise Exception("Unhandled Option, option --help")
os._exit(1)
if epflag:
raise Exception("You must use -p or -e option")
os._exit(1)
def optionprocessing(self, consoleIndex, limittime, rangestart=0):
if '/' in self.console_input[consoleIndex]:
sp = self.console_input[consoleIndex].split('/')
args = sp[0]
else:
args = self.console_input[consoleIndex]
numlist=[]
if '*' in args:
numlist = [i for i in range(rangestart,limittime)]
if ',' in args:
token = args.split(',')
numlist = [int(i) for i in token]
if '-' in args:
token = args.split('-')
for i in range(int(token[0]),int(token[1])+1):
numlist.append(int(i))
if '/' in self.console_input[consoleIndex]:
token = int(sp[1])
self.slashlist[consoleIndex] =[i for i in range(0,60) if i%token ==0]
if args.isdigit():
numlist=[int(args)]
return numlist
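    # Note added for clarity: optionprocessing turns one cron-like field into the list of
    # matching values, e.g. '*' -> every value in [rangestart, limittime), '1,5,10' -> [1, 5, 10],
    # '10-20' -> [10, 11, ..., 20], and '*/2' records the step filter in self.slashlist so that
    # run() later keeps only the values divisible by 2.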
def read_pipe(self, pipe, wfunc, pros) :
while True :
r_list, w_list, e_list = select.select([pipe], [], [], 1)
if r_list :
try :
msg = pipe.readline()
except :
break
if msg == '' : break
wfunc.write(msg)
wfunc.flush()
try : pros.terminate()
except : pass
try : pros.kill()
except : pass
pros.poll()
def execute_process(self) :
for cmd in self.fdinput:
try:
pros = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=0, close_fds=False)
stdout_read = threading.Thread(target=self.read_pipe, args=[pros.stdout, sys.stdout, pros])
stdout_read .setDaemon(True)
stdout_read.start()
stderr_read = threading.Thread(target=self.read_pipe, args=[pros.stderr, sys.stderr, pros])
stderr_read .setDaemon(True)
stderr_read.start()
            except Exception, err : # invalid input for option -e
sys.stderr.write("Option -e is Exception (%s) : %s \n" % (err, cmd))
sys.stderr.flush()
def print_stdout(self) :
for fm in self.formatinput:
try:
msg = time.strftime(fm, time.localtime())
sys.stdout.write("%s\n" % msg)
sys.stdout.flush()
sys.stderr.write("%s\n" % msg)
sys.stderr.flush()
except Exception, err:
sys.stderr.write("Option -p is Exception : %s \n" % fm)
sys.stderr.flush()
pass
def processing(self, nt, minute, hour, day, month, weekday):
if nt.second in [0,1]:
if self.token ==1:
return
self.token =1
else:
self.token =0
return
if not nt.minute in minute:
return
if not nt.hour in hour:
return
if not nt.day in day:
return
if not nt.month in month:
return
if not nt.weekday() in weekday:
return
self.execute_process()
self.print_stdout()
def run(self):
self.preprocessing()
minutelist = self.optionprocessing(0,60)
hourlist = self.optionprocessing(1,24)
daylist = self.optionprocessing(2,32,1)
monthlist = self.optionprocessing(3,13,1)
weekdaylist = self.optionprocessing(4,7)
slashlist = self.slashlist
totallist = [minutelist, hourlist, daylist, monthlist, weekdaylist]
        tmplist = []
        for i in range(len(slashlist)):
            if len(slashlist[i]) > 0:
                # rebuild the list for this field only, so */n filters do not leak between fields
                tmplist = [n for n in totallist[i] if n in slashlist[i]]
                totallist[i] = tmplist
while not SHUTDOWN:
nt = datetime.datetime.today()
#print "NOW TIME", nt # For Test
self.processing(nt, totallist[0], totallist[1], totallist[2], totallist[3], totallist[4])
time.sleep(1)
def main():
try:
if len(sys.argv)==1:
usage()
raise Exception("you must write option -e or -p")
os._exit(1)
options, args = getopt.getopt(sys.argv[1:], 'm:h:d:o:w:e:p:',['help'])
obj = EFScheduler(options,args)
obj.run()
except getopt.GetoptError:
raise Exception("unhandled option")
if __name__=="__main__":
main()
|
ur5_2_controller.py
|
#! /usr/bin/env python2.7
"""
This file controls ur5_2: it handles the conveyor belt and the placement of boxes from the belt to the bins.
"""
import rospy
import time
import datetime
from camera_one.camera_one import Camera1,get_item_details
from ur5_moveit.ur5_moveit import Ur5Moveit,define_pose,define_joint_angle_list
from iot_client.iot_client import IotClient
from pkg_vb_sim.msg import LogicalCameraImage
from pkg_vb_sim.srv import vacuumGripper
from sensor_msgs.msg import Image
from pkg_task6.msg import OrderDetailArray
import socket #tcp/ip connection
def get_time_str(estimated_time_of_delivery):
"""
    This function returns a time string for now plus estimated_time_of_delivery days.
:param estimated_time_of_delivery: Number of days to be added in time right now.
:return: String of time.
"""
x = datetime.datetime.now() + datetime.timedelta(days=int(estimated_time_of_delivery))
return x.strftime("%a")+' '+x.strftime("%b")+' '+x.strftime("%d")+' '+x.strftime("%Y")+' - '+x.strftime("%X")
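# Illustrative note (added): if "now" were Mon Jan 10 2022 12:00:00, get_time_str(1)
# would return "Tue Jan 11 2022 - 12:00:00" (pattern "%a %b %d %Y - %X").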
def bot_2_controller(data):
"""
    This function controls the complete functioning of ur5_2:
    pick-and-place of boxes and conveyor belt start/stop operations.
:param data: It contains LogicalCameraImage information published from topic /eyrc/vb/logical_camera_2
:return: null
"""
models_length = len(data.models)
global box_place_list
global should_start_conver
global ur5
global l
global dispatched_orders
global ic
global action_client
global client_multi_socket
if models_length > 0:
package_color=''
if models_length == 1:
x = data.models[0].pose.position.x
y = data.models[0].pose.position.y
package = data.models[0].type
print("package name: ",package)
#condition_breaker =1
else:
x = data.models[1].pose.position.x
y = data.models[1].pose.position.y
print(data.models)
package = data.models[1].type
print(package)
if package != "ur5":
package_color=l[package]
print("package color: ",package_color)
msg="busy"
client_multi_socket.send(str.encode(msg))
# ur5.set_joint_angles(define_joint_angle_list(168.661264258,-50,50.3742109357,-97.2680961679,-90.0263818056,-11.2957158804))
if y < 0.1 and package != "ur5":#--pkg1,2,3
#condition_breaker =1
# ur5.change_go_to_home()
# thread = threading.Thread(target=trigger_converbelt,args=(0,))
ur5.trigger_converbelt(0) # has to take care
rospy.sleep(.4)
msg="free" #test
client_multi_socket.send(str.encode(msg)) #test
# thread.start()
if package in box_place_list:
print("returning")
return 1
gazebo_y = round(y,3)
constant_x=1.00500025103
constant_diff=0.00000018366
# constant_diff=0.0000003163
multiplier=(x-constant_x)/constant_diff
multiplier=round(multiplier,1)
print(multiplier)
#gazebo_x =-(0.8+.05*multiplier)
gazebo_x= -0.686
box_length = 0.15 # Length of the Package
vacuum_gripper_width = 0.115 # Vacuum Gripper Width
delta = vacuum_gripper_width + (box_length/2) # 0.19
# global vaccum
print(delta)
#gazebo_x =-0.69
# Teams may use this info in Tasks
global freq
ur5.add_box(gazebo_x,gazebo_y, 0.99,'box')
#ur5.add_box(gazebo_x,gazebo_y, 0.99)
ur5.hard_go_to_pose(define_pose(gazebo_x,gazebo_y,(1 + vacuum_gripper_width + (box_length/2))),4)
#testing--->>>
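            # Descriptive note (added): poll the local coordination server until it stops
            # reporting "busy", then claim it by sending "busy" ourselves so the other arm's
            # controller waits while this one uses the shared ROS services; "free" is sent
            # once the critical section below is finished.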
ros_service_status ="busy"
while ros_service_status=="busy":
msg="status_check"
client_multi_socket.send(str.encode(msg))
res = client_multi_socket.recv(1024)
ros_service_status=str(res.decode('utf-8'))
print(res.decode('utf-8'))
if ros_service_status=="busy":
rospy.sleep(1)
else:
msg="busy"
client_multi_socket.send(str.encode(msg))
rospy.sleep(.2)
break
#---->>>>>>>
newl = vaccum(True)
print(newl)
# attach_box_thread(True)
rospy.sleep(.4)
msg="free" #test
client_multi_socket.send(str.encode(msg)) #test
rospy.loginfo('\033[94m' + "Translating EE by 0.5m in x from current position." + '\033[0m')
if package_color == "red":
#----Coordinate of red bin----->>>>>>>>>>>
ur5.attach_box('box')
ur5.ee_cartesian_translation(0,0,0.2)
isDone = ur5.hard_go_to_pose(define_pose(0.2,0.6,1.265),4)
print(isDone,'IsDone')
if not isDone:
ur5.detach_box('box')
ur5.remove_box('box')
# ur5.set_joint_angles(define_joint_angle_list(179,-57,86,-119,-88,0),4)
ur5.set_joint_angles(define_joint_angle_list(168.661264258,-50,50.3742109357,-97.2680961679,-90.0263818056,-11.2957158804))
box_place_list.append(package)
#newt = vaccum(False)
# attach_box_thread(False)
# ur5.hard_set_joint_angles(define_joint_angle_list(61.6301577746,19.9056122889,-90.7433259006,-19.1991556074,-90.037481863,-118.422844125),4)
rospy.loginfo('\033[96m' + "Successfully Placed Red Box" + '\033[0m')
elif package_color == "green":
#----Coordinate of green bin----->>>>>>>>>>>
ur5.attach_box('box')
ur5.ee_cartesian_translation(0,0,0.2)
list_joint_values =ur5.group.get_current_joint_values()
rospy.loginfo('\033[94m' + ">>> Current Joint Values:" + '\033[0m')
rospy.loginfo(list_joint_values)
#print(list_joint_values)
isDone = ur5.hard_go_to_pose(define_pose(0.2,-0.65,1.265),4)
print(isDone,'IsDone')
if not isDone:
ur5.detach_box('box')
ur5.remove_box('box')
ur5.set_joint_angles(define_joint_angle_list(168.661264258,-50,50.3742109357,-97.2680961679,-90.0263818056,-11.2957158804))
box_place_list.append(package)
#newt = vaccum(False)
# attach_box_thread(False)
# ur5.hard_set_joint_angles(define_joint_angle_list(-82.1363218895,-60.1866792126,81.5471504912,-111.31441537,-89.9371709008,97.8410717962),4)
#ur5.hard_set_joint_angles(define_joint_angle_list(-8.97200920273,-57.5998250621,77.4253084494,-109.817161576,-90.0504541341,170.994315015),4)
rospy.loginfo('\033[96m' + "Successfully Placed Green Box" + '\033[0m')
elif package_color == "yellow":
#----Coordinate of yellow bin----->>>>>>>>>>>
ur5.attach_box('box')
ur5.ee_cartesian_translation(0,0,0.2)
isDone = ur5.hard_go_to_pose(define_pose(0.7,0,1.265),4)
print(isDone,'IsDone')
if not isDone:
ur5.detach_box('box')
ur5.remove_box('box')
ur5.set_joint_angles(define_joint_angle_list(168.661264258,-50,50.3742109357,-97.2680961679,-90.0263818056,-11.2957158804))
box_place_list.append(package)
#newt = vaccum(False)
# attach_box_thread(False)
# ur5.hard_set_joint_angles(define_joint_angle_list(-8.97200920273,-57.5998250621,77.4253084494,-109.817161576,-90.0504541341,170.994315015),4)
#ur5.hard_set_joint_angles(define_joint_angle_list(-82.1363218895,-60.1866792126,81.5471504912,-111.31441537,-89.9371709008,97.8410717962),4)
rospy.loginfo('\033[96m' + "Successfully Placed Yellow Box" + '\033[0m')
else:
rospy.loginfo('\033[96m' + "Box Type Unknown" + '\033[0m')
#testing--->>>
ros_service_status ="busy"
while ros_service_status=="busy":
msg="status_check"
client_multi_socket.send(str.encode(msg))
res = client_multi_socket.recv(1024)
ros_service_status=str(res.decode('utf-8'))
print(res.decode('utf-8'))
if ros_service_status=="busy":
rospy.sleep(1)
else:
msg="busy"
client_multi_socket.send(str.encode(msg))
rospy.sleep(.2)
break
#---->>>>>>>
newt = vaccum(False)
print(newt)
rospy.sleep(.4)
msg="free" #test
client_multi_socket.send(str.encode(msg)) #test
ur5.detach_box('box')
ur5.remove_box('box')
box_place_list.append(package)
should_start_conver = True
order_id = dispatched_orders[package]['id']
order_city = dispatched_orders[package]['city']
order_details = get_item_details(package_color)
# {'item':'Medicines','sku_alpha':'R','priority':'HP','cost':'450','Estimated Time of Delivery':'1'}
print(order_id,order_details,'details')
shipped_str = 'OrdersShipped,'+order_id+','+order_city+','+order_details['item']+','+order_details['priority']+',1,'+order_details['cost']+',Yes,'+get_time_str(0)+','+get_time_str(order_details['Estimated Time of Delivery'])+','+order_details['Estimated Time of Delivery']
print(shipped_str,'string ship')
goal_handle1 = action_client.send_goal("mqtt", "pub", action_client.config_mqtt_pub_topic, shipped_str)
action_client.goal_handles['1'] = goal_handle1
#rospy.sleep(3)
else:
if models_length<3:
print("need to look here; package name: ",package)
#ur5.trigger_converbelt(75) #trigger_converbelt(100)
print("need to look here; package name: ",package)
else:
ur5.set_joint_angles(define_joint_angle_list(168.661264258,-50,50.3742109357,-97.2680961679,-90.0263818056,-11.2957158804))
if not ur5.is_conver_active and should_start_conver:
# bot pose command ---
#testing--->>>
ros_service_status ="busy"
while ros_service_status=="busy":
msg="status_check"
client_multi_socket.send(str.encode(msg))
res = client_multi_socket.recv(1024)
ros_service_status=str(res.decode('utf-8'))
print(res.decode('utf-8'))
if ros_service_status=="busy":
rospy.sleep(1)
else:
msg="busy"
client_multi_socket.send(str.encode(msg))
rospy.sleep(.2)
break
#---->>>>>>>
ur5.trigger_converbelt(90)
msg="free" #test
client_multi_socket.send(str.encode(msg)) #test
def match_boxcolor_with_no(data):
global ic
ic.callback(data)
inv = ic.inventory
global action_client
    for inv_str in inv:
        time.sleep(4)
        goal_handle1 = action_client.send_goal("mqtt", "pub", action_client.config_mqtt_pub_topic, inv_str)
action_client.goal_handles['1'] = goal_handle1
print("func")
def get_order_number(data):
"""
    Callback function.
    It receives the order details published on the topic /eyrc/vb/order_number.
    :param data: It is a msg containing order_id, city and name.
:return: null
"""
order_detail = data
global dispatched_orders
dispatched_orders.update({order_detail.name:{'id':order_detail.order_id,'city':order_detail.city}})
print(dispatched_orders)
def main():
global ic
rospy.init_node('ur5_2_controller',anonymous=True)
global action_client
action_client = IotClient()
global temp
global dispatched_orders
dispatched_orders = {}
temp = rospy.Subscriber("/eyrc/vb/camera_1/image_raw", Image,match_boxcolor_with_no,queue_size=1)
ic= Camera1(temp)
rospy.sleep(1)
global l #list
l = ic.list
print(l,'list')
global freq
global ur5
global vaccum
vaccum = rospy.ServiceProxy('/eyrc/vb/ur5/activate_vacuum_gripper/ur5_2', vacuumGripper)
global should_start_conver
should_start_conver = True
ur5 = Ur5Moveit('ur5_2')
freq = 0
global box_place_list
box_place_list =[]
ur5.trigger_converbelt(90) #--testing for parallelism
global client_multi_socket
#testing
client_multi_socket = socket.socket()
host = '127.0.0.1'
port = 2004
print('Waiting for connection response')
while True:
try:
client_multi_socket.connect((host, port))
break
except socket.error as e:
#print(str(e))
print("please start local server")
rospy.sleep(1.5)
client_multi_socket.recv(1024)
#----->>>>
ur5.set_joint_angles(
define_joint_angle_list(168.661264258, -50, 50.3742109357, -97.2680961679, -90.0263818056, -11.2957158804))
rospy.Subscriber('/eyrc/vb/logical_camera_2',LogicalCameraImage,bot_2_controller,queue_size=1)
rospy.Subscriber('/eyrc/vb/order_number',OrderDetailArray,get_order_number,queue_size=1)
rospy.spin()
if __name__ == '__main__':
global box_place_list
global should_start_conver
global ur5
global l
global temp
global vaccum
global dispatched_orders
global ic
global client_multi_socket
global freq
global action_client
main()
|
pipeline_execute.py
|
import logging
import d3m.runtime
import d3m.metadata.base
import d3m.metadata.pipeline
from sqlalchemy.orm import joinedload
from d3m.container import Dataset
from d3m.metadata import base as metadata_base
from alphad3m.schema import database, convert
from multiprocessing import Manager, Process
logger = logging.getLogger(__name__)
@database.with_db
def execute(pipeline_id, dataset, problem, results_path, msg_queue, db):
# Get pipeline from database
pipeline = (
db.query(database.Pipeline)
.filter(database.Pipeline.id == pipeline_id)
.options(joinedload(database.Pipeline.modules),
joinedload(database.Pipeline.connections))
).one()
logger.info('About to execute pipeline, id=%s, dataset=%r',
pipeline_id, dataset)
# Load data
dataset = Dataset.load(dataset)
logger.info('Loaded dataset')
json_pipeline = convert.to_d3m_json(pipeline)
logger.info('Pipeline to be executed:\n%s',
'\n'.join([x['primitive']['python_path'] for x in json_pipeline['steps']]))
    d3m_pipeline = d3m.metadata.pipeline.Pipeline.from_json_structure(json_pipeline)
runtime = d3m.runtime.Runtime(pipeline=d3m_pipeline, problem_description=problem,
context=metadata_base.Context.TESTING)
manager = Manager()
return_dict = manager.dict()
p = Process(target=worker, args=(runtime, dataset, return_dict))
p.start()
    p.join(180)  # Maximum 3 minutes
    if p.is_alive():
        p.terminate()  # worker did not finish in time
        raise RuntimeError('Pipeline execution timed out after 180 seconds')
    fit_results = return_dict['fit_results']
    fit_results.check_success()
if results_path is not None:
logger.info('Storing fit results at %s', results_path)
fit_results.values['outputs.0'].to_csv(results_path)
else:
logger.info('NOT storing fit results')
return fit_results.values
def worker(runtime, dataset, return_dict):
return_dict['fit_results'] = runtime.fit(inputs=[dataset])
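# Hedged illustration (added; not part of the original module): a minimal, standalone
# sketch of the Manager/Process/join-with-timeout pattern used by execute() above.
# The names _demo_worker and _demo_run are made up for this example only.
def _demo_worker(return_dict):
    return_dict['value'] = sum(range(10))  # stand-in for runtime.fit(...)

def _demo_run(timeout=5):
    manager = Manager()
    return_dict = manager.dict()
    proc = Process(target=_demo_worker, args=(return_dict,))
    proc.start()
    proc.join(timeout)
    if proc.is_alive():  # still running after the timeout
        proc.terminate()
        return None
    return return_dict.get('value')

if __name__ == '__main__':
    print('demo result:', _demo_run())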
|
walking_simulation.py
|
#!/usr/bin/env python
import os
import numpy
import pyquaternion
import pcl
import tf
import rospy
import rospkg
import time
import threading
import random
import ctypes
from PIL import Image as pil
import pybullet as p
import pybullet_data
from pybullet_utils import gazebo_world_parser
from sensor_msgs.msg import Imu
from sensor_msgs.msg import Image
from nav_msgs.msg import Odometry
from sensor_msgs.msg import PointCloud2
from sensor_msgs.msg import PointField
from geometry_msgs.msg import Twist
from quadruped_ctrl.srv import QuadrupedCmd, QuadrupedCmdResponse
get_last_vel = [0] * 3
robot_height = 0.30
motor_id_list = [0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14]
init_new_pos = [0.0, -0.8, 1.6, 0.0, -0.8, 1.6, 0.0, -0.8, 1.6, 0.0, -0.8, 1.6,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
class StructPointer(ctypes.Structure):
_fields_ = [("eff", ctypes.c_double * 12)]
def convert_type(input):
ctypes_map = {int: ctypes.c_int,
float: ctypes.c_double,
str: ctypes.c_char_p
}
input_type = type(input)
if input_type is list:
length = len(input)
if length == 0:
rospy.logerr("convert type failed...input is "+input)
return 0
else:
arr = (ctypes_map[type(input[0])] * length)()
for i in range(length):
arr[i] = bytes(
input[i], encoding="utf-8") if (type(input[0]) is str) else input[i]
return arr
else:
if input_type in ctypes_map:
return ctypes_map[input_type](bytes(input, encoding="utf-8") if type(input) is str else input)
else:
rospy.logerr("convert type failed...input is "+input)
return 0
def thread_job():
rospy.spin()
def callback_gait(req):
cpp_gait_ctrller.set_gait_type(convert_type(req.cmd))
return QuadrupedCmdResponse(0, "get the gait")
def callback_mode(req):
cpp_gait_ctrller.set_robot_mode(convert_type(req.cmd))
return QuadrupedCmdResponse(0, "get the mode")
def callback_body_vel(msg):
vel = [msg.linear.x, msg.linear.y, msg.angular.x]
cpp_gait_ctrller.set_robot_vel(convert_type(vel))
def acc_filter(value, last_accValue):
a = 1
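    # Note (added): with a = 1 the complementary filter below passes the raw value
    # through unchanged, i.e. no smoothing against last_accValue is applied.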
filter_value = a * value + (1 - a) * last_accValue
return filter_value
def pub_nav_msg(base_pos, imu_data):
pub_odom = rospy.Publisher("/robot_odom", Odometry, queue_size=100)
odom = Odometry()
odom.header.stamp = rospy.Time.now()
odom.header.frame_id ="world"
odom.child_frame_id = "world"
odom.pose.pose.position.x = base_pos[0]
odom.pose.pose.position.y = base_pos[1]
odom.pose.pose.position.z = base_pos[2]
odom.pose.pose.orientation.x = imu_data[3]
odom.pose.pose.orientation.y = imu_data[4]
odom.pose.pose.orientation.z = imu_data[5]
odom.pose.pose.orientation.w = imu_data[6]
pub_odom.publish(odom)
def pub_imu_msg(imu_data):
pub_imu = rospy.Publisher("/imu0", Imu, queue_size=100)
imu_msg = Imu()
imu_msg.linear_acceleration.x = imu_data[0]
imu_msg.linear_acceleration.y = imu_data[1]
imu_msg.linear_acceleration.z = imu_data[2]
imu_msg.angular_velocity.x = imu_data[7]
imu_msg.angular_velocity.y = imu_data[8]
imu_msg.angular_velocity.z = imu_data[9]
imu_msg.orientation.x = imu_data[3]
imu_msg.orientation.y = imu_data[4]
imu_msg.orientation.z = imu_data[5]
imu_msg.orientation.w = imu_data[6]
imu_msg.header.stamp = rospy.Time.now()
imu_msg.header.frame_id = "robot"
pub_imu.publish(imu_msg)
def get_data_from_sim():
global get_last_vel
get_orientation = []
get_matrix = []
get_velocity = []
get_invert = []
imu_data = [0] * 10
leg_data = [0] * 24
pose_orn = p.getBasePositionAndOrientation(boxId)
for i in range(4):
get_orientation.append(pose_orn[1][i])
# get_euler = p.getEulerFromQuaternion(get_orientation)
get_velocity = p.getBaseVelocity(boxId)
get_invert = p.invertTransform(pose_orn[0], pose_orn[1])
get_matrix = p.getMatrixFromQuaternion(get_invert[1])
# IMU data
imu_data[3] = pose_orn[1][0]
imu_data[4] = pose_orn[1][1]
imu_data[5] = pose_orn[1][2]
imu_data[6] = pose_orn[1][3]
imu_data[7] = get_matrix[0] * get_velocity[1][0] + get_matrix[1] * \
get_velocity[1][1] + get_matrix[2] * get_velocity[1][2]
imu_data[8] = get_matrix[3] * get_velocity[1][0] + get_matrix[4] * \
get_velocity[1][1] + get_matrix[5] * get_velocity[1][2]
imu_data[9] = get_matrix[6] * get_velocity[1][0] + get_matrix[7] * \
get_velocity[1][1] + get_matrix[8] * get_velocity[1][2]
# calculate the acceleration of the robot
linear_X = (get_velocity[0][0] - get_last_vel[0]) * freq
linear_Y = (get_velocity[0][1] - get_last_vel[1]) * freq
linear_Z = 9.8 + (get_velocity[0][2] - get_last_vel[2]) * freq
imu_data[0] = get_matrix[0] * linear_X + \
get_matrix[1] * linear_Y + get_matrix[2] * linear_Z
imu_data[1] = get_matrix[3] * linear_X + \
get_matrix[4] * linear_Y + get_matrix[5] * linear_Z
imu_data[2] = get_matrix[6] * linear_X + \
get_matrix[7] * linear_Y + get_matrix[8] * linear_Z
# joint data
joint_state = p.getJointStates(boxId, motor_id_list)
leg_data[0:12] = [joint_state[0][0], joint_state[1][0], joint_state[2][0],
joint_state[3][0], joint_state[4][0], joint_state[5][0],
joint_state[6][0], joint_state[7][0], joint_state[8][0],
joint_state[9][0], joint_state[10][0], joint_state[11][0]]
leg_data[12:24] = [joint_state[0][1], joint_state[1][1], joint_state[2][1],
joint_state[3][1], joint_state[4][1], joint_state[5][1],
joint_state[6][1], joint_state[7][1], joint_state[8][1],
joint_state[9][1], joint_state[10][1], joint_state[11][1]]
com_velocity = [get_velocity[0][0],
get_velocity[0][1], get_velocity[0][2]]
# get_last_vel.clear()
get_last_vel = []
get_last_vel = com_velocity
return imu_data, leg_data, pose_orn[0]
def reset_robot():
if terrain == "racetrack":
robot_z = 0.4
else:
robot_z = robot_height
p.resetBasePositionAndOrientation(
boxId, [0, 0, robot_z], [0, 0, 0, 1])
p.resetBaseVelocity(boxId, [0, 0, 0], [0, 0, 0])
for j in range(12):
p.resetJointState(boxId, motor_id_list[j], init_new_pos[j], init_new_pos[j+12])
cpp_gait_ctrller.init_controller(convert_type(
freq), convert_type([stand_kp, stand_kd, joint_kp, joint_kd]))
for _ in range(10):
p.stepSimulation()
imu_data, leg_data, _ = get_data_from_sim()
cpp_gait_ctrller.pre_work(convert_type(
imu_data), convert_type(leg_data))
for j in range(16):
force = 0
p.setJointMotorControl2(
boxId, j, p.VELOCITY_CONTROL, force=force)
cpp_gait_ctrller.set_robot_mode(convert_type(1))
    for _ in range(200):
        run()  # run() already steps the simulation internally
cpp_gait_ctrller.set_robot_mode(convert_type(0))
def init_simulator():
global boxId, reset, low_energy_mode, high_performance_mode, terrain, p
robot_start_pos = [0, 0, 0.42]
p.connect(p.GUI) # or p.DIRECT for non-graphical version
p.setAdditionalSearchPath(pybullet_data.getDataPath()) # optionally
p.resetSimulation()
p.setTimeStep(1.0/freq)
p.setGravity(0, 0, -9.8)
reset = p.addUserDebugParameter("reset", 1, 0, 0)
low_energy_mode = p.addUserDebugParameter("low_energy_mode", 1, 0, 0)
high_performance_mode = p.addUserDebugParameter("high_performance_mode", 1, 0, 0)
p.resetDebugVisualizerCamera(0.2, 45, -30, [1, -1, 1])
heightPerturbationRange = 0.06
numHeightfieldRows = 256
numHeightfieldColumns = 256
if terrain == "plane":
planeShape = p.createCollisionShape(shapeType=p.GEOM_PLANE)
ground_id = p.createMultiBody(0, planeShape)
p.resetBasePositionAndOrientation(ground_id, [0, 0, 0], [0, 0, 0, 1])
p.changeDynamics(ground_id, -1, lateralFriction=lateralFriction)
elif terrain == "random1":
heightfieldData = [0]*numHeightfieldRows*numHeightfieldColumns
for j in range(int(numHeightfieldColumns/2)):
for i in range(int(numHeightfieldRows/2)):
height = random.uniform(0, heightPerturbationRange)
heightfieldData[2*i+2*j*numHeightfieldRows] = height
heightfieldData[2*i+1+2*j*numHeightfieldRows] = height
heightfieldData[2*i+(2*j+1)*numHeightfieldRows] = height
heightfieldData[2*i+1+(2*j+1)*numHeightfieldRows] = height
terrainShape = p.createCollisionShape(shapeType=p.GEOM_HEIGHTFIELD, meshScale=[.05, .05, 1], heightfieldTextureScaling=(
numHeightfieldRows-1)/2, heightfieldData=heightfieldData, numHeightfieldRows=numHeightfieldRows, numHeightfieldColumns=numHeightfieldColumns)
ground_id = p.createMultiBody(0, terrainShape)
p.resetBasePositionAndOrientation(ground_id, [0, 0, 0], [0, 0, 0, 1])
p.changeDynamics(ground_id, -1, lateralFriction=lateralFriction)
elif terrain == "random2":
terrain_shape = p.createCollisionShape(
shapeType=p.GEOM_HEIGHTFIELD,
meshScale=[.5, .5, .5],
fileName="heightmaps/ground0.txt",
heightfieldTextureScaling=128)
ground_id = p.createMultiBody(0, terrain_shape)
textureId = p.loadTexture(path+"/models/grass.png")
p.changeVisualShape(ground_id, -1, textureUniqueId=textureId)
p.resetBasePositionAndOrientation(ground_id, [1, 0, 0.2], [0, 0, 0, 1])
p.changeDynamics(ground_id, -1, lateralFriction=lateralFriction)
elif terrain == "stairs":
planeShape = p.createCollisionShape(shapeType=p.GEOM_PLANE)
ground_id = p.createMultiBody(0, planeShape)
# p.resetBasePositionAndOrientation(ground_id, [0, 0, 0], [0, 0.0872, 0, 0.9962])
p.resetBasePositionAndOrientation(ground_id, [0, 0, 0], [0, 0, 0, 1])
# many box
colSphereId = p.createCollisionShape(
p.GEOM_BOX, halfExtents=[0.1, 0.4, 0.01])
colSphereId1 = p.createCollisionShape(
p.GEOM_BOX, halfExtents=[0.1, 0.4, 0.02])
colSphereId2 = p.createCollisionShape(
p.GEOM_BOX, halfExtents=[0.1, 0.4, 0.03])
colSphereId3 = p.createCollisionShape(
p.GEOM_BOX, halfExtents=[0.1, 0.4, 0.04])
# colSphereId4 = p.createCollisionShape(
# p.GEOM_BOX, halfExtents=[0.03, 0.03, 0.03])
p.createMultiBody(100, colSphereId, basePosition=[1.0, 1.0, 0.0])
p.changeDynamics(colSphereId, -1, lateralFriction=lateralFriction)
p.createMultiBody(100, colSphereId1, basePosition=[1.2, 1.0, 0.0])
p.changeDynamics(colSphereId1, -1, lateralFriction=lateralFriction)
p.createMultiBody(100, colSphereId2, basePosition=[1.4, 1.0, 0.0])
p.changeDynamics(colSphereId2, -1, lateralFriction=lateralFriction)
p.createMultiBody(100, colSphereId3, basePosition=[1.6, 1.0, 0.0])
p.changeDynamics(colSphereId3, -1, lateralFriction=lateralFriction)
# p.createMultiBody(10, colSphereId4, basePosition=[2.7, 1.0, 0.0])
# p.changeDynamics(colSphereId4, -1, lateralFriction=0.5)
p.changeDynamics(ground_id, -1, lateralFriction=lateralFriction)
elif terrain == "racetrack":
os.chdir(path)
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 0)
gazebo_world_parser.parseWorld(p, filepath = "worlds/racetrack_day.world")
p.configureDebugVisualizer(shadowMapResolution = 8192)
p.configureDebugVisualizer(shadowMapWorldSize = 25)
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1)
boxId = p.loadURDF("mini_cheetah/mini_cheetah.urdf", robot_start_pos,
useFixedBase=False)
p.changeDynamics(boxId, 3, spinningFriction=spinningFriction)
p.changeDynamics(boxId, 7, spinningFriction=spinningFriction)
p.changeDynamics(boxId, 11, spinningFriction=spinningFriction)
p.changeDynamics(boxId, 15, spinningFriction=spinningFriction)
jointIds = []
for j in range(p.getNumJoints(boxId)):
p.getJointInfo(boxId, j)
jointIds.append(j)
reset_robot()
def run():
# get data from simulator
imu_data, leg_data, base_pos = get_data_from_sim()
#pub msg
pub_nav_msg(base_pos, imu_data)
pub_imu_msg(imu_data)
# call cpp function to calculate mpc tau
tau = cpp_gait_ctrller.toque_calculator(convert_type(
imu_data), convert_type(leg_data))
# set tau to simulator
p.setJointMotorControlArray(bodyUniqueId=boxId,
jointIndices=motor_id_list,
controlMode=p.TORQUE_CONTROL,
forces=tau.contents.eff)
# reset visual cam
# p.resetDebugVisualizerCamera(2.5, 45, -30, base_pos)
p.stepSimulation()
return
def camera_update():
rate_1 = rospy.Rate(20)
near = 0.1
far = 1000
step_index = 4
pixelWidth = int(320/step_index)
pixelHeight = int(240/step_index)
cameraEyePosition = [0.3, 0, 0.26436384367425125]
cameraTargetPosition = [1.0, 0, 0]
cameraUpVector = [45, 45, 0]
pub_pointcloud = PointCloud2()
pub_image = Image()
pointcloud_publisher = rospy.Publisher("/generated_pc", PointCloud2, queue_size=10)
image_publisher = rospy.Publisher("/cam0/image_raw", Image, queue_size=10)
robot_tf = tf.TransformBroadcaster()
while not rospy.is_shutdown():
cubePos, cubeOrn = p.getBasePositionAndOrientation(boxId)
get_matrix = p.getMatrixFromQuaternion(cubeOrn)
# T1 = numpy.mat([[0, -numpy.sqrt(2.0)/2.0, numpy.sqrt(2.0)/2.0, 0.25], [-1, 0, 0, 0],
# [0, -numpy.sqrt(2.0)/2.0, -numpy.sqrt(2.0)/2.0, 0], [0, 0, 0, 1]])
T1 = numpy.mat([[0, -1.0/2.0, numpy.sqrt(3.0)/2.0, 0.25], [-1, 0, 0, 0],
[0, -numpy.sqrt(3.0)/2.0, -1.0/2.0, 0], [0, 0, 0, 1]])
T2 = numpy.mat([[get_matrix[0], get_matrix[1], get_matrix[2], cubePos[0]],
[get_matrix[3], get_matrix[4], get_matrix[5], cubePos[1]],
[get_matrix[6], get_matrix[7], get_matrix[8], cubePos[2]],
[0, 0, 0, 1]])
T2_ = (T2.I)
T3_ = numpy.array(T2*T1)
cameraEyePosition[0] = T3_[0][3]
cameraEyePosition[1] = T3_[1][3]
cameraEyePosition[2] = T3_[2][3]
cameraTargetPosition = (numpy.mat(T3_)*numpy.array([[0],[0],[1],[1]]))[0:3]
q = pyquaternion.Quaternion(matrix=T3_)
cameraQuat = [q[1], q[2], q[3], q[0]]
robot_tf.sendTransform(cubePos, cubeOrn, rospy.Time.now(), "robot", "world")
robot_tf.sendTransform(cameraEyePosition, cameraQuat, rospy.Time.now(), "cam", "world")
robot_tf.sendTransform(cameraTargetPosition, cubeOrn, rospy.Time.now(), "tar", "world")
cameraUpVector = [0, 0, 1]
viewMatrix = p.computeViewMatrix(
cameraEyePosition, cameraTargetPosition, cameraUpVector)
aspect = float(pixelWidth) / float(pixelHeight)
projectionMatrix = p.computeProjectionMatrixFOV(60, aspect, near, far)
width, height, rgbImg, depthImg, _ = p.getCameraImage(pixelWidth,
pixelHeight,
viewMatrix=viewMatrix,
projectionMatrix=projectionMatrix,
shadow=1,
lightDirection=[1, 1, 1],
renderer=p.ER_BULLET_HARDWARE_OPENGL)
        # point cloud method 1
# imgW = width
# imgH = height
# depth_img_buffer = numpy.reshape(depthImg, [imgH, imgW])
# projectionMatrix1 = numpy.asarray(projectionMatrix).reshape([4,4],order='F')
# viewMatrix1 = numpy.asarray(viewMatrix).reshape([4,4],order='F')
# tran_pix_world = numpy.linalg.inv(numpy.matmul(projectionMatrix1, viewMatrix1))
# pcl_data = pcl.PointCloud()
# pc_list = [0]*(imgW*imgH)
# pc = numpy.zeros(3)
# pixPos = numpy.ones(4)
# pixPosZ = (2.0*depth_img_buffer - 1.0)
# for h in range(0, imgH):
# for w in range(0, imgW):
# pixPos[0] = (2.0*w - imgW)/imgW
# pixPos[1] = -(2.0*h - imgH)/imgH
# pixPos[2] = pixPosZ[h,w]
# position = tran_pix_world.dot(pixPos)
# for ii in range(3):
# pc[ii] = position[ii] / position[3]
# pc_list[h*imgW+w]=pc.tolist()
        # point cloud method 2
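        # Descriptive note (added): the block below recovers pinhole intrinsics from the
        # OpenGL projection matrix P returned by computeProjectionMatrixFOV,
        #   fx = width * P[0] / 2,  fy = height * P[5] / 2,
        #   cx = (1 - P[2]) * width / 2,  cy = (1 + P[6]) * height / 2,
        # linearizes each depth-buffer sample with Z = far * near / (far - (far - near) * d),
        # and back-projects every pixel into the world frame via T3_.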
pc_list = []
pcl_data = pcl.PointCloud()
fx = (pixelWidth*projectionMatrix[0])/2.0
fy = (pixelHeight*projectionMatrix[5])/2.0
cx = (1-projectionMatrix[2])*pixelWidth/2.0
cy = (1+projectionMatrix[6])*pixelHeight/2.0
cloud_point = [0]*pixelWidth*pixelHeight*3
depthBuffer = numpy.reshape(depthImg,[pixelHeight,pixelWidth])
depth = depthBuffer
for h in range(0, pixelHeight):
for w in range(0, pixelWidth):
depth[h][w] =float(depthBuffer[h,w])
depth[h][w] = far * near / (far - (far - near) * depthBuffer[h][w])
Z= float(depth[h][w])
if (Z >4):
continue
if (Z< 0.01):
continue
X=(w-cx)*Z/fx
Y=(h-cy)*Z/fy
XYZ_= numpy.mat([[X],[Y],[Z],[1]])
XYZ =numpy.array(T3_*XYZ_)
# XYZ = numpy.array(XYZ_)
X= float(XYZ[0])
Y= float(XYZ[1])
Z= float(XYZ[2])
cloud_point[h*pixelWidth*3+w*3+0] = float(X)
cloud_point[h*pixelWidth*3+w*3+1] = float(Y)
cloud_point[h*pixelWidth*3+w*3+2] = float(Z)
pc_list.append([X,Y,Z])
pcl_data.from_list(pc_list)
pub_pointcloud.header.stamp = rospy.Time().now()
pub_pointcloud.header.frame_id = "world"
pub_pointcloud.height = 1
pub_pointcloud.width = len(pc_list)
pub_pointcloud.point_step = 12
pub_pointcloud.fields = [
PointField('x', 0, PointField.FLOAT32, 1),
PointField('y', 4, PointField.FLOAT32, 1),
PointField('z', 8, PointField.FLOAT32, 1)]
        pub_pointcloud.data = numpy.asarray(pc_list, numpy.float32).tobytes()
pointcloud_publisher.publish(pub_pointcloud)
# grey image
pub_image.header.stamp = rospy.Time().now()
pub_image.header.frame_id = "cam"
pub_image.width = width
pub_image.height = height
pub_image.encoding = "mono8"
pub_image.step = width
grey = pil.fromarray(rgbImg)
pub_image.data = numpy.asarray(grey.convert('L')).reshape([1,-1]).tolist()[0]
image_publisher.publish(pub_image)
rate_1.sleep()
def main():
cnt = 0
rate = rospy.Rate(freq) # hz
reset_flag = p.readUserDebugParameter(reset)
low_energy_flag = p.readUserDebugParameter(low_energy_mode)
high_performance_flag = p.readUserDebugParameter(high_performance_mode)
while not rospy.is_shutdown():
# check reset button state
if(reset_flag < p.readUserDebugParameter(reset)):
reset_flag = p.readUserDebugParameter(reset)
rospy.logwarn("reset the robot")
cnt = 0
reset_robot()
if(low_energy_flag < p.readUserDebugParameter(low_energy_mode)):
low_energy_flag = p.readUserDebugParameter(low_energy_mode)
rospy.logwarn("set robot to low energy mode")
cpp_gait_ctrller.set_robot_mode(convert_type(1))
if(high_performance_flag < p.readUserDebugParameter(high_performance_mode)):
high_performance_flag = p.readUserDebugParameter(high_performance_mode)
rospy.logwarn("set robot to high performance mode")
cpp_gait_ctrller.set_robot_mode(convert_type(0))
run()
cnt += 1
if cnt > 99999999:
cnt = 99999999
rate.sleep()
if __name__ == '__main__':
rospy.init_node('quadruped_simulator', anonymous=True)
terrain = rospy.get_param('/simulation/terrain')
camera = rospy.get_param('/simulation/camera')
lateralFriction = rospy.get_param('/simulation/lateralFriction')
spinningFriction = rospy.get_param('/simulation/spinningFriction')
freq = rospy.get_param('/simulation/freq')
stand_kp = rospy.get_param('/simulation/stand_kp')
stand_kd = rospy.get_param('/simulation/stand_kd')
joint_kp = rospy.get_param('/simulation/joint_kp')
joint_kd = rospy.get_param('/simulation/joint_kd')
rospy.loginfo("lateralFriction = " + str(lateralFriction) + " spinningFriction = " + str(spinningFriction))
rospy.loginfo(" freq = " + str(freq) + " PID = " + str([stand_kp, stand_kd, joint_kp, joint_kd]))
rospack = rospkg.RosPack()
path = rospack.get_path('quadruped_ctrl')
so_file = path.replace('src/quadruped_ctrl',
'devel/lib/libquadruped_ctrl.so')
if(not os.path.exists(so_file)):
so_file = path.replace('src/quadruped_ctrl',
'build/lib/libquadruped_ctrl.so')
if(not os.path.exists(so_file)):
rospy.logerr("cannot find cpp.so file")
cpp_gait_ctrller = ctypes.cdll.LoadLibrary(so_file)
cpp_gait_ctrller.toque_calculator.restype = ctypes.POINTER(StructPointer)
rospy.loginfo("find so file = " + so_file)
s = rospy.Service('gait_type', QuadrupedCmd, callback_gait)
s1 = rospy.Service('robot_mode', QuadrupedCmd, callback_mode)
rospy.Subscriber("cmd_vel", Twist, callback_body_vel, buff_size=10000)
init_simulator()
add_thread = threading.Thread(target=thread_job)
add_thread.start()
if camera:
add_thread_1 = threading.Thread(target=camera_update)
add_thread_1.start()
main()
|
producer_consumer.py
|
NUMBER_OF_PRODUCER_PROCESSES = 5
NUMBER_OF_CONSUMER_PROCESSES = 5
from multiprocessing import Process, Queue
import random, hashlib, time, os
class Consumer:
def __init__(self):
self.msg = None
def consume_msg(self, queue):
while True:
print('Got into consumer method, with pid: %s' % os.getpid())
# if queue.qsize() != 0:
            if not queue.empty():
self.msg = queue.get()
print('got msg: %s' % self.msg)
else:
self.msg = None
print('Queue looks empty')
time.sleep(random.randrange(5, 10))
class Producer:
def __init__(self):
self.msg = None
self.count = 0
def produce_msg(self, queue):
while True:
self.count += 1
print('Producing %d' % self.count)
if self.count > 5:
print("Producer terminated!")
break
print('Got into producer method, with pid: %s' % os.getpid())
# self.msg = hashlib.md5(random.random().__str__()).hexdigest()
self.msg = random.random().__str__()
queue.put(self.msg)
print('Produced msg: %s' % self.msg)
time.sleep(random.randrange(5, 10))
if __name__ == "__main__":
process_pool = []
queue = Queue()
producer = Producer()
consumer = Consumer()
for i in range(NUMBER_OF_PRODUCER_PROCESSES):
print('Producer %d' % i)
p = Process(target=producer.produce_msg, args=(queue,))
process_pool.append(p)
for i in range(NUMBER_OF_CONSUMER_PROCESSES):
print('Consumer %d' % i)
p = Process(target=consumer.consume_msg, args=(queue,))
process_pool.append(p)
for each in process_pool:
each.start()
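    # Hedged note (added; not in the original script): the consumer loops never return, so
    # joining every process would block forever. One option, relying on the list order used
    # above (producers first, then consumers), is to wait for the producers and then stop the
    # consumers once the queue has been drained.
    for producer_proc in process_pool[:NUMBER_OF_PRODUCER_PROCESSES]:
        producer_proc.join()
    while not queue.empty():
        time.sleep(1)
    for consumer_proc in process_pool[NUMBER_OF_PRODUCER_PROCESSES:]:
        consumer_proc.terminate()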
|
roonapi.py
|
from __future__ import unicode_literals
import time
from .constants import *
from .roonapisocket import RoonApiWebSocket
from .discovery import RoonDiscovery
import threading
class RoonApi():
_roonsocket = None
_roondiscovery = None
_host = None
_port = None
_token = None
_exit = False
_zones = {}
_outputs = {}
_source_controls_request_id = None
_volume_controls_request_id = None
_source_controls = {}
_volume_controls = {}
_state_callbacks = []
ready = False
@property
def token(self):
''' the authentication key that was retrieved from the registration with Roon'''
return self._token
@property
def zones(self):
''' all zones, returned as dict'''
return self._zones
@property
def outputs(self):
''' all outputs, returned as dict'''
return self._outputs
def zone_by_name(self, zone_name):
''' get zone details by name'''
for zone in self.zones.values():
if zone["display_name"] == zone_name:
return zone
return None
def output_by_name(self, output_name):
''' get the output details from the name'''
for output in self.outputs.values():
if output["display_name"] == output_name:
return output
return None
def zone_by_output_id(self, output_id):
''' get the zone details by output id'''
for zone in self.zones.values():
for output in zone["outputs"]:
if output["output_id"] == output_id:
return zone
return None
def zone_by_output_name(self, output_name):
'''
get the zone details by an output name
params:
output_name: the name of the output
returns: full zone details (dict)
'''
for zone in self.zones.values():
for output in zone["outputs"]:
if output["display_name"] == output_name:
return zone
return None
def get_image(self, image_key, scale="fit", width=500, height=500):
'''
get the image url for the specified image key
params:
image_key: the key for the image as retrieved in other api calls
scale: optional (value of fit, fill or stretch)
width: the width of the image (required if scale is specified)
height: the height of the image (required if scale is set)
returns: string with the full url to the image
'''
return "http://%s:%s/api/image/%s?scale=%s&width=%s&height=%s" %(self._host, self._port, image_key, scale, width, height)
def playback_control(self, zone_or_output_id, control="play"):
'''
send player command to the specified zone
params:
zone_or_output_id: the id of the zone or output
control:
* "play" - If paused or stopped, start playback
* "pause" - If playing or loading, pause playback
* "playpause" - If paused or stopped, start playback. If playing or loading, pause playback.
* "stop" - Stop playback and release the audio device immediately
* "previous" - Go to the start of the current track, or to the previous track
* "next" - Advance to the next track
'''
data = {
"zone_or_output_id": zone_or_output_id,
"control": control
}
return self._request(ServiceTransport + "/control", data)
def standby(self, output_id, control_key=None):
'''
send standby command to the specified output
params:
output_id: the id of the output to put in standby
control_key: The control_key that identifies the source_control that is to be put into standby.
If omitted, then all source controls on this output that support standby will be put into standby.
'''
data = { "output_id": output_id, "control_key": control_key }
return self._request(ServiceTransport + "/standby", data)
def convenience_switch(self, output_id, control_key=None):
'''
Convenience switch an output, taking it out of standby if needed.
params:
output_id: the id of the output that should be convenience-switched.
control_key: The control_key that identifies the source_control that is to be switched.
If omitted, then all controls on this output will be convenience switched.
'''
data = { "output_id": output_id, "control_key": control_key }
return self._request(ServiceTransport + "/convenience_switch", data)
def mute(self, output_id, mute=True):
'''
Mute/unmute an output.
params:
output_id: the id of the output that should be muted/unmuted
mute: bool if the output should be muted. Will unmute if set to False
'''
how = "mute" if mute else "unmute"
data = { "output_id": output_id, "how": how }
return self._request(ServiceTransport + "/mute", data)
def change_volume(self, output_id, value, method="absolute"):
'''
        Change the volume of an output. For convenience you can always just give the new volume level as a percentage.
params:
output_id: the id of the output
value: The new volume value, or the increment value or step (as percentage)
method: How to interpret the volume ('absolute'|'relative'|'relative_step')
'''
if not "volume" in self._outputs[output_id]:
LOGGER.info("This endpoint has fixed volume.")
return None
if method == "absolute":
if self._outputs[output_id]["volume"]["type"] == "db":
value = int((float(value) / 100) * 80) - 80
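                # Note (added): this maps a 0-100 % request onto Roon's -80..0 dB volume scale.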
data = { "output_id": output_id, "how": method, "value": value }
return self._request(ServiceTransport + "/change_volume", data)
def seek(self, zone_or_output_id, seconds, method="absolute"):
'''
Seek to a time position within the now playing media
params:
zone_or_output_id: the id of the zone or output
seconds: The target seek position
method: How to interpret the target seek position ('absolute'|'relative')
'''
data = { "zone_or_output_id": zone_or_output_id, "how": method, "seconds": seconds }
return self._request(ServiceTransport + "/seek", data)
def shuffle(self, zone_or_output_id, shuffle=True):
'''
Enable/disable shuffle
params:
zone_or_output_id: the id of the output or zone
shuffle: bool if shuffle should be enabled. False will disable shuffle
'''
data = { "zone_or_output_id": zone_or_output_id, "shuffle": shuffle }
return self._request(ServiceTransport + "/change_settings", data)
def repeat(self, zone_or_output_id, repeat=True):
'''
Enable/disable repeat (loop mode)
params:
zone_or_output_id: the id of the output or zone
            repeat: bool if repeat should be enabled. False will disable repeat
'''
loop = "loop" if repeat else "disabled"
data = { "zone_or_output_id": zone_or_output_id, "loop": loop }
return self._request(ServiceTransport + "/change_settings", data)
def transfer_zone(self, from_zone_or_output_id, to_zone_or_output_id):
'''
Transfer the current queue from one zone to another
params:
from_zone_or_output_id - The source zone or output
to_zone_or_output_id - The destination zone or output
'''
data = { "from_zone_or_output_id": from_zone_or_output_id,
"to_zone_or_output_id": to_zone_or_output_id }
return self._request(ServiceTransport + "/transfer_zone", data)
def group_outputs(self, output_ids):
'''
Create a group of synchronized audio outputs
params:
output_ids - The outputs to group. The first output's zone's queue is preserved.
'''
data = { "output_ids": output_ids }
return self._request(ServiceTransport + "/group_outputs", data)
def ungroup_outputs(self, output_ids):
'''
        Ungroup outputs previously grouped
params:
output_ids - The outputs to ungroup.
'''
data = { "output_ids": output_ids }
return self._request(ServiceTransport + "/ungroup_outputs", data)
def register_source_control(self, control_key, display_name, callback, initial_state="selected", supports_standby=True):
''' register a new source control on the api'''
if control_key in self._source_controls:
LOGGER.error("source_control %s is already registered!" % control_key)
return
control_data = {
"display_name": display_name,
"supports_standby": supports_standby,
"status": initial_state,
"control_key": control_key
}
self._source_controls[control_key] = (callback, control_data)
if self._source_controls_request_id:
data = {"controls_added":[ control_data ]}
self._roonsocket.send_continue(self._source_controls_request_id, data)
def update_source_control(self, control_key, new_state):
''' update an existing source control on the api'''
if control_key not in self._source_controls:
LOGGER.warning("source_control %s is not (yet) registered!" % control_key)
return
if not self._source_controls_request_id:
LOGGER.warning("Not yet registered, can not update source control")
return False
self._source_controls[control_key][1]["status"] = new_state
data = {"controls_changed": [ self._source_controls[control_key][1] ] }
self._roonsocket.send_continue(self._source_controls_request_id, data)
def register_volume_control(self, control_key, display_name, callback, initial_volume=0, volume_type="number", volume_step=2, volume_min=0, volume_max=100, is_muted=False):
''' register a new volume control on the api'''
if control_key in self._volume_controls:
LOGGER.error("source_control %s is already registered!" % control_key)
return
control_data = {
"display_name": display_name,
"volume_type": volume_type,
"volume_min": volume_min,
"volume_max": volume_max,
"volume_value": initial_volume,
"volume_step": volume_step,
"is_muted": is_muted,
"control_key": control_key
}
self._volume_controls[control_key] = (callback, control_data)
if self._volume_controls_request_id:
data = {"controls_added":[ control_data ]}
self._roonsocket.send_continue(self._volume_controls_request_id, data)
def update_volume_control(self, control_key, volume=None, mute=None):
''' update an existing volume control, report its state to Roon '''
if control_key not in self._volume_controls:
LOGGER.warning("volume_control %s is not (yet) registered!" % control_key)
return
if not self._volume_controls_request_id:
LOGGER.warning("Not yet registered, can not update volume control")
return False
if volume != None:
self._volume_controls[control_key][1]["volume_value"] = volume
if mute != None:
self._volume_controls[control_key][1]["is_muted"] = mute
data = {"controls_changed": [ self._volume_controls[control_key][1] ] }
self._roonsocket.send_continue(self._volume_controls_request_id, data)
def register_state_callback(self, callback, event_filter=None, id_filter=None):
'''
register a callback to be informed about changes to zones or outputs
params:
callback: method to be called when state changes occur, it will be passed an event param as string and a list of changed objects
callback will be called with params:
- event: string with name of the event ("zones_changed", "zones_seek_changed", "outputs_changed")
- a list with the zone or output id's that changed
event_filter: only callback if the event is in this list
id_filter: one or more zone or output id's or names to filter on (list or string)
'''
if not event_filter:
event_filter = []
elif not isinstance(event_filter, list):
event_filter = [event_filter]
if not id_filter:
id_filter = []
elif not isinstance(id_filter, list):
id_filter = [id_filter]
self._state_callbacks.append( (callback, event_filter, id_filter) )
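    # Hedged usage sketch (added; not part of the original file), assuming a connected
    # RoonApi instance named `roonapi`:
    #
    #     def my_state_handler(event, changed_ids):
    #         print(event, changed_ids)
    #
    #     roonapi.register_state_callback(my_state_handler, event_filter=["zones_changed"])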
def register_queue_callback(self, callback, zone_or_output_id=""):
'''
subscribe to queue change events
        callback: function which will be called with the updated data (provided as dict object)
zone_or_output_id: If provided, only listen for updates for this zone or output
'''
if zone_or_output_id:
opt_data = {"zone_or_output_id": zone_or_output_id}
else:
opt_data = None
self._roonsocket.subscribe(ServiceTransport, "queue", callback, opt_data)
def browse_browse(self, opts):
'''
undocumented and complex browse call on the roon api
reference: https://github.com/RoonLabs/node-roon-api-browse/blob/master/lib.js
'''
return self._request(ServiceBrowse + "/browse", opts)
def browse_load(self, opts):
'''
undocumented and complex browse call on the roon api
reference: https://github.com/RoonLabs/node-roon-api-browse/blob/master/lib.js
'''
return self._request(ServiceBrowse + "/load", opts)
def browse_pop_all(self, opts):
'''
undocumented and complex browse call on the roon api
reference: https://github.com/RoonLabs/node-roon-api-browse/blob/master/lib.js
'''
return self._request(ServiceBrowse + "/pop_all", opts)
def browse_pop(self, opts):
'''
undocumented and complex browse call on the roon api
reference: https://github.com/RoonLabs/node-roon-api-browse/blob/master/lib.js
'''
return self._request(ServiceBrowse + "/pop", opts)
def browse_by_path(self, search_paths, zone_or_output_id="", offset=0, search_input=None):
'''
workaround to browse content by specifying the path to the content
params:
            search_paths: a list of names to look for in the hierarchy.
e.g. ["Playlists", "My Favourite Playlist"]
zone_or_output_id: id of a zone or output on which behalf the search is performed.
                               can be omitted for browsing but required for actions (play etc.)
offset: a list will only return 100 items, to get more use the offset
returns: a list of items (if found) as returned by the browse function
'''
opts = {"hierarchy": "browse", "pop_all": True}
if zone_or_output_id:
opts["zone_or_output_id"] = zone_or_output_id
# go to first level (home)
result = self.browse_browse(opts)
if not result:
return None
# items at first level (mainmenu items)
result = self.browse_load(opts)
opts["pop_all"] = False
for search_for in search_paths:
if not result or not "items" in result:
break
for item in result["items"]:
if item["title"] == search_for or search_input and item.get("input_prompt"):
opts["item_key"] = item["item_key"]
if item.get("input_prompt"):
opts["input"] = search_input
result = self.browse_browse(opts)
if result and "list" in result and result["list"]["count"] > 100:
opts["offset"] = offset
opts["set_display_offset"] = offset
result = self.browse_load(opts)
return result
def playlists(self, offset=0):
''' return the list of playlists'''
return self.browse_by_path(["Playlists"], offset=offset)
def internet_radio(self, offset=0):
''' return the list of internet radio stations'''
return self.browse_by_path(["Internet Radio"], offset=offset)
def artists(self, offset=0):
'''return the list of artists in the library'''
return self.browse_by_path(["Library", "Artists"], offset=offset)
def albums(self, offset=0):
'''return the list of albums in the library'''
return self.browse_by_path(["Library", "Albums"], offset=offset)
def tracks(self, offset=0):
'''return the list of tracks in the library'''
return self.browse_by_path(["Library", "Tracks"], offset=offset)
def tags(self, offset=0):
'''return the list of tags in the library'''
return self.browse_by_path(["Library", "Tags"], offset=offset)
def genres(self, subgenres_for="", offset=0):
'''return the list of genres in the library'''
return self.browse_by_path(["Genres", subgenres_for], offset=offset)
def play_playlist(self, zone_or_output_id, playlist_title, shuffle=False):
''' play playlist by name on the specified zone'''
play_action = "Shuffle" if shuffle else "Play Now"
return self.browse_by_path(["Playlists", playlist_title, "Play Playlist", play_action], zone_or_output_id)
def queue_playlist(self, zone_or_output_id, playlist_title):
''' queue playlist by name on the specified zone'''
return self.browse_by_path(["Playlists", playlist_title, "Play Playlist", "Queue"], zone_or_output_id)
def play_radio(self, zone_or_output_id, radio_title):
''' play internet radio by name on the specified zone'''
return self.browse_by_path(["Internet Radio", radio_title, "Play Radio", "Play Now"], zone_or_output_id)
def play_genre(self, zone_or_output_id, genre_name, subgenre="", shuffle=False):
'''play specified genre on the specified zone'''
action = "Shuffle" if shuffle else "Play Genre"
if subgenre:
return self.browse_by_path(["Genres", genre_name, subgenre, "Play Genre", action], zone_or_output_id)
else:
return self.browse_by_path(["Genres", genre_name, "Play Genre", action], zone_or_output_id)
def search_artists(self, search_input):
''' search for artists by name'''
return self.browse_by_path(["Library", "Search", "Artists"], search_input=search_input)
############# private methods ##################
def __init__(self, appinfo, token=None, host=None, port=9100, blocking_init=True):
        '''
        Set up the connection with Roon.
            appinfo: a dict with the required information about the app that should be connected to the api
            token: used for persistent storage of the auth token; it will be set on the token attribute once retrieved.
                You should handle saving of the token yourself.
            host: optional ip address or hostname of the Roon server, will be auto discovered if omitted
            port: optional http port of the Roon websockets api, defaults to 9100
            blocking_init: by default the init will block until the socket is connected and the app is authenticated.
                If set to False, the init returns immediately, but you will only receive data once the
                connection is fully initialized. The latter is preferred if you're (only) using the callbacks.
        '''
self._appinfo = appinfo
self._token = token
if not appinfo or not isinstance(appinfo, dict):
raise("appinfo missing or in incorrect format!")
if host and port:
self._server_discovered(host, port)
else:
self._roondiscovery = RoonDiscovery(self._server_discovered)
self._roondiscovery.start()
        # block until we're ready
if blocking_init:
while not self.ready and not self._exit:
time.sleep(1)
# start socket watcher
th = threading.Thread(target=self._socket_watcher)
th.daemon = True
th.start()
def __exit__(self, type, value, tb):
self.stop()
def __enter__(self):
return self
def stop(self):
self._exit = True
if self._roondiscovery:
self._roondiscovery.stop()
if self._roonsocket:
self._roonsocket.stop()
def _server_discovered(self, host, port):
''' called when the roon server is (auto) discovered on the network'''
LOGGER.info("Connecting to Roon server %s:%s" % (host, port))
ws_address = "ws://%s:%s/api" %(host, port)
self._host = host
self._port = port
self._roonsocket = RoonApiWebSocket(ws_address)
self._roonsocket.source_controls_callback = self._on_source_control_request
self._roonsocket.volume_controls_callback = self._on_volume_control_request
self._roonsocket.connected_callback = self._socket_connected
self._roonsocket.registered_calback = self._server_registered
self._roonsocket.start()
def _socket_connected(self):
''' the websocket connection is connected successfully'''
LOGGER.info("Connection with roon websockets (re)created.")
self.ready = False
self._source_controls_request_id = None
self._volume_controls_request_id = None
# authenticate / register
# warning: at first launch the user has to approve the app in the Roon settings.
appinfo = self._appinfo.copy()
appinfo["required_services"] = [ServiceTransport, ServiceBrowse]
appinfo["provided_services"] = [ControlVolume, ControlSource]
if self._token:
appinfo["token"] = self._token
if not self._token:
LOGGER.info("The application should be approved within Roon's settings.")
else:
LOGGER.info("Registering the app with Roon...")
self._roonsocket.send_request(ServiceRegistry + "/register", appinfo)
def _server_registered(self, reginfo):
LOGGER.info("Registered to Roon server %s" % reginfo["display_name"])
LOGGER.debug(reginfo)
self._token = reginfo["token"]
# fill zones and outputs dicts one time so the data is available right away
if not self._zones:
self._zones = self._get_zones()
if not self._outputs:
self._outputs = self._get_outputs()
# subscribe to state change events
self._roonsocket.subscribe(ServiceTransport, "zones", self._on_state_change)
self._roonsocket.subscribe(ServiceTransport, "outputs", self._on_state_change)
# set flag that we're fully initialized (used for blocking init)
self.ready = True
def _on_state_change(self, msg):
''' process messages we receive from the roon websocket into a more usable format'''
events = []
if not msg or not isinstance(msg, dict):
return
for state_key, state_values in msg.items():
changed_ids = []
filter_keys = []
if state_key in ["zones_seek_changed", "zones_changed", "zones_added", "zones"]:
for zone in state_values:
if zone["zone_id"] in self._zones:
self._zones[zone["zone_id"]].update(zone)
else:
self._zones[zone["zone_id"]] = zone
changed_ids.append(zone["zone_id"])
if "display_name" in zone:
filter_keys.append(zone["display_name"])
if "outputs" in zone:
for output in zone["outputs"]:
filter_keys.append(output["output_id"])
filter_keys.append(output["display_name"])
event = "zones_seek_changed" if state_key == "zones_seek_changed" else "zones_changed"
events.append((event, changed_ids, filter_keys))
elif state_key in ["outputs_changed", "outputs_added", "outputs"]:
for output in state_values:
if output["output_id"] in self._outputs:
self._outputs[output["output_id"]].update(output)
else:
self._outputs[output["output_id"]] = output
changed_ids.append(output["output_id"])
filter_keys.append(output["display_name"])
filter_keys.append(output["zone_id"])
event = "outputs_changed"
events.append((event, changed_ids, filter_keys))
elif state_key == "zones_removed":
for item in state_values:
del self._zones[item]
elif state_key == "outputs_removed":
for item in state_values:
del self._outputs[item]
else:
LOGGER.warning("unknown state change: %s" % msg)
for event, changed_ids, filter_keys in events:
filter_keys.extend(changed_ids)
for item in self._state_callbacks:
callback = item[0]
event_filter = item[1]
id_filter = item[2]
if event_filter and (event not in event_filter):
continue
if id_filter and set(id_filter).isdisjoint(filter_keys):
continue
try:
callback(event, changed_ids)
except Exception:
LOGGER.exception("Error while executing callback!")
def _get_outputs(self):
outputs = {}
data = self._request(ServiceTransport + "/get_outputs")
if data and "outputs" in data:
for output in data["outputs"]:
outputs[output["output_id"]] = output
return outputs
def _get_zones(self):
zones = {}
data = self._request(ServiceTransport + "/get_zones")
if data and "zones" in data:
for zone in data["zones"]:
zones[zone["zone_id"]] = zone
return zones
def _request(self, command, data=None):
''' send command and wait for result '''
if not self._roonsocket:
retries = 20
while (not self.ready or not self._roonsocket) and retries:
retries -= 1
time.sleep(0.2)
if not self.ready or not self._roonsocket:
LOGGER.warning("socket is not yet ready")
if not self._roonsocket:
return None
request_id = self._roonsocket.send_request(command, data)
result = None
retries = 50
while retries:
result = self._roonsocket.results.get(request_id)
if result:
break
else:
retries -= 1
time.sleep(0.05)
try:
del self._roonsocket.results[request_id]
except KeyError:
pass
return result
def _on_source_control_request(self, event, request_id, data):
''' got request from roon server for a source control registered on this endpoint'''
if event == "subscribe_controls":
LOGGER.debug("found subscription ID for source controls: %s " % request_id)
self._roonsocket.send_continue(request_id, {"controls_added": []})
# send all source controls already registered (handle connection loss)
controls = []
for callback, control_data in self._source_controls.values():
controls.append(control_data)
self._roonsocket.send_continue(request_id, { "controls_added":controls })
self._source_controls_request_id = request_id
elif data and data.get("control_key"):
control_key = data["control_key"]
try:
# launch callback
self._roonsocket.send_complete(request_id, "Success")
self._source_controls[control_key][0](control_key, event)
except Exception:
LOGGER.exception("Error in source_control callback")
self._roonsocket.send_complete(request_id, "Error")
def _on_volume_control_request(self, event, request_id, data):
''' got request from roon server for a volume control registered on this endpoint'''
if event == "subscribe_controls":
LOGGER.debug("found subscription ID for volume controls: %s " % request_id)
# send all volume controls already registered (handle connection loss)
controls = []
for callback, control_data in self._volume_controls.values():
controls.append(control_data)
self._roonsocket.send_continue(request_id, { "controls_added":controls })
self._volume_controls_request_id = request_id
elif data and data.get("control_key"):
control_key = data["control_key"]
if event == "set_volume" and data["mode"] == "absolute":
value = data["value"]
elif event == "set_volume" and data["mode"] == "relative":
value = self._volume_controls[control_key][0]["volume_value"] + data["value"]
elif event == "set_volume" and data["mode"] == "relative_step":
value = self._volume_controls[control_key][0]["volume_value"] + (data["value"] * data["volume_step"])
elif event == "set_mute":
value = data["mode"] == "on"
else:
return
try:
self._roonsocket.send_complete(request_id, "Success")
self._volume_controls[control_key][0](control_key, event, value)
except Exception:
LOGGER.exception("Error in volume_control callback")
self._roonsocket.send_complete(request_id, "Error")
def _socket_watcher(self):
''' monitor the connection state of the socket and reconnect if needed'''
while not self._exit:
if self._roonsocket and self._roonsocket.failed_state:
LOGGER.warning("Socket connection lost! Will try to reconnect in 20s")
count = 0
while not self._exit and count < 21:
count += 1
time.sleep(1)
if not self._exit:
self._server_discovered(self._host, self._port)
time.sleep(2)
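

# Usage sketch (illustrative only): it assumes the class defined above is exposed as
# "RoonApi" and that the appinfo dict uses the standard Roon extension registration
# fields; neither is shown in this excerpt, so adjust the names to the actual module.
if __name__ == "__main__":
    my_appinfo = {
        "extension_id": "python_roon_example",      # assumed field names
        "display_name": "Python Roon API example",
        "display_version": "1.0.0",
        "publisher": "example",
        "email": "example@example.com",
    }

    def print_state_change(event, changed_ids):
        # signature expected by register_state_callback: event name + list of changed ids
        print(event, changed_ids)

    # blocks until the extension has been approved in Roon's Settings -> Extensions
    api = RoonApi(my_appinfo)
    api.register_state_callback(print_state_change, event_filter="zones_changed")
    print(api.playlists())        # convenience wrapper around browse_by_path
    api.stop()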
|
spinner.py
|
# -*- coding: utf-8 -*-
"""A minimal non-colored version of https://pypi.org/project/halo, to track list progress"""
from __future__ import absolute_import, unicode_literals
import os
import sys
import threading
from collections import OrderedDict
from datetime import datetime
import py
threads = []
if os.name == "nt":
import ctypes
class _CursorInfo(ctypes.Structure):
_fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
class Spinner(object):
CLEAR_LINE = "\033[K"
max_width = 120
frames = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]
def __init__(self, enabled=True, refresh_rate=0.1):
self.refresh_rate = refresh_rate
self.enabled = enabled
self._file = sys.stdout
self.stream = py.io.TerminalWriter(file=self._file)
self._envs = OrderedDict()
self._frame_index = 0
def clear(self):
if self.enabled:
self.stream.write("\r")
self.stream.write(self.CLEAR_LINE)
def render(self):
while True:
self._stop_spinner.wait(self.refresh_rate)
if self._stop_spinner.is_set():
break
self.render_frame()
return self
def render_frame(self):
if self.enabled:
self.clear()
self.stream.write("\r{}".format(self.frame()))
def frame(self):
frame = self.frames[self._frame_index]
self._frame_index += 1
self._frame_index = self._frame_index % len(self.frames)
text_frame = "[{}] {}".format(len(self._envs), " | ".join(self._envs))
if len(text_frame) > self.max_width - 1:
text_frame = "{}...".format(text_frame[: self.max_width - 1 - 3])
return "{} {}".format(*[(frame, text_frame)][0])
def __enter__(self):
if self.enabled:
self.disable_cursor()
self.render_frame()
self._stop_spinner = threading.Event()
self._spinner_thread = threading.Thread(target=self.render)
        self._spinner_thread.daemon = True
self._spinner_thread.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if not self._stop_spinner.is_set():
if self._spinner_thread:
self._stop_spinner.set()
self._spinner_thread.join()
self._frame_index = 0
if self.enabled:
self.clear()
self.enable_cursor()
return self
def add(self, name):
self._envs[name] = datetime.now()
def succeed(self, key):
self.finalize(key, "✔ OK", green=True)
def fail(self, key):
self.finalize(key, "✖ FAIL", red=True)
def skip(self, key):
self.finalize(key, "⚠ SKIP", white=True)
def finalize(self, key, status, **kwargs):
start_at = self._envs[key]
del self._envs[key]
if self.enabled:
self.clear()
self.stream.write(
"{} {} in {}{}".format(
status, key, td_human_readable(datetime.now() - start_at), os.linesep
),
**kwargs
)
if not self._envs:
self.__exit__(None, None, None)
def disable_cursor(self):
if self._file.isatty():
if os.name == "nt":
ci = _CursorInfo()
handle = ctypes.windll.kernel32.GetStdHandle(-11)
ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
ci.visible = False
ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
elif os.name == "posix":
self.stream.write("\033[?25l")
def enable_cursor(self):
if self._file.isatty():
if os.name == "nt":
ci = _CursorInfo()
handle = ctypes.windll.kernel32.GetStdHandle(-11)
ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
ci.visible = True
ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
elif os.name == "posix":
self.stream.write("\033[?25h")
def td_human_readable(delta):
seconds = int(delta.total_seconds())
periods = [
("year", 60 * 60 * 24 * 365),
("month", 60 * 60 * 24 * 30),
("day", 60 * 60 * 24),
("hour", 60 * 60),
("minute", 60),
("second", 1),
]
texts = []
for period_name, period_seconds in periods:
if seconds > period_seconds or period_seconds == 1:
period_value, seconds = divmod(seconds, period_seconds)
if period_name == "second":
ms = delta.total_seconds() - int(delta.total_seconds())
period_value += round(ms, 3)
has_s = "s" if period_value > 1 else ""
texts.append("{} {}{}".format(period_value, period_name, has_s))
return ", ".join(texts)
|
__init__.py
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""
Implement a Flask-based server for controlling the simulation.
In particular, it provides REST methods to start/stop a sandbox and an agent, alongside a GUI that lets
the user easily change the parameters.
"""
import docker
import logging
import os
import re
from queue import Empty
from threading import Thread
from flask import Flask
from tac.gui.launcher import home, api
from tac.gui.launcher.api.resources.sandboxes import sandbox_queue, SandboxRunner
logger = logging.getLogger(__name__)
class CustomFlask(Flask):
"""Wrapper of the Flask app."""
def __init__(self, *args, **kwargs):
"""Initialize our wrapper."""
super().__init__(*args, **kwargs)
self.running = False
self.sandbox_runner_thread = Thread(target=self.run_sandbox_queue)
def run_sandbox_queue(self):
"""Consume elements from the sandbox queue."""
while self.running:
logger.debug("Waiting for sandbox to execute...")
try:
sandbox_runner = sandbox_queue.get(timeout=5.0) # type: SandboxRunner
logger.debug(
"Launching the sandbox with id: {}".format(sandbox_runner.id)
)
sandbox_runner()
logger.debug("Waiting until it completes.")
sandbox_runner.wait()
logger.debug(
"Sandbox with ID={} has been completed.".format(sandbox_runner.id)
)
except Empty:
pass
logger.debug("Exiting from the job loop...")
def setup(self):
"""Set up resources before running the main app."""
logger.debug("Setup method called.")
kill_any_running_oef()
self.running = True
self.sandbox_runner_thread.start()
def run(self, *args, **kwargs):
"""Wrap the run method to hide setup and teardown operations to the user."""
try:
self.setup()
super().run(*args, **kwargs)
finally:
self.teardown()
def teardown(self):
"""Teardown the allocated resources."""
logger.debug("Teardown method called.")
self.running = False
self.sandbox_runner_thread.join()
def kill_any_running_oef():
"""Kill any running OEF instance."""
client = docker.from_env()
for container in client.containers.list():
if any(re.match("fetchai/oef-search", tag) for tag in container.image.tags):
logger.debug("Stopping existing OEF Node...")
container.stop()
def create_app(test_config=None):
"""Create and configure an instance of the Flask application."""
app = CustomFlask(__name__, instance_relative_config=True)
if test_config is None:
# load the instance config, if it exists, when not testing
app.config.from_pyfile("config.py", silent=True)
else:
# load the test config if passed in
app.config.update(test_config)
# ensure the instance folder exists
try:
os.makedirs(app.instance_path)
except OSError:
pass
# register api endpoints
api.create_api(app)
# register home pages
app.register_blueprint(home.bp)
return app
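

# Usage sketch (illustrative only; the host and port below are arbitrary choices):
# create_app() builds the CustomFlask instance, and its run() wrapper performs the
# setup (OEF cleanup, sandbox queue thread) and teardown around Flask's own run().
if __name__ == "__main__":
    app = create_app()
    app.run(host="127.0.0.1", port=5000)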
|
debug.py
|
import code
import gc
import logging
import os
import signal
import socket
import threading
import traceback
import tracemalloc
from types import FrameType
from django.conf import settings
from django.utils.timezone import now as timezone_now
from typing import Optional
logger = logging.getLogger('zulip.debug')
# Interactive debugging code from
# http://stackoverflow.com/questions/132058/showing-the-stack-trace-from-a-running-python-application
# (that link also points to code for an interactive remote debugger
# setup, which we might want if we move Tornado to run in a daemon
# rather than via screen).
def interactive_debug(sig: int, frame: FrameType) -> None:
"""Interrupt running process, and provide a python prompt for
interactive debugging."""
d = {'_frame': frame} # Allow access to frame object.
d.update(frame.f_globals) # Unless shadowed by global
d.update(frame.f_locals)
message = "Signal received : entering python shell.\nTraceback:\n"
message += ''.join(traceback.format_stack(frame))
i = code.InteractiveConsole(d)
i.interact(message)
# SIGUSR1 => Just print the stack
# SIGUSR2 => Print stack + open interactive debugging shell
def interactive_debug_listen() -> None:
signal.signal(signal.SIGUSR1, lambda sig, stack: traceback.print_stack(stack))
signal.signal(signal.SIGUSR2, interactive_debug)
def tracemalloc_dump() -> None:
if not tracemalloc.is_tracing():
logger.warning("pid {}: tracemalloc off, nothing to dump"
.format(os.getpid()))
return
# Despite our name for it, `timezone_now` always deals in UTC.
basename = "snap.{}.{}".format(os.getpid(),
timezone_now().strftime("%F-%T"))
path = os.path.join(settings.TRACEMALLOC_DUMP_DIR, basename)
os.makedirs(settings.TRACEMALLOC_DUMP_DIR, exist_ok=True)
gc.collect()
tracemalloc.take_snapshot().dump(path)
    with open('/proc/{}/stat'.format(os.getpid()), 'rb') as procstat_file:
        procstat = procstat_file.read().split()
rss_pages = int(procstat[23])
logger.info("tracemalloc dump: tracing {} MiB ({} MiB peak), using {} MiB; rss {} MiB; dumped {}"
.format(tracemalloc.get_traced_memory()[0] // 1048576,
tracemalloc.get_traced_memory()[1] // 1048576,
tracemalloc.get_tracemalloc_memory() // 1048576,
rss_pages // 256,
basename))
def tracemalloc_listen_sock(sock: socket.socket) -> None:
logger.debug('pid {}: tracemalloc_listen_sock started!'.format(os.getpid()))
while True:
sock.recv(1)
tracemalloc_dump()
listener_pid: Optional[int] = None
def tracemalloc_listen() -> None:
global listener_pid
if listener_pid == os.getpid():
# Already set up -- and in this process, not just its parent.
return
logger.debug('pid {}: tracemalloc_listen working...'.format(os.getpid()))
listener_pid = os.getpid()
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
path = "/tmp/tracemalloc.{}".format(os.getpid())
sock.bind(path)
thread = threading.Thread(target=lambda: tracemalloc_listen_sock(sock),
daemon=True)
thread.start()
logger.debug('pid {}: tracemalloc_listen done: {}'.format(
os.getpid(), path))
def maybe_tracemalloc_listen() -> None:
    '''If tracemalloc tracing is enabled, listen for requests to dump a snapshot.
To trigger once this is listening:
echo | socat -u stdin unix-sendto:/tmp/tracemalloc.$pid
    To enable in the Zulip web server: edit /etc/zulip/uwsgi.ini,
and add e.g. ` PYTHONTRACEMALLOC=5` to the `env=` line.
This function is called in middleware, so the process will
automatically start listening.
To enable in other contexts: see upstream docs
https://docs.python.org/3/library/tracemalloc .
You may also have to add a call to this function somewhere.
'''
if os.environ.get('PYTHONTRACEMALLOC'):
# If the server was started with `tracemalloc` tracing on, then
# listen for a signal to dump `tracemalloc` snapshots.
tracemalloc_listen()
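

# Usage sketch (illustrative only, outside of the Zulip/Django request path):
# register the signal handlers in a long-running process, then from a shell run
# `kill -USR1 <pid>` to print the stack or `kill -USR2 <pid>` for the interactive shell.
if __name__ == '__main__':
    import time
    interactive_debug_listen()
    print('pid:', os.getpid())
    while True:
        time.sleep(60)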
|
gamepadClient.py
|
'''
Simple Python script to get asynchronous gamepad inputs
Thomas FLAYOLS - LAAS CNRS
From https://github.com/thomasfla/solopython
Use:
To display data, run "python gamepadClient.py"
'''
import inputs
import time
from multiprocessing import Process
from multiprocessing.sharedctypes import Value
from ctypes import c_double, c_bool
class GamepadClient():
def __init__(self):
self.running = Value(c_bool, lock=True)
self.startButton = Value(c_bool, lock=True)
self.leftJoystickX = Value(c_double, lock=True)
self.leftJoystickY = Value(c_double, lock=True)
self.rightJoystickX = Value(c_double, lock=True)
self.rightJoystickY = Value(c_double, lock=True)
self.R1Button = Value(c_bool, lock=True)
self.L1Button = Value(c_bool, lock=True)
self.startButton.value = False
self.leftJoystickX.value = 0.0
self.leftJoystickY.value = 0.0
self.rightJoystickX.value = 0.0
self.rightJoystickY.value = 0.0
self.R1Button.value = False
self.L1Button.value = False
args = (self.running, self.startButton, self.leftJoystickX,
self.leftJoystickY, self.rightJoystickX, self.rightJoystickY, self.R1Button, self.L1Button)
self.process = Process(target=self.run, args=args)
self.process.start()
time.sleep(0.2)
def run(self, running, startButton, leftJoystickX, leftJoystickY, rightJoystickX, rightJoystickY, R1Button, L1Button):
running.value = True
while(running.value):
events = inputs.get_gamepad()
for event in events:
#print(event.ev_type, event.code, event.state)
if event.ev_type == 'Absolute':
if event.code == 'ABS_X':
leftJoystickX.value = event.state / 32768.0
if event.code == 'ABS_Y':
leftJoystickY.value = event.state / 32768.0
if event.code == 'ABS_RX':
rightJoystickX.value = event.state / 32768.0
if event.code == 'ABS_RY':
rightJoystickY.value = event.state / 32768.0
if (event.ev_type == 'Key'):
if event.code == 'BTN_START':
startButton.value = event.state
print (event.state)
elif event.code == 'BTN_TR':
R1Button.value = event.state
print (event.state)
elif event.code == 'BTN_TL':
L1Button.value = event.state
print (event.state)
def stop(self):
self.running.value = False
self.process.terminate()
self.process.join()
if __name__ == "__main__":
gp = GamepadClient()
for i in range(1000):
print("LX = ", gp.leftJoystickX.value, end=" ; ")
print("LY = ", gp.leftJoystickY.value, end=" ; ")
print("RX = ", gp.rightJoystickX.value, end=" ; ")
print("RY = ", gp.rightJoystickY.value, end=" ; ")
print("start = ",gp.startButton.value)
print("R1 = ",gp.R1Button.value)
print("L1 = ",gp.L1Button.value)
time.sleep(0.1)
gp.stop()
|
threading.py
|
from threading import Thread
from concurrent.futures import Future
def call_with_future(fn, future, args, kwargs):
try:
result = fn(*args, **kwargs)
future.set_result(result)
except Exception as exc:
future.set_exception(exc)
def threaded(fn):
def wrapper(*args, **kwargs):
future = Future()
Thread(target=call_with_future, args=(fn, future, args, kwargs)).start()
return future
return wrapper
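

# Usage sketch (illustrative only): the decorated function returns a
# concurrent.futures.Future right away; .result() blocks until the call
# completes and re-raises any exception set by call_with_future.
if __name__ == "__main__":
    import time

    @threaded
    def slow_add(a, b):
        time.sleep(1)
        return a + b

    future = slow_add(2, 3)
    print("doing other work while slow_add runs in the background...")
    print(future.result())  # prints 5 after roughly one second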
|
check_for_span.py
|
#!/usr/bin/python
"""Tool to quickly monitor for SPAN activity on interfaces."""
import os
import sys
import time
import pcapy # Works with Python2 and Python3. Use apt-get or pip, either way
import socket
import subprocess
import multiprocessing
import logging, logging.handlers
__author__ = 'Nicholas Albright'
__version__ = 0.2 # Python3 support added.
logging.basicConfig(format='%(message)s', level=logging.INFO)
log = logging.getLogger(__name__)
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('8.8.8.8', 0))
MYIP = s.getsockname()[0]
def _pp(ts, pkt):
if pkt:
return 1
else:
return 0
def monitor(iface, ignoreip, queue):
"""Monitor a given interface."""
pc = pcapy.open_live(iface, 240, True, 100)
pc.setfilter('tcp and not host %s' % ignoreip)
count = 0
ts = int(time.time())
while (int(time.time()) - ts) < 10:
count += pc.dispatch(1, _pp)
if count > 30: # 30 packets in 10 seconds, TCP, not including our IP:
queue.put((iface, True))
else:
queue.put((iface, False))
def get_interfaces():
"""Collect Interface data."""
interface_list = []
    interface_query = subprocess.Popen(
        ['/sbin/ifconfig', '-s'],
        stdout=subprocess.PIPE).communicate()[0].decode()  # decode so the str checks below work on Python 3
for line in interface_query.splitlines():
if '1500' not in line: # Ethernet
continue
if line.startswith('tun') or line.startswith('tap') or line.startswith('ppp'):
continue
interface_list.append(line.split(' ')[0])
return interface_list
def main():
"""Run our main loop."""
response_queue = multiprocessing.Queue()
procs = []
ifaces = get_interfaces()
try:
for interface in ifaces:
if __name__ == '__main__':
log.info('[+] Detected Interface: %s, sniffing...' % interface)
p = multiprocessing.Process(target=monitor, args=(interface, MYIP, response_queue,))
procs.append(p)
p.start()
except KeyboardInterrupt:
        log.info('Caught Break. Exiting.')
sys.exit()
for p in procs:
p.join()
all_good = False
while not response_queue.empty():
i, v = response_queue.get()
if __name__ == '__main__':
print('%s: %s' % (i, str(v)))
if v:
all_good = True
if all_good:
return True
return False
if __name__ == '__main__':
log.info('[=] Nicholas\' Span Port Identification Tool [=]')
if os.geteuid() != 0:
sys.exit('[-] Run with root/sudo.')
main()
|
test__local.py
|
import gevent.testing as greentest
from copy import copy
# Comment the line below to see that the standard thread.local is working correctly
from gevent import monkey; monkey.patch_all()
from threading import local
from threading import Thread
from zope import interface
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping # pylint:disable=deprecated-class
class ReadProperty(object):
"""A property that can be overridden"""
# A non-data descriptor
def __get__(self, inst, klass):
return 42 if inst is not None else self
class A(local):
__slots__ = ['initialized', 'obj']
path = ''
type_path = 'MyPath'
read_property = ReadProperty()
def __init__(self, obj):
super(A, self).__init__()
if not hasattr(self, 'initialized'):
self.obj = obj
self.path = ''
class Obj(object):
pass
# These next two classes have to be global to avoid the leakchecks
deleted_sentinels = []
created_sentinels = []
class Sentinel(object):
def __del__(self):
deleted_sentinels.append(id(self))
class MyLocal(local):
CLASS_PROP = 42
def __init__(self):
local.__init__(self)
self.sentinel = Sentinel()
created_sentinels.append(id(self.sentinel))
@property
def desc(self):
return self
class MyLocalSubclass(MyLocal):
pass
class WithGetattr(local):
def __getattr__(self, name):
if name == 'foo':
return 42
return super(WithGetattr, self).__getattr__(name) # pylint:disable=no-member
class LocalWithABC(local, Mapping):
def __getitem__(self, name):
return self.d[name]
def __iter__(self):
return iter(self.d)
def __len__(self):
return len(self.d)
class LocalWithStaticMethod(local):
@staticmethod
def a_staticmethod():
return 42
class LocalWithClassMethod(local):
@classmethod
def a_classmethod(cls):
return cls
class TestGeventLocal(greentest.TestCase):
# pylint:disable=attribute-defined-outside-init,blacklisted-name
def setUp(self):
del deleted_sentinels[:]
del created_sentinels[:]
tearDown = setUp
def test_create_local_subclass_init_args(self):
with self.assertRaisesRegex(TypeError,
"Initialization arguments are not supported"):
local("foo")
with self.assertRaisesRegex(TypeError,
"Initialization arguments are not supported"):
local(kw="foo")
def test_local_opts_not_subclassed(self):
l = local()
l.attr = 1
self.assertEqual(l.attr, 1)
def test_cannot_set_delete_dict(self):
l = local()
with self.assertRaises(AttributeError):
l.__dict__ = 1
with self.assertRaises(AttributeError):
del l.__dict__
def test_delete_with_no_dict(self):
l = local()
with self.assertRaises(AttributeError):
delattr(l, 'thing')
def del_local():
with self.assertRaises(AttributeError):
delattr(l, 'thing')
t = Thread(target=del_local)
t.start()
t.join()
def test_slot_and_type_attributes(self):
a = A(Obj())
a.initialized = 1
self.assertEqual(a.initialized, 1)
# The slot is shared
def demonstrate_slots_shared():
self.assertEqual(a.initialized, 1)
a.initialized = 2
greenlet = Thread(target=demonstrate_slots_shared)
greenlet.start()
greenlet.join()
self.assertEqual(a.initialized, 2)
# The slot overrides dict values
a.__dict__['initialized'] = 42 # pylint:disable=unsupported-assignment-operation
self.assertEqual(a.initialized, 2)
# Deleting the slot deletes the slot, but not the dict
del a.initialized
self.assertFalse(hasattr(a, 'initialized'))
self.assertIn('initialized', a.__dict__)
# We can delete the 'path' ivar
# and fall back to the type
del a.path
self.assertEqual(a.path, '')
with self.assertRaises(AttributeError):
del a.path
# A read property calls get
self.assertEqual(a.read_property, 42)
a.read_property = 1
self.assertEqual(a.read_property, 1)
self.assertIsInstance(A.read_property, ReadProperty)
# Type attributes can be read
self.assertEqual(a.type_path, 'MyPath')
self.assertNotIn('type_path', a.__dict__)
# and replaced in the dict
a.type_path = 'Local'
self.assertEqual(a.type_path, 'Local')
self.assertIn('type_path', a.__dict__)
def test_attribute_error(self):
# pylint:disable=attribute-defined-outside-init
a = A(Obj())
with self.assertRaises(AttributeError):
getattr(a, 'fizz_buzz')
def set_fizz_buzz():
a.fizz_buzz = 1
greenlet = Thread(target=set_fizz_buzz)
greenlet.start()
greenlet.join()
with self.assertRaises(AttributeError):
getattr(a, 'fizz_buzz')
def test_getattr_called(self):
getter = WithGetattr()
self.assertEqual(42, getter.foo)
getter.foo = 'baz'
self.assertEqual('baz', getter.foo)
def test_copy(self):
a = A(Obj())
a.path = '123'
a.obj.echo = 'test'
b = copy(a)
        # Copy makes a shallow copy: the attribute path is independent in the
        # original and the copied object because its value is a string, while the
        # attribute obj is just a reference to the same instance of the class Obj.
self.assertEqual(a.path, b.path, 'The values in the two objects must be equal')
self.assertEqual(a.obj, b.obj, 'The values must be equal')
b.path = '321'
self.assertNotEqual(a.path, b.path, 'The values in the two objects must be different')
a.obj.echo = "works"
self.assertEqual(a.obj, b.obj, 'The values must be equal')
def test_copy_no_subclass(self):
a = local()
setattr(a, 'thing', 42)
b = copy(a)
self.assertEqual(b.thing, 42)
self.assertIsNot(a.__dict__, b.__dict__)
def test_objects(self):
# Test which failed in the eventlet?!
a = A({})
a.path = '123'
b = A({'one': 2})
b.path = '123'
self.assertEqual(a.path, b.path, 'The values in the two objects must be equal')
b.path = '321'
self.assertNotEqual(a.path, b.path, 'The values in the two objects must be different')
def test_class_attr(self, kind=MyLocal):
mylocal = kind()
self.assertEqual(42, mylocal.CLASS_PROP)
mylocal.CLASS_PROP = 1
self.assertEqual(1, mylocal.CLASS_PROP)
self.assertEqual(mylocal.__dict__['CLASS_PROP'], 1) # pylint:disable=unsubscriptable-object
del mylocal.CLASS_PROP
self.assertEqual(42, mylocal.CLASS_PROP)
self.assertIs(mylocal, mylocal.desc)
def test_class_attr_subclass(self):
self.test_class_attr(kind=MyLocalSubclass)
def test_locals_collected_when_greenlet_dead_but_still_referenced(self):
# https://github.com/gevent/gevent/issues/387
import gevent
my_local = MyLocal()
my_local.sentinel = None
greentest.gc_collect_if_needed()
del created_sentinels[:]
del deleted_sentinels[:]
def demonstrate_my_local():
# Get the important parts
getattr(my_local, 'sentinel')
# Create and reference greenlets
greenlets = [Thread(target=demonstrate_my_local) for _ in range(5)]
for t in greenlets:
t.start()
gevent.sleep()
self.assertEqual(len(created_sentinels), len(greenlets))
for g in greenlets:
assert not g.is_alive()
gevent.sleep() # let the callbacks run
greentest.gc_collect_if_needed()
# The sentinels should be gone too
self.assertEqual(len(deleted_sentinels), len(greenlets))
@greentest.skipOnLibuvOnPyPyOnWin("GC makes this non-deterministic, especially on Windows")
def test_locals_collected_when_unreferenced_even_in_running_greenlet(self):
# In fact only on Windows do we see GC being an issue;
# pypy2 5.0 on macos and travis don't have a problem.
# https://github.com/gevent/gevent/issues/981
import gevent
import gc
gc.collect()
count = 1000
running_greenlet = None
def demonstrate_my_local():
for _ in range(1000):
x = MyLocal()
self.assertIsNotNone(x.sentinel)
x = None
gc.collect()
gc.collect()
self.assertEqual(count, len(created_sentinels))
# They're all dead, even though this greenlet is
# still running
self.assertEqual(count, len(deleted_sentinels))
# The links were removed as well.
self.assertFalse(running_greenlet.has_links())
running_greenlet = gevent.spawn(demonstrate_my_local)
gevent.sleep()
running_greenlet.join()
self.assertEqual(count, len(deleted_sentinels))
@greentest.ignores_leakcheck
def test_local_dicts_for_greenlet(self):
import gevent
from gevent.local import all_local_dicts_for_greenlet
class MyGreenlet(gevent.Greenlet):
results = None
id_x = None
def _run(self): # pylint:disable=method-hidden
x = local()
x.foo = 42
self.id_x = id(x)
self.results = all_local_dicts_for_greenlet(self)
g = MyGreenlet()
g.start()
g.join()
        self.assertTrue(g.successful(), g)
self.assertEqual(g.results,
[((local, g.id_x), {'foo': 42})])
def test_local_with_abc(self):
# an ABC (or generally any non-exact-type) in the MRO doesn't
# break things. See https://github.com/gevent/gevent/issues/1201
x = LocalWithABC()
x.d = {'a': 1}
self.assertEqual({'a': 1}, x.d)
# The ABC part works
self.assertIn('a', x.d)
self.assertEqual(['a'], list(x.keys()))
def test_local_with_staticmethod(self):
x = LocalWithStaticMethod()
self.assertEqual(42, x.a_staticmethod())
def test_local_with_classmethod(self):
x = LocalWithClassMethod()
self.assertIs(LocalWithClassMethod, x.a_classmethod())
class TestLocalInterface(greentest.TestCase):
__timeout__ = None
@greentest.ignores_leakcheck
def test_provides(self):
# https://github.com/gevent/gevent/issues/1122
# pylint:disable=inherit-non-class
class IFoo(interface.Interface):
pass
@interface.implementer(IFoo)
class Base(object):
pass
class Derived(Base, local):
pass
d = Derived()
p = list(interface.providedBy(d))
self.assertEqual([IFoo], p)
@greentest.skipOnPurePython("Needs C extension")
class TestCExt(greentest.TestCase): # pragma: no cover
def test_c_extension(self):
self.assertEqual(local.__module__,
'gevent._gevent_clocal')
@greentest.skipWithCExtensions("Needs pure-python")
class TestPure(greentest.TestCase):
def test_extension(self):
self.assertEqual(local.__module__,
'gevent.local')
if __name__ == '__main__':
greentest.main()
|
local_job_service.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
import concurrent.futures
import itertools
import logging
import os
import queue
import shutil
import subprocess
import tempfile
import threading
import time
import traceback
from typing import TYPE_CHECKING
from typing import List
from typing import Optional
import grpc
from google.protobuf import json_format
from google.protobuf import text_format # type: ignore # not in typeshed
from apache_beam.metrics import monitoring_infos
from apache_beam.portability.api import beam_artifact_api_pb2_grpc
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.portability.api import beam_job_api_pb2
from apache_beam.portability.api import beam_job_api_pb2_grpc
from apache_beam.portability.api import beam_provision_api_pb2
from apache_beam.portability.api import endpoints_pb2
from apache_beam.runners.portability import abstract_job_service
from apache_beam.runners.portability import artifact_service
from apache_beam.runners.portability import portable_runner
from apache_beam.runners.portability.fn_api_runner import fn_runner
from apache_beam.runners.portability.fn_api_runner import worker_handlers
from apache_beam.utils import thread_pool_executor
if TYPE_CHECKING:
from google.protobuf import struct_pb2 # pylint: disable=ungrouped-imports
from apache_beam.portability.api import beam_runner_api_pb2
_LOGGER = logging.getLogger(__name__)
def _iter_queue(q):
while True:
yield q.get(block=True)
class LocalJobServicer(abstract_job_service.AbstractJobServiceServicer):
"""Manages one or more pipelines, possibly concurrently.
Experimental: No backward compatibility guaranteed.
Servicer for the Beam Job API.
  This JobService uses a basic local runner implementation to run the job.
  It is not capable of managing jobs on remote clusters.
By default, this JobService executes the job in process but still uses GRPC
to communicate pipeline and worker state. It can also be configured to use
inline calls rather than GRPC (for speed) or launch completely separate
subprocesses for the runner and worker(s).
"""
def __init__(self, staging_dir=None, beam_job_type=None):
super().__init__()
self._cleanup_staging_dir = staging_dir is None
self._staging_dir = staging_dir or tempfile.mkdtemp()
self._artifact_service = artifact_service.ArtifactStagingService(
artifact_service.BeamFilesystemHandler(self._staging_dir).file_writer)
self._artifact_staging_endpoint = None # type: Optional[endpoints_pb2.ApiServiceDescriptor]
self._beam_job_type = beam_job_type or BeamJob
def create_beam_job(self,
                      preparation_id,  # type: str
job_name, # type: str
pipeline, # type: beam_runner_api_pb2.Pipeline
options # type: struct_pb2.Struct
):
# type: (...) -> BeamJob
self._artifact_service.register_job(
staging_token=preparation_id,
dependency_sets={
id: env.dependencies
for (id, env) in pipeline.components.environments.items()
})
provision_info = fn_runner.ExtendedProvisionInfo(
beam_provision_api_pb2.ProvisionInfo(pipeline_options=options),
self._staging_dir,
job_name=job_name)
return self._beam_job_type(
preparation_id,
pipeline,
options,
provision_info,
self._artifact_staging_endpoint,
self._artifact_service)
def get_bind_address(self):
"""Return the address used to open the port on the gRPC server.
    This is often, but not always, the same as the service address. For
example, to make the service accessible to external machines, override this
to return '[::]' and override `get_service_address()` to return a publicly
accessible host name.
"""
return self.get_service_address()
def get_service_address(self):
"""Return the host name at which this server will be accessible.
In particular, this is provided to the client upon connection as the
artifact staging endpoint.
"""
return 'localhost'
def start_grpc_server(self, port=0):
options = [("grpc.max_receive_message_length", -1),
("grpc.max_send_message_length", -1),
("grpc.http2.max_pings_without_data", 0),
("grpc.http2.max_ping_strikes", 0)]
self._server = grpc.server(
thread_pool_executor.shared_unbounded_instance(), options=options)
port = self._server.add_insecure_port(
'%s:%d' % (self.get_bind_address(), port))
beam_job_api_pb2_grpc.add_JobServiceServicer_to_server(self, self._server)
beam_artifact_api_pb2_grpc.add_ArtifactStagingServiceServicer_to_server(
self._artifact_service, self._server)
hostname = self.get_service_address()
self._artifact_staging_endpoint = endpoints_pb2.ApiServiceDescriptor(
url='%s:%d' % (hostname, port))
self._server.start()
_LOGGER.info('Grpc server started at %s on port %d' % (hostname, port))
return port
def stop(self, timeout=1):
self._server.stop(timeout)
if os.path.exists(self._staging_dir) and self._cleanup_staging_dir:
shutil.rmtree(self._staging_dir, ignore_errors=True)
def GetJobMetrics(self, request, context=None):
if request.job_id not in self._jobs:
raise LookupError("Job {} does not exist".format(request.job_id))
result = self._jobs[request.job_id].result
monitoring_info_list = []
if result is not None:
for mi in result._monitoring_infos_by_stage.values():
monitoring_info_list.extend(mi)
# Filter out system metrics
user_monitoring_info_list = [
x for x in monitoring_info_list
if monitoring_infos.is_user_monitoring_info(x)
]
return beam_job_api_pb2.GetJobMetricsResponse(
metrics=beam_job_api_pb2.MetricResults(
committed=user_monitoring_info_list))
class SubprocessSdkWorker(object):
"""Manages a SDK worker implemented as a subprocess communicating over grpc.
"""
def __init__(
self,
worker_command_line, # type: bytes
control_address,
provision_info,
worker_id=None):
self._worker_command_line = worker_command_line
self._control_address = control_address
self._provision_info = provision_info
self._worker_id = worker_id
def run(self):
options = [("grpc.http2.max_pings_without_data", 0),
("grpc.http2.max_ping_strikes", 0)]
logging_server = grpc.server(
thread_pool_executor.shared_unbounded_instance(), options=options)
logging_port = logging_server.add_insecure_port('[::]:0')
logging_server.start()
logging_servicer = BeamFnLoggingServicer()
beam_fn_api_pb2_grpc.add_BeamFnLoggingServicer_to_server(
logging_servicer, logging_server)
logging_descriptor = text_format.MessageToString(
endpoints_pb2.ApiServiceDescriptor(url='localhost:%s' % logging_port))
control_descriptor = text_format.MessageToString(
endpoints_pb2.ApiServiceDescriptor(url=self._control_address))
pipeline_options = json_format.MessageToJson(
self._provision_info.provision_info.pipeline_options)
env_dict = dict(
os.environ,
CONTROL_API_SERVICE_DESCRIPTOR=control_descriptor,
LOGGING_API_SERVICE_DESCRIPTOR=logging_descriptor,
PIPELINE_OPTIONS=pipeline_options)
# only add worker_id when it is set.
if self._worker_id:
env_dict['WORKER_ID'] = self._worker_id
with worker_handlers.SUBPROCESS_LOCK:
p = subprocess.Popen(self._worker_command_line, shell=True, env=env_dict)
try:
p.wait()
if p.returncode:
raise RuntimeError(
'Worker subprocess exited with return code %s' % p.returncode)
finally:
if p.poll() is None:
p.kill()
logging_server.stop(0)
class BeamJob(abstract_job_service.AbstractBeamJob):
"""This class handles running and managing a single pipeline.
The current state of the pipeline is available as self.state.
"""
def __init__(self,
job_id, # type: str
pipeline,
options,
provision_info, # type: fn_runner.ExtendedProvisionInfo
artifact_staging_endpoint, # type: Optional[endpoints_pb2.ApiServiceDescriptor]
artifact_service, # type: artifact_service.ArtifactStagingService
):
super().__init__(job_id, provision_info.job_name, pipeline, options)
self._provision_info = provision_info
self._artifact_staging_endpoint = artifact_staging_endpoint
self._artifact_service = artifact_service
self._state_queues = [] # type: List[queue.Queue]
self._log_queues = JobLogQueues()
self.daemon = True
self.result = None
def set_state(self, new_state):
"""Set the latest state as an int enum and notify consumers"""
timestamp = super().set_state(new_state)
if timestamp is not None:
# Inform consumers of the new state.
for queue in self._state_queues:
queue.put((new_state, timestamp))
def prepare(self):
pass
def artifact_staging_endpoint(self):
return self._artifact_staging_endpoint
def run(self):
self.set_state(beam_job_api_pb2.JobState.STARTING)
self._run_thread = threading.Thread(target=self._run_job)
self._run_thread.start()
def _run_job(self):
with JobLogHandler(self._log_queues) as log_handler:
self._update_dependencies()
try:
start = time.time()
self.result = self._invoke_runner()
self.result.wait_until_finish()
_LOGGER.info(
'Completed job in %s seconds with state %s.',
time.time() - start,
self.result.state)
self.set_state(
portable_runner.PipelineResult.pipeline_state_to_runner_api_state(
self.result.state))
except: # pylint: disable=bare-except
self._log_queues.put(
beam_job_api_pb2.JobMessage(
message_id=log_handler._next_id(),
time=time.strftime('%Y-%m-%d %H:%M:%S.'),
importance=beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR,
message_text=traceback.format_exc()))
_LOGGER.exception('Error running pipeline.')
self.set_state(beam_job_api_pb2.JobState.FAILED)
raise
def _invoke_runner(self):
self.set_state(beam_job_api_pb2.JobState.RUNNING)
return fn_runner.FnApiRunner(
provision_info=self._provision_info).run_via_runner_api(
self._pipeline_proto)
def _update_dependencies(self):
try:
for env_id, deps in self._artifact_service.resolved_deps(
self._job_id, timeout=0).items():
# Slice assignment not supported for repeated fields.
env = self._pipeline_proto.components.environments[env_id]
del env.dependencies[:]
env.dependencies.extend(deps)
self._provision_info.provision_info.ClearField('retrieval_token')
except concurrent.futures.TimeoutError:
pass # TODO(BEAM-9577): Require this once all SDKs support it.
def cancel(self):
if not self.is_terminal_state(self.state):
self.set_state(beam_job_api_pb2.JobState.CANCELLING)
# TODO(robertwb): Actually cancel...
self.set_state(beam_job_api_pb2.JobState.CANCELLED)
def get_state_stream(self):
# Register for any new state changes.
state_queue = queue.Queue()
self._state_queues.append(state_queue)
for state, timestamp in self.with_state_history(_iter_queue(state_queue)):
yield state, timestamp
if self.is_terminal_state(state):
break
def get_message_stream(self):
# Register for any new messages.
log_queue = queue.Queue()
self._log_queues.append(log_queue)
self._state_queues.append(log_queue)
for msg in itertools.chain(self._log_queues.cache(),
self.with_state_history(_iter_queue(log_queue))):
if isinstance(msg, tuple):
assert len(msg) == 2 and isinstance(msg[0], int)
current_state = msg[0]
yield msg
if self.is_terminal_state(current_state):
break
else:
yield msg
class BeamFnLoggingServicer(beam_fn_api_pb2_grpc.BeamFnLoggingServicer):
def Logging(self, log_bundles, context=None):
for log_bundle in log_bundles:
for log_entry in log_bundle.log_entries:
_LOGGER.info('Worker: %s', str(log_entry).replace('\n', ' '))
return iter([])
class JobLogQueues(object):
def __init__(self):
self._queues = [] # type: List[queue.Queue]
self._cache = []
self._cache_size = 10
self._lock = threading.Lock()
def cache(self):
with self._lock:
return list(self._cache)
def append(self, queue):
with self._lock:
self._queues.append(queue)
def put(self, msg):
with self._lock:
if len(self._cache) < self._cache_size:
self._cache.append(msg)
else:
min_level = min(m.importance for m in self._cache)
if msg.importance >= min_level:
self._cache.append(msg)
for ix, m in enumerate(self._cache):
if m.importance == min_level:
del self._cache[ix]
break
for queue in self._queues:
queue.put(msg)
class JobLogHandler(logging.Handler):
"""Captures logs to be returned via the Beam Job API.
Enabled via the with statement."""
# Mapping from logging levels to LogEntry levels.
LOG_LEVEL_MAP = {
logging.FATAL: beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR,
logging.CRITICAL: beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR,
logging.ERROR: beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR,
logging.WARNING: beam_job_api_pb2.JobMessage.JOB_MESSAGE_WARNING,
logging.INFO: beam_job_api_pb2.JobMessage.JOB_MESSAGE_BASIC,
logging.DEBUG: beam_job_api_pb2.JobMessage.JOB_MESSAGE_DEBUG,
}
def __init__(self, log_queues):
super().__init__()
self._last_id = 0
self._logged_thread = None
self._log_queues = log_queues
def __enter__(self):
# Remember the current thread to demultiplex the logs of concurrently
# running pipelines (as Python log handlers are global).
self._logged_thread = threading.current_thread()
logging.getLogger().addHandler(self)
return self
def __exit__(self, *args):
self._logged_thread = None
self.close()
def _next_id(self):
self._last_id += 1
return str(self._last_id)
def emit(self, record):
if self._logged_thread is threading.current_thread():
msg = beam_job_api_pb2.JobMessage(
message_id=self._next_id(),
time=time.strftime(
'%Y-%m-%d %H:%M:%S.', time.localtime(record.created)),
importance=self.LOG_LEVEL_MAP[record.levelno],
message_text=self.format(record))
# Inform all message consumers.
self._log_queues.put(msg)
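

# Usage sketch (illustrative only; submitting an actual pipeline additionally needs a
# pipeline proto and a portable runner client, which are outside this excerpt): start
# the local job service on an ephemeral port and shut it down again.
if __name__ == '__main__':
  logging.basicConfig(level=logging.INFO)
  servicer = LocalJobServicer()
  grpc_port = servicer.start_grpc_server(port=0)
  _LOGGER.info('Local job service listening on %s:%d',
               servicer.get_service_address(), grpc_port)
  try:
    input('Press Enter to stop the job service...')
  finally:
    servicer.stop()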
|
pjit_test.py
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from functools import partial
import logging
import threading
import unittest
from collections import OrderedDict, namedtuple
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import jax
import jax.numpy as jnp
from jax._src import test_util as jtu
from jax import stages
from jax.errors import JAXTypeError
from jax import lax
# TODO(skye): do we still wanna call this PartitionSpec?
from jax.experimental import maps
from jax.experimental import PartitionSpec as P
from jax.experimental.maps import xmap
from jax.experimental import global_device_array
from jax.experimental import multihost_utils
import jax.experimental.pjit as pjit_lib
from jax.experimental.pjit import (pjit, pjit_p, with_sharding_constraint,
SpecSync, FROM_GDA, AUTO)
from jax.interpreters import pxla
from jax.interpreters import mlir
from jax._src.lib import xla_client, xla_bridge
from jax._src.util import prod, curry, unzip2, safe_zip
from jax.config import config
config.parse_flags_with_absl()
prev_xla_flags = None
def setUpModule():
global prev_xla_flags
prev_xla_flags = os.getenv("XLA_FLAGS")
flags_str = prev_xla_flags or ""
# Don't override user-specified device count, or other XLA flags.
if "xla_force_host_platform_device_count" not in flags_str:
os.environ["XLA_FLAGS"] = (flags_str +
" --xla_force_host_platform_device_count=8")
# Clear any cached backends so new CPU backend will pick up the env var.
xla_bridge.get_backend.cache_clear()
jtu.set_spmd_lowering_flag(True)
def tearDownModule():
if prev_xla_flags is None:
del os.environ["XLA_FLAGS"]
else:
os.environ["XLA_FLAGS"] = prev_xla_flags
xla_bridge.get_backend.cache_clear()
jtu.restore_spmd_lowering_flag()
def create_gda(global_shape, global_mesh, mesh_axes, global_data=None):
if global_data is None:
global_data = np.arange(
prod(global_shape), dtype=np.float32).reshape(global_shape)
return global_device_array.GlobalDeviceArray.from_callback(
global_shape, global_mesh, mesh_axes, lambda idx: global_data[idx])
@curry
def check_1d_2d_mesh(f, set_mesh):
return parameterized.named_parameters(
{"testcase_name": "_" + name, "mesh": mesh, "resources": resources}
for name, mesh, resources in (
("2", (("x", 2),), "x"),
("2x1", (("x", 2), ("y", 1)), ("x", "y")),
("2x2", (("x", 2), ("y", 2)), ("x", "y")),
))(jtu.with_mesh_from_kwargs(f) if set_mesh else f)
# TODO(skye): make the buffer donation utils part of JaxTestCase
class PJitTest(jtu.BufferDonationTestCase):
@jtu.with_mesh([('x', 1)])
def testDeviceBufferAval(self):
@partial(pjit, in_axis_resources=None, out_axis_resources=P('x'))
def f(x):
return x
shape = (2, 2)
x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
actual = f(x)
expected = x
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertIsInstance(actual, pxla.ShardedDeviceArray)
self.assertLen(actual.device_buffers, 1)
self.assertAllClose(
actual.device_buffers[0].to_py(), expected, check_dtypes=False)
# Repro for a bug on device_buffer aval
_ = repr(actual.device_buffers)
@jtu.with_mesh([('x', 2)])
def testBasic1D(self):
@partial(pjit,
in_axis_resources=(P('x'), P('x')),
out_axis_resources=None)
def f(x, y):
return x + y
shape = (8, 8)
x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
actual = f(x, x + 1)
expected = x + (x + 1)
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertIsInstance(actual, pxla.ShardedDeviceArray)
self.assertLen(actual.device_buffers, 2)
self.assertAllClose(actual.device_buffers[0].to_py(), expected,
check_dtypes=False)
@jtu.with_mesh([('x', 2)])
def testJitOfPjitDisallowed(self):
@partial(pjit,
in_axis_resources=(P('x'), P('x')),
out_axis_resources=None)
def f(x, y):
return x + y
shape = (8, 8)
x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
with self.assertRaises(RuntimeError,
msg="Nesting pjit() inside jit() is not allowed."):
jax.jit(f)(x, x + 1)
@jtu.with_mesh([('x', 2)])
def testUnevenShardingConstraint(self):
@partial(pjit,
in_axis_resources=(P('x'), P('x')),
out_axis_resources=None)
def f(x, y):
x = x[:3]
y = y[:3]
x = with_sharding_constraint(x, P('x'))
y = with_sharding_constraint(y, P('x'))
out = x + y
return jnp.pad(out, [[0, 1]])
shape = (4,)
x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
actual = f(x, x + 1)
expected = x + (x + 1)
self.assertAllClose(actual[:3], expected[:3], check_dtypes=False)
self.assertIsInstance(actual, pxla.ShardedDeviceArray)
self.assertLen(actual.device_buffers, 2)
self.assertAllClose(actual.device_buffers[0].to_py()[:3], expected[:3],
check_dtypes=False)
def testBasic1DWithMeshContextManager(self):
@partial(pjit,
in_axis_resources=(P('x'), P('x')),
out_axis_resources=None)
def f(x, y):
return x + y
shape = (8, 8)
x = np.arange(prod(shape), dtype=np.float32).reshape(shape)
with jtu.create_global_mesh((2,), ('x')) as mesh:
actual = f(x, x + 1)
expected = x + (x + 1)
self.assertEqual(mesh, jtu.create_global_mesh((2,), ('x')))
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertIsInstance(actual, pxla.ShardedDeviceArray)
self.assertLen(actual.device_buffers, 2)
self.assertAllClose(actual.device_buffers[0].to_py(), expected,
check_dtypes=False)
@jtu.with_mesh([('x', 2), ('y', 2)])
def testBasic2D(self):
@partial(pjit,
in_axis_resources=(P(None, 'x', 'y'), P('y')),
out_axis_resources=P('x'))
def f(x, y):
return x @ y
x_shape = (8, 6, 4)
y_shape = (4, 2)
x = jnp.arange(np.prod(x_shape)).reshape(x_shape)
y = jnp.arange(np.prod(y_shape)).reshape(y_shape)
actual = f(x, y)
expected = x @ y
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertIsInstance(actual, pxla.ShardedDeviceArray)
self.assertLen(actual.device_buffers, 4)
split0, split1 = np.split(expected, 2)
self.assertAllClose(actual.device_buffers[0].to_py(), split0,
check_dtypes=False)
self.assertAllClose(actual.device_buffers[1].to_py(), split0,
check_dtypes=False)
self.assertAllClose(actual.device_buffers[2].to_py(), split1,
check_dtypes=False)
self.assertAllClose(actual.device_buffers[3].to_py(), split1,
check_dtypes=False)
def testBasic2DWithMeshContextManager(self):
@partial(pjit,
in_axis_resources=(P(None, 'x', 'y'), P('y')),
out_axis_resources=P('x'))
def f(x, y):
return x @ y
x_shape = (8, 6, 4)
y_shape = (4, 2)
x = jnp.arange(np.prod(x_shape)).reshape(x_shape)
y = jnp.arange(np.prod(y_shape)).reshape(y_shape)
mesh = jtu.create_global_mesh((2, 2), ('x', 'y'))
with mesh:
actual = f(x, y)
expected = x @ y
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertIsInstance(actual, pxla.ShardedDeviceArray)
self.assertLen(actual.device_buffers, 4)
split0, split1 = np.split(expected, 2)
self.assertAllClose(actual.device_buffers[0].to_py(), split0,
check_dtypes=False)
self.assertAllClose(actual.device_buffers[1].to_py(), split0,
check_dtypes=False)
self.assertAllClose(actual.device_buffers[2].to_py(), split1,
check_dtypes=False)
self.assertAllClose(actual.device_buffers[3].to_py(), split1,
check_dtypes=False)
def testDifferentNestedMesh(self):
with jtu.create_global_mesh((2, 1), ("x", "y")) as m1:
with jtu.create_global_mesh((2, 2), ("a", "b")) as m2:
self.assertEqual(pxla.thread_resources.env.physical_mesh, m2)
self.assertEqual(pxla.thread_resources.env.physical_mesh, m1)
self.assertEqual(pxla.thread_resources.env.physical_mesh,
pxla.EMPTY_ENV.physical_mesh)
def testSameNestedMesh(self):
mesh = jtu.create_global_mesh((2, 1), ("a", "b"))
with mesh as m1:
with mesh as m2:
self.assertEqual(pxla.thread_resources.env.physical_mesh, m2)
self.assertEqual(pxla.thread_resources.env.physical_mesh, m1)
self.assertEqual(pxla.thread_resources.env.physical_mesh,
pxla.EMPTY_ENV.physical_mesh)
def testMeshDecorator(self):
x = jnp.arange(8)
mesh_shape = (2, 2)
size = prod(mesh_shape)
if len(jax.devices()) < size:
raise unittest.SkipTest(f"Test requires {size} global devices.")
mesh_devices = np.array(jax.devices()[:size]).reshape(mesh_shape)
@maps.Mesh(mesh_devices, ('x', 'y'))
def dec():
return pjit(lambda x: x, in_axis_resources=P('x'), out_axis_resources=None)(x)
out = dec()
self.assertArraysEqual(out, x)
@jtu.with_mesh([('x', 2), ('y', 2)])
def testTwoMeshAxisSharding(self):
@partial(pjit,
in_axis_resources=P(('x', 'y'),),
out_axis_resources=P(('x', 'y'),))
def f(x, y):
return x @ y
shape = (8, 8)
x = jnp.arange(np.prod(shape)).reshape(shape)
actual = f(x, x + 1)
expected = x @ (x + 1)
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertIsInstance(actual, pxla.ShardedDeviceArray)
self.assertLen(actual.device_buffers, 4)
splits = np.split(expected, 4)
self.assertAllClose(actual.device_buffers[0].to_py(), splits[0],
check_dtypes=False)
self.assertAllClose(actual.device_buffers[1].to_py(), splits[1],
check_dtypes=False)
self.assertAllClose(actual.device_buffers[2].to_py(), splits[2],
check_dtypes=False)
self.assertAllClose(actual.device_buffers[3].to_py(), splits[3],
check_dtypes=False)
@jtu.with_mesh([('x', 2)])
def testBufferDonation(self):
if jax.default_backend() not in {'gpu', 'tpu'}:
raise unittest.SkipTest('Buffer donation only supported on GPU and TPU')
@partial(pjit,
in_axis_resources=P('x'),
out_axis_resources=P('x'),
donate_argnums=0)
def f(x, y):
return x + y
shard = pjit(lambda x: x, in_axis_resources=P('x'),
out_axis_resources=P('x'))
x = shard(jnp.ones((2, 5)) * 4)
y = shard(jnp.ones((2, 5)) * 2)
expected = x + y
self.assertAllClose(f(x, y), expected)
self.assertNotDeleted(y)
self.assertDeleted(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testShardingConstraint(self):
@partial(pjit, in_axis_resources=None, out_axis_resources=None)
def f(x):
y = x + 1
y = with_sharding_constraint(y, P('x', 'y'))
return y * 2
shape = (8, 8)
x = np.arange(prod(shape)).reshape(shape)
expected = (x + 1) * 2
actual = f(x)
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertIsInstance(actual, pxla.ShardedDeviceArray)
self.assertLen(actual.device_buffers, 2)
self.assertAllClose(actual.device_buffers[0].to_py(), expected,
check_dtypes=False)
hlo = f.lower(np.ones(shape)).compiler_ir(dialect="hlo")
# Annotation from with_sharding_constraint
self.assertIn("sharding={devices=[2,1]0,1}", hlo.as_hlo_text())
# Annotation from pjit
self.assertIn("sharding={replicated}", hlo.as_hlo_text())
@jtu.with_mesh([('x', 2), ('y', 1)])
def testShardingConstraintPyTree(self):
@partial(pjit, in_axis_resources=None, out_axis_resources=None)
def f(x):
x = with_sharding_constraint(x, [P('x', 'y'), P('y', 'x')])
x = x.copy()
x[0]["a"] *= 2
return x
shape = (8, 8)
v = np.arange(prod(shape)).reshape(shape)
x = [{"a": v, "b": v * 2}, v * 3]
actual = f(x)
expected = x.copy()
expected[0]["a"] *= 2
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertLen(actual[0]["a"].device_buffers, 2)
hlo = f.lower(x).compiler_ir(dialect="hlo")
# Annotations from with_sharding_constraint
self.assertIn("sharding={devices=[2,1]0,1}", hlo.as_hlo_text())
self.assertIn("sharding={devices=[1,2]0,1}", hlo.as_hlo_text())
# Annotation from pjit
self.assertIn("sharding={replicated}", hlo.as_hlo_text())
@jtu.with_mesh([('x', 2), ('y', 2)])
def testShardingConstraintPyTreeWithUnconstrainedDims(self):
@partial(pjit, in_axis_resources=None, out_axis_resources=None)
def f(x):
x = with_sharding_constraint(
x, [P(P.UNCONSTRAINED, 'y', None),
P('x', P.UNCONSTRAINED, None)])
x = x.copy()
x[0]['a'] *= 2
return x
shape = (2, 8, 8)
v = np.arange(prod(shape)).reshape(shape)
x = [{'a': v, 'b': v * 2}, v * 3]
actual = f(x)
expected = x.copy()
expected[0]['a'] *= 2
self.assertAllClose(actual, expected, check_dtypes=False)
self.assertLen(actual[0]['a'].device_buffers, 4)
mhlo_str = str(f.lower(x).compiler_ir(dialect="mhlo"))
self.assertIn("unspecified_dims=[0]", mhlo_str)
self.assertIn("unspecified_dims=[1]", mhlo_str)
def testCaching(self):
def f(x):
assert should_be_tracing
return jnp.sin(x) * 2
x = np.arange(16).reshape(4, 4)
devices = np.array(list(jax.local_devices())[:4])
if devices.size < 4:
raise unittest.SkipTest("Test requires 4 devices")
devices = devices.reshape((2, 2))
with maps.Mesh(devices, ('x', 'y')):
should_be_tracing = True
pjit(f, in_axis_resources=P(('x', 'y')), out_axis_resources=None)(x)
should_be_tracing = False
pjit(f, in_axis_resources=P(('x', 'y')), out_axis_resources=None)(x)
    # Re-create the mesh to make sure that it has no influence on caching
with maps.Mesh(devices, ('x', 'y')):
should_be_tracing = False
pjit(f, in_axis_resources=P(('x', 'y')), out_axis_resources=None)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testNested(self):
# Add a constant captured by the nested pjit to make things more complicated
h = jnp.arange(4)
f = pjit(lambda x: x.sum() + h.sum(), in_axis_resources=P('x', 'y'), out_axis_resources=None)
g = pjit(lambda x: f(jnp.sin(x)), in_axis_resources=P('x', None), out_axis_resources=None)
x = jnp.arange(16).reshape((4, 4))
y = g(x)
self.assertAllClose(y, jnp.sin(x).sum() + h.sum())
self.assertTrue(hasattr(y, "sharding_spec"))
@check_1d_2d_mesh(set_mesh=True)
def testAutodiff(self, mesh, resources):
if len(mesh) != 2: return
assert resources == ('x', 'y')
# Add a constant captured by the nested pjit to make things more complicated
h = jnp.arange(4)
f = pjit(lambda x: x.sum(1) * h.sum(),
in_axis_resources=P('x', 'y'), out_axis_resources=P(('x', 'y')))
g = pjit(lambda x: f(jnp.sin(x * 4 + 2)),
in_axis_resources=P('x', None), out_axis_resources=P(('x', 'y')))
jtu.check_grads(g, (jnp.arange(16, dtype=jnp.float32).reshape((4, 4)) / 100,),
order=2)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testEvalJaxpr(self):
x, y = jnp.arange(4), jnp.arange(5)
f = pjit(lambda x, y: x.sum() + jnp.sin(y),
in_axis_resources=(P('x'), P('y')),
out_axis_resources=P('y'))
f_jaxpr = jax.make_jaxpr(f)(x, y)
f_eval = jax.core.jaxpr_as_fun(f_jaxpr)
r, = f_eval(x, y)
self.assertAllClose(r, x.sum() + jnp.sin(y))
@jtu.with_mesh([('x', 2)])
def testNonArrayArg(self):
self.assertEqual(pjit(lambda x: x + 2,
in_axis_resources=None,
out_axis_resources=None)(1), 3)
@jtu.with_mesh([('x', 2)])
def testNonHashableAxisResources(self):
x = jnp.arange(4)
y = pjit(lambda x: {'b': x['a'] + 2},
in_axis_resources=({'a': P('x')},),
out_axis_resources={'b': P('x')})({'a': x})
self.assertAllClose(y, {'b': x + 2})
@jtu.with_mesh([('x', 2)])
def testGradOfConstraint(self):
# Make sure that we can compute grads through sharding constraints
h = lambda x: jnp.sin(with_sharding_constraint(x, P('x'))).sum()
f = pjit(lambda x: jax.grad(h)(x),
in_axis_resources=None, out_axis_resources=None)
x = jnp.arange(8, dtype=jnp.float32)
self.assertAllClose(f(x), jnp.cos(x))
@jtu.with_mesh([('x', 2)])
def testNoopPartitionSpecs(self):
noops = [P(), P(None), P(()), P((), None), P(None, None, ())]
x = jnp.arange(8).reshape((2, 2, 2))
for spec in noops:
y = pjit(lambda x: x * 2, in_axis_resources=spec, out_axis_resources=spec)(x)
self.assertAllClose(y, x * 2)
@jtu.with_mesh([('x', 2)])
def testVmapModifiesAxisResources(self):
h = pjit(lambda x, y: (x + y, x, y), in_axis_resources=P('x'), out_axis_resources=None)
x = jnp.arange(4)
y = jnp.arange(5*4).reshape((5, 4))
jaxpr = jax.make_jaxpr(jax.vmap(h, in_axes=(None, 0)))(x, y).jaxpr
eqn = jaxpr.eqns[0]
self.assertIs(eqn.primitive, pjit_p)
x_sync, y_sync = (spec.sync for spec in eqn.params['in_axis_resources'])
self.assertEqual(x_sync, SpecSync.IN_SYNC)
self.assertEqual(y_sync, SpecSync.DIM_PERMUTE)
x_sync, y_sync, z_sync = (spec.sync for spec in eqn.params['out_axis_resources'])
self.assertEqual(x_sync, SpecSync.DIM_PERMUTE)
self.assertEqual(y_sync, SpecSync.IN_SYNC)
self.assertEqual(z_sync, SpecSync.DIM_PERMUTE)
@jtu.with_mesh([('x', 2)])
def testVMap(self):
f = pjit(lambda x, y: (x + y, x), in_axis_resources=P('x'), out_axis_resources=P('x'))
x = jnp.arange(4)
y = jnp.arange(5*4).reshape((5, 4))
z, w = jax.vmap(f, in_axes=(None, 0), out_axes=(0, None))(x, y)
self.assertAllClose(z, x[jnp.newaxis] + y)
self.assertAllClose(w, x)
self.assertEqual(z.sharding_spec.sharding, (pxla.NoSharding(), pxla.Chunked([2])))
self.assertEqual(w.sharding_spec.sharding, (pxla.Chunked([2]),))
@jtu.with_mesh([('x', 2)])
def testVMapShardingConstraint(self):
f = pjit(lambda x: with_sharding_constraint(x, P('x')),
in_axis_resources=P(), out_axis_resources=P('x'))
x = jnp.arange(5*4).reshape((5, 4))
jaxpr = jax.make_jaxpr(jax.vmap(f))(x)
pjit_eqn, = jaxpr.eqns
constraint_eqn, = pjit_eqn.params['jaxpr'].eqns
self.assertEqual(constraint_eqn.params['axis_resources'].partitions, (None, ('x',)))
self.assertEqual(constraint_eqn.params['axis_resources'].sync, SpecSync.DIM_PERMUTE)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testShardingInXMap(self):
h = pjit(lambda x: x, in_axis_resources=P('x'), out_axis_resources=None)
f = xmap(lambda x: h(x * 2), in_axes=['i', ...], out_axes=['i', ...],
axis_resources={'i': 'y'})
x = jnp.arange(16).reshape((4, 4))
rule = mlir._lowerings[pjit_p]
test_rule_called = False
def _test_rule(*args, **kwargs):
nonlocal test_rule_called
test_rule_called = True
in_axis_resources = kwargs['in_axis_resources']
self.assertEqual(len(in_axis_resources), 1)
self.assertIn(('y',), in_axis_resources[0].partitions)
return rule(*args, **kwargs)
try:
mlir._lowerings[pjit_p] = _test_rule
f(x)
self.assertTrue(test_rule_called)
finally:
mlir._lowerings[pjit_p] = rule
@jtu.with_mesh([('x', 2)])
def testLowerWithDuckTyping(self):
x = jax.ShapeDtypeStruct((2, 2), jnp.float32)
# Make sure this doesn't crash
pjit(lambda x: x + 4,
in_axis_resources=P('x'), out_axis_resources=P('x')).lower(x)
@jtu.with_mesh([('x', 2)])
def testLowerDonateArgnumsAvailable(self):
x = jax.ShapeDtypeStruct((2, 2), jnp.float32)
def f(*args):
x, *_ = args
return x
f_low = pjit(f, donate_argnums=(0,),
in_axis_resources=P('x'), out_axis_resources=P('x')).lower(x)
f_com = f_low.compile()
    self.assertTrue(f_low.donate_argnums == f_com.donate_argnums == (0,))
def testInfeed(self):
devices = np.array(jax.local_devices())
nr_devices = len(devices)
shape = (nr_devices * 3, nr_devices * 5)
def f_for_jit(x):
token = lax.create_token(x)
(y,), token = lax.infeed(
token, shape=(jax.ShapedArray(x.shape, np.float32),))
(z,), token = lax.infeed(
token, shape=(jax.ShapedArray(x.shape, np.float32),))
(w,), token = lax.infeed(
token, shape=(jax.ShapedArray(x.shape, np.float32),))
return x + y + z + w
x = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)
y = x * 2.
z = x * 3.
w = x * 4.
# Transfer data to infeed before executing the function. For GPUs, the
# execution of the compiled function is blocking, so transferring data
# to infeed before executing ensures that the execution does not deadlock
# waiting for the infeed data.
    logging.info('Transferring to infeed for the jit call')
d = devices[0]
d.transfer_to_infeed((y,))
d.transfer_to_infeed((z,))
d.transfer_to_infeed((w,))
# JIT
logging.info('Making jit call')
res0 = jax.jit(f_for_jit)(x)
self.assertAllClose(res0, x + y + z + w, check_dtypes=True)
# PJIT
def f_for_pjit(x):
token = lax.create_token(x)
# A replicated infeed
(y,), token = lax.infeed(
token,
shape=(jax.ShapedArray(x.shape, np.float32),),
partitions=(None,))
# An infeed sharded on first axis
(z,), token = lax.infeed(
token,
shape=(jax.ShapedArray(x.shape, np.float32),),
partitions=(P(nr_devices, 1),))
# An infeed sharded on second axis
(w,), token = lax.infeed(
token,
shape=(jax.ShapedArray(x.shape, np.float32),),
partitions=(P(1, nr_devices),))
return x + y + z + w
    logging.info('Transferring to infeed for the pjit call')
for didx, d in enumerate(devices):
      # Transfer the whole array to all devices for the replicated infeed.
d.transfer_to_infeed((y,))
# For sharded infeed, transfer only the needed slices to each device.
      d.transfer_to_infeed((z[3 * didx:3 * didx + 3, :],))
d.transfer_to_infeed((w[:, 5 * didx:5 * didx + 5],))
with maps.Mesh(devices, ['d']):
logging.info('Making pjit call')
res = pjit(
f_for_pjit, in_axis_resources=(P('d'),), out_axis_resources=P('d'))(
x)
self.assertAllClose(res0, res, check_dtypes=True)
def testOutfeed(self):
devices = np.array(jax.local_devices())
nr_devices = len(devices)
shape = (nr_devices * 3, nr_devices * 5)
def f(x):
token = lax.create_token(x)
token = lax.outfeed(token, x, partitions=(None,))
token = lax.outfeed(token, x, partitions=(P(nr_devices, 1),))
token = lax.outfeed(token, x, partitions=(P(1, nr_devices),))
return x
x = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)
def dispatch():
with maps.Mesh(devices, ['d']):
logging.info('Making pjit call')
pjit(f, in_axis_resources=(P('d'),), out_axis_resources=P('d'))(x)
execution = threading.Thread(target=dispatch)
execution.start()
def check_outfeed(d, x):
y, = d.transfer_from_outfeed(
xla_client.shape_from_pyval((x,)).with_major_to_minor_layout_if_absent())
self.assertAllClose(x, y, check_dtypes=True)
    logging.info('Transferring from outfeed for the pjit call')
for didx, d in enumerate(devices):
      # Transfer the whole array from all devices for the replicated outfeed.
check_outfeed(d, x)
# For sharded outfeed, the results are sliced.
check_outfeed(d, x[3 * didx:3 * didx + 3, :])
check_outfeed(d, x[:, 5 * didx:5 * didx + 5])
execution.join()
@jtu.with_mesh([('x', 2)])
def testWithCustomPRNGKey(self):
if not config.jax_enable_custom_prng:
raise unittest.SkipTest("test requires jax_enable_custom_prng")
key = jax.prng.seed_with_impl(jax.prng.rbg_prng_impl, 87)
# Make sure this doesn't crash
pjit(lambda x: x, in_axis_resources=(None), out_axis_resources=(None))(key)
@jtu.with_mesh([('x', 2), ('y', 2)])
def testLowerCompile(self):
@partial(pjit,
in_axis_resources=P(('x', 'y'),),
out_axis_resources=P(('x', 'y'),))
def f(x, y):
return x @ y
shape = (8, 8)
x = jnp.arange(np.prod(shape)).reshape(shape)
expected = x @ (x + 1)
lowered = f.lower(x, x + 1)
compiled = lowered.compile()
actual = compiled(x, x + 1)
self.assertEqual(lowered.in_avals, compiled.in_avals)
self.assertEqual(
lowered.in_avals,
((jax.ShapedArray(x.shape, x.dtype, weak_type=False),) * 2, {}))
splits = np.split(expected, 4)
self.assertAllClose(actual.device_buffers[0].to_py(), splits[0],
check_dtypes=False)
self.assertAllClose(actual.device_buffers[1].to_py(), splits[1],
check_dtypes=False)
self.assertAllClose(actual.device_buffers[2].to_py(), splits[2],
check_dtypes=False)
self.assertAllClose(actual.device_buffers[3].to_py(), splits[3],
check_dtypes=False)
for obj in [lowered, compiled]:
      self.assertTrue(obj._no_kwargs)
self.assertEqual(obj.in_tree, jax.tree_flatten(((0, 0), {}))[1])
@jtu.with_mesh([('x', 2), ('y', 2)])
def testLowerCompileWithKwargs(self):
@partial(pjit,
in_axis_resources=P(('x', 'y'),),
out_axis_resources=P(('x', 'y'),))
def f(x, y, **kwargs):
return x @ y
shape = (8, 8)
x = jnp.arange(np.prod(shape)).reshape(shape)
exe = f.lower(x, x + 1).compile()
self.assertRaisesRegex(
NotImplementedError,
"function was compiled by a transformation that does not support "
"keyword arguments, but called with keyword arguments: a, b",
lambda: exe(x, x + 1, a=1, b=2))
@jtu.with_mesh([('x', 2), ('y', 2)])
def testLowerCompileInTreeMismatch(self):
@partial(pjit,
in_axis_resources=P(('x', 'y'),),
out_axis_resources=P(('x', 'y'),))
def f(x, y):
return x @ y
shape = (8, 8)
x = jnp.arange(np.prod(shape)).reshape(shape)
exe = f.lower(x, x + 1).compile()
self.assertRaisesRegex(
TypeError, "function compiled for .*, called with .*",
lambda: exe([x], [x + 1]))
@jtu.with_mesh([('x', 2), ('y', 2)])
def testLowerCompileArgTypeMismatch(self):
@partial(pjit,
in_axis_resources=P(('x', 'y'),),
out_axis_resources=P(('x', 'y'),))
def f(x, y):
return x @ y
shape = (8, 8)
x = jnp.arange(np.prod(shape)).reshape(shape)
x_f32 = x.astype(jnp.float32)
x_i32 = x.astype(jnp.int32)
exe = f.lower(x_f32, x_f32).compile()
self.assertRaisesRegex(
TypeError,
"Computation compiled for input types:\n.*float32.*\n"
"called with:\n.*int32.*",
lambda: exe(x_i32, x_i32))
@jtu.with_mesh([('x', 2), ('y', 2)])
def testLowerCompilerIR(self):
@partial(pjit,
in_axis_resources=P(('x', 'y'),),
out_axis_resources=P(('x', 'y'),))
def f(x, y):
return x @ y
shape = (8, 8)
x = jnp.arange(np.prod(shape)).reshape(shape)
f = f.lower(x, x + 1)
self.assertIsNotNone(f.compiler_ir())
self.assertIsNotNone(f.compiler_ir(dialect='hlo'))
self.assertIsNotNone(f.compiler_ir(dialect='mhlo'))
@jtu.with_mesh([('x', 2), ('y', 2)])
def testLowerCompileCompilerIR(self):
@partial(pjit,
in_axis_resources=P(('x', 'y'),),
out_axis_resources=P(('x', 'y'),))
def f(x, y):
return x @ y
shape = (8, 8)
x = jnp.arange(np.prod(shape)).reshape(shape)
f = f.lower(x, x + 1).compile()
self.assertIsNotNone(f.compiler_ir())
@jtu.with_mesh([('x', 2), ('y', 2)])
def testLowerCompileExecutable(self):
@partial(pjit,
in_axis_resources=P(('x', 'y'),),
out_axis_resources=P(('x', 'y'),))
def f(x, y):
return x @ y
shape = (8, 8)
x = jnp.arange(np.prod(shape)).reshape(shape)
f = f.lower(x, x + 1).compile()
self.assertIsNotNone(f.runtime_executable())
@jtu.with_mesh([('x', 2)])
def test_static_argnums(self):
@partial(pjit, in_axis_resources=None, out_axis_resources=None,
static_argnums=(1,))
def f(x, y):
return x + (3 if y == 'hi' else 4)
self.assertEqual(f(1, 'hi' ), 4)
self.assertEqual(f(1, 'bye'), 5)
@jtu.with_mesh([('x', 4), ('y', 2)])
def testLowerCompileWithAvals(self):
@partial(pjit,
in_axis_resources=P(('x', 'y'),),
out_axis_resources=P(('x', 'y'),))
def f(x, y):
return x @ y
shape = (8, 8)
aval = jax.ShapedArray(shape, jnp.int64)
x = jnp.arange(np.prod(shape)).reshape(shape)
exe = f.lower(aval, x, _global_avals=True).compile()
self.assertIsInstance(exe, stages.Compiled)
self.assertArraysEqual(exe(x, x), x @ x)
class GDAPjitTest(jtu.JaxTestCase):
def test_pjit_gda_single_output(self):
global_mesh = jtu.create_global_mesh((4, 2), ('x', 'y'))
global_input_shape = (8, 2)
mesh_axes = P('x', 'y')
input_data = np.arange(
prod(global_input_shape)).reshape(global_input_shape)
def cb(index):
return input_data[index]
gda_obj = global_device_array.GlobalDeviceArray.from_callback(
global_input_shape, global_mesh, mesh_axes, cb)
with jax._src.config.parallel_functions_output_gda(True):
with global_mesh:
@partial(pjit, in_axis_resources=FROM_GDA, out_axis_resources=P('x', 'y'))
def f(x):
return x @ x.T
expected_matrix_mul = input_data @ input_data.T
out = f(gda_obj)
self.assertIsInstance(out, global_device_array.GlobalDeviceArray)
self.assertEqual(out.shape, (8, 8))
self.assertEqual(out.local_shards[0].data.shape, (2, 4))
self.assertDictEqual(out.mesh.shape, {'x': 4, 'y': 2})
for s in out.local_shards:
self.assertArraysEqual(s.data, expected_matrix_mul[s.index])
out2 = f(out)
self.assertIsInstance(out2, global_device_array.GlobalDeviceArray)
with self.assertRaisesRegex(
ValueError, ('For a non-GDA input, the corresponding resource in '
'in_axis_resources cannot be `pjit.FROM_GDA`.')):
f(input_data)
@jtu.with_mesh([('x', 4), ('y', 2)])
def test_pjit_gda_multi_input_multi_output(self):
global_mesh = jtu.create_global_mesh((4, 2), ('x', 'y'))
global_input_shape = (8, 2)
input_data = np.arange(
prod(global_input_shape)).reshape(global_input_shape)
def cb(index):
return input_data[index]
mesh_axes1 = P('x', 'y')
gda1 = global_device_array.GlobalDeviceArray.from_callback(
global_input_shape, global_mesh, mesh_axes1, cb)
mesh_axes2 = P('x')
gda2 = global_device_array.GlobalDeviceArray.from_callback(
global_input_shape, global_mesh, mesh_axes2, cb)
mesh_axes3 = P(('x', 'y'))
gda3 = global_device_array.GlobalDeviceArray.from_callback(
global_input_shape, global_mesh, mesh_axes3, cb)
mesh_axes4 = P(None)
gda4 = global_device_array.GlobalDeviceArray.from_callback(
global_input_shape, global_mesh, mesh_axes4, cb)
with jax._src.config.parallel_functions_output_gda(True):
@partial(
pjit,
# `FROM_GDA` will be replicated for all the inputs.
in_axis_resources=FROM_GDA,
out_axis_resources=(mesh_axes1, mesh_axes4, mesh_axes2, mesh_axes3))
def f(x, y, z, a):
return x @ x.T, y, z, a
out1, out2, out3, out4 = f(gda1, gda2, gda3, gda4)
self.assertIsInstance(out1, global_device_array.GlobalDeviceArray)
self.assertEqual(out1.shape, (8, 8))
self.assertEqual(out1.local_shards[0].data.shape, (2, 4))
self.assertEqual(out1.local_shards[0].index, (slice(0, 2), slice(0, 4)))
self.assertEqual(out1.local_shards[1].index, (slice(0, 2), slice(4, 8)))
self.assertListEqual([s.replica_id for s in out1.local_shards],
[0, 0, 0, 0, 0, 0, 0, 0])
expected_matrix_mul = input_data @ input_data.T
for s in out1.local_shards:
self.assertArraysEqual(s.data, expected_matrix_mul[s.index])
self.assertIsInstance(out2, global_device_array.GlobalDeviceArray)
self.assertEqual(out2.shape, (8, 2))
self.assertEqual(out2.local_shards[0].data.shape, (8, 2))
self.assertEqual(out2.local_shards[0].index, (slice(None), slice(None)))
self.assertEqual(out2.local_shards[1].index, (slice(None), slice(None)))
self.assertListEqual([s.replica_id for s in out2.local_shards],
[0, 1, 2, 3, 4, 5, 6, 7])
for s in out2.local_shards:
self.assertArraysEqual(s.data, input_data)
self.assertIsInstance(out3, global_device_array.GlobalDeviceArray)
self.assertEqual(out3.shape, (8, 2))
self.assertEqual(out3.local_shards[0].data.shape, (2, 2))
self.assertEqual(out3.local_shards[0].index, (slice(0, 2), slice(None)))
self.assertEqual(out3.local_shards[1].index, (slice(0, 2), slice(None)))
self.assertListEqual([s.replica_id for s in out3.local_shards],
[0, 1, 0, 1, 0, 1, 0, 1])
for s in out3.local_shards:
self.assertArraysEqual(s.data, input_data[s.index])
self.assertIsInstance(out4, global_device_array.GlobalDeviceArray)
self.assertEqual(out4.shape, (8, 2))
self.assertEqual(out4.local_shards[0].data.shape, (1, 2))
self.assertEqual(out4.local_shards[0].index, (slice(0, 1), slice(None)))
self.assertEqual(out4.local_shards[1].index, (slice(1, 2), slice(None)))
self.assertListEqual([s.replica_id for s in out4.local_shards],
[0, 0, 0, 0, 0, 0, 0, 0])
for s in out4.local_shards:
self.assertArraysEqual(s.data, input_data[s.index])
@jtu.with_mesh([('x', 4), ('y', 2)])
def test_pjit_gda_mixed_inputs(self):
global_mesh = jtu.create_global_mesh((4, 2), ('x', 'y'))
global_input_shape = (8, 2)
mesh_axes = P('x', 'y')
input_data = np.arange(
prod(global_input_shape)).reshape(global_input_shape)
def cb(index):
return input_data[index]
gda_obj = global_device_array.GlobalDeviceArray.from_callback(
global_input_shape, global_mesh, mesh_axes, cb)
with jax._src.config.parallel_functions_output_gda(True):
@partial(pjit,
in_axis_resources=(FROM_GDA, P('x', 'y')),
out_axis_resources=(P('x', 'y'), P(('x', 'y'))))
def f(x, y):
return x @ x.T, y @ y.T
expected_matrix_mul = input_data @ input_data.T
out1, out2 = f(gda_obj, input_data)
self.assertIsInstance(out1, global_device_array.GlobalDeviceArray)
self.assertEqual(out1.shape, (8, 8))
self.assertEqual(out1.local_shards[0].data.shape, (2, 4))
self.assertDictEqual(out1.mesh.shape, {'x': 4, 'y': 2})
for s in out1.local_shards:
self.assertArraysEqual(s.data, expected_matrix_mul[s.index])
self.assertIsInstance(out2, global_device_array.GlobalDeviceArray)
self.assertEqual(out2.shape, (8, 8))
self.assertEqual(out2.local_shards[0].data.shape, (1, 8))
self.assertDictEqual(out2.mesh.shape, {'x': 4, 'y': 2})
for s in out2.local_shards:
self.assertArraysEqual(s.data, expected_matrix_mul[s.index])
@jtu.with_mesh([('x', 4), ('y', 2)])
def test_pjit_gda_non_gda_inputs(self):
input_shape = (8, 2)
input_data = np.arange(prod(input_shape)).reshape(input_shape)
with jax._src.config.parallel_functions_output_gda(True):
@partial(pjit,
in_axis_resources=(None, P('x', 'y')),
out_axis_resources=(P('x', 'y'), P(('x', 'y'))))
def f(x, y):
return x @ x.T, y @ y.T
expected_matrix_mul = input_data @ input_data.T
out1, out2 = f(input_data, input_data)
self.assertIsInstance(out1, global_device_array.GlobalDeviceArray)
self.assertEqual(out1.shape, (8, 8))
self.assertEqual(out1.local_shards[0].data.shape, (2, 4))
self.assertDictEqual(out1.mesh.shape, {'x': 4, 'y': 2})
for s in out1.local_shards:
self.assertArraysEqual(s.data, expected_matrix_mul[s.index])
self.assertIsInstance(out2, global_device_array.GlobalDeviceArray)
self.assertEqual(out2.shape, (8, 8))
self.assertEqual(out2.local_shards[0].data.shape, (1, 8))
self.assertDictEqual(out2.mesh.shape, {'x': 4, 'y': 2})
for s in out2.local_shards:
self.assertArraysEqual(s.data, expected_matrix_mul[s.index])
@jtu.with_mesh([('x', 2), ('y', 2)])
def test_pjit_gda_mesh_mismatch(self):
global_mesh = jtu.create_global_mesh((4, 2), ('x', 'y'))
global_input_shape = (8, 2)
mesh_axes = P('x', 'y')
global_input_data = np.arange(
prod(global_input_shape), dtype=np.float32).reshape(global_input_shape)
def cb(index):
return global_input_data[index]
gda_obj = global_device_array.GlobalDeviceArray.from_callback(
global_input_shape, global_mesh, mesh_axes, cb)
with self.assertRaisesRegex(ValueError,
"Pjit's mesh and GDA's mesh should be equal."):
@partial(pjit, in_axis_resources=FROM_GDA, out_axis_resources=P('x', 'y'))
def f(x):
return x
f(gda_obj)
@jtu.with_mesh([('x', 4), ('y', 2)])
def test_pjit_gda_wrong_resource_for_gda_input(self):
global_mesh = jtu.create_global_mesh((4, 2), ('x', 'y'))
global_input_shape = (8, 2)
mesh_axes = P('x')
global_input_data = np.arange(
prod(global_input_shape), dtype=np.float32).reshape(global_input_shape)
def cb(index):
return global_input_data[index]
gda_obj = global_device_array.GlobalDeviceArray.from_callback(
global_input_shape, global_mesh, mesh_axes, cb)
with self.assertRaisesWithLiteralMatch(
ValueError,
"Got an input GDA to pjit with different partitioning than specified "
'in the in_axis_resources argument to pjit. The partitioning must '
'match, or use `jax.experimental.pjit.FROM_GDA` in `in_axis_resources`. '
"Got GDA spec: PartitionSpec('x',) and "
"pjit spec: PartitionSpec('x', 'y') "
'for GDA: GlobalDeviceArray(shape=(8, 2), dtype=float32)'):
@partial(pjit, in_axis_resources=P('x', 'y'), out_axis_resources=P('x', 'y'))
def f(x):
return x
f(gda_obj)
@jtu.with_mesh([('x', 4), ('y', 2)])
def test_pjit_gda_caching(self):
global_mesh = jtu.create_global_mesh((4, 2), ('x', 'y'))
input_shape = (8, 2)
mesh_axes = P('x', 'y')
input_data = np.arange(
prod(input_shape), dtype=np.float32).reshape(input_shape)
def cb(index):
return input_data[index]
gda_obj = global_device_array.GlobalDeviceArray.from_callback(
input_shape, global_mesh, mesh_axes, cb)
@partial(pjit, in_axis_resources=mesh_axes, out_axis_resources=P('x', 'y'))
def f(x, y):
return x @ y.T
before_lower_cache = pjit_lib._pjit_lower.cache_info()
f(gda_obj, gda_obj)
after_lower_cache1 = pjit_lib._pjit_lower.cache_info()
self.assertEqual(before_lower_cache.hits, after_lower_cache1.hits)
self.assertEqual(before_lower_cache.misses + 1, after_lower_cache1.misses)
f(gda_obj, gda_obj)
after_lower_cache2 = pjit_lib._pjit_lower.cache_info()
self.assertEqual(after_lower_cache1.hits + 1, after_lower_cache2.hits)
self.assertEqual(after_lower_cache1.misses, after_lower_cache2.misses)
f(input_data, input_data)
after_lower_cache3 = pjit_lib._pjit_lower.cache_info()
self.assertEqual(after_lower_cache2.hits, after_lower_cache3.hits)
self.assertEqual(after_lower_cache2.misses + 1, after_lower_cache3.misses)
f(gda_obj, input_data)
after_lower_cache4 = pjit_lib._pjit_lower.cache_info()
self.assertEqual(after_lower_cache3.hits, after_lower_cache4.hits)
self.assertEqual(after_lower_cache3.misses + 1, after_lower_cache4.misses)
@jtu.with_mesh([('x', 4), ('y', 2)])
def test_partition_spec_mismatch_semantically_equivalent(self):
global_mesh = jtu.create_global_mesh((4, 2), ('x', 'y'))
global_input_shape = (8, 2)
mesh_axes = P(None)
global_input_data = np.arange(
prod(global_input_shape), dtype=np.float32).reshape(global_input_shape)
def cb(index):
return global_input_data[index]
with jax._src.config.parallel_functions_output_gda(True):
gda_obj = global_device_array.GlobalDeviceArray.from_callback(
global_input_shape, global_mesh, mesh_axes, cb)
@partial(pjit, in_axis_resources=P(None), out_axis_resources=P(None))
def f(x):
return x
output_gda = f(gda_obj)
# Ensure output_gda.mesh_axes = P() is matched with P(None).
self.assertEqual(output_gda.mesh_axes, ())
# P(None) is in_axis_resources.
f(output_gda)
def test_from_gda_duplicates(self):
global_mesh = jtu.create_global_mesh((1, 2), ('x', 'y'))
global_input_shape = (8, 2)
mesh_axes = P('x', 'y')
input_gda = create_gda(global_input_shape, global_mesh, mesh_axes)
# It's occasionally possible to end up with two FROM_GDA singletons (e.g. if
    # pickling in_axis_resources and sending to other processes). Make sure
    # this doesn't cause an error to avoid user confusion.
from_gda_dup = pjit_lib._FromGdaSingleton()
with maps.Mesh(global_mesh.devices, global_mesh.axis_names):
pjit(lambda x: x, in_axis_resources=from_gda_dup, out_axis_resources=None)(
input_gda)
def test_no_recompilation_due_to_in_axis_resources(self):
global_mesh = jtu.create_global_mesh((1, 2), ('x', 'y'))
global_input_shape = (8, 2)
mesh_axes = P(None,)
input_gda = create_gda(global_input_shape, global_mesh, mesh_axes)
with jax._src.config.parallel_functions_output_gda(True):
@partial(pjit, in_axis_resources=mesh_axes, out_axis_resources=mesh_axes)
def f(x):
return x
with global_mesh:
out_gda = f(input_gda)
self.assertEqual(out_gda.mesh_axes, ())
before_cache = pjit_lib._pjit_lower.cache_info()
f(out_gda)
after_cache = pjit_lib._pjit_lower.cache_info()
self.assertEqual(before_cache.hits + 1, after_cache.hits)
self.assertEqual(before_cache.misses, after_cache.misses)
def test_no_recompilation_due_to_fully_replicated_and_gda_inputs(self):
global_mesh = jtu.create_global_mesh((1, 2), ('x', 'y'))
global_input_shape = (8, 2)
mesh_axes = P(None)
global_data = np.arange(
prod(global_input_shape)).reshape(global_input_shape)
with jax._src.config.parallel_functions_output_gda(True):
f = pjit(lambda x: x, in_axis_resources=mesh_axes,
out_axis_resources=mesh_axes)
with global_mesh:
out_gda = f(global_data)
self.assertEqual(out_gda.mesh_axes, ())
before_cache = pjit_lib._pjit_lower.cache_info()
f(out_gda)
after_cache = pjit_lib._pjit_lower.cache_info()
self.assertEqual(before_cache.hits + 1, after_cache.hits)
self.assertEqual(before_cache.misses, after_cache.misses)
def test_pjit_gda_aot_sharding_mismatch(self):
global_mesh = jtu.create_global_mesh((4, 2), ('x', 'y'))
global_input_shape = (8, 2)
input_gda = create_gda(global_input_shape, global_mesh, P('x', 'y'))
with global_mesh:
f = pjit(lambda x: x, in_axis_resources=P('x'), out_axis_resources=P('x'))
compiled = f.lower(jax.ShapedArray(global_input_shape, jnp.float32)).compile()
with self.assertRaisesRegex(
ValueError, "GDA sharding does not match the input sharding."):
compiled(input_gda)
class AutoShardingPjitTest(jtu.JaxTestCase):
@parameterized.named_parameters(
('2d', (4, 2), (4, 2), ('x', 'y')),
# TODO(b/226977360): Support 3D mesh shape for example (2, 2, 2).
('3d', (1, 4, 2), (2, 4, 8, 4), ('x', 'y', 'z')),
('1d', (8,), (8, 2), ('x')),
)
def test_pjit_gda_auto_sharding(self, mesh_shape, global_input_shape,
mesh_axis_names):
if xla_bridge.get_backend().runtime_type == 'stream_executor':
raise unittest.SkipTest('AutoSharding is not supported on stream_executor yet.')
global_mesh = jtu.create_global_mesh(mesh_shape, mesh_axis_names)
input_data = np.arange(
prod(global_input_shape), dtype=np.float32).reshape(global_input_shape)
with jax._src.config.parallel_functions_output_gda(True):
with global_mesh:
f = pjit(lambda x: x, in_axis_resources=AUTO,
out_axis_resources=AUTO)
inp = jax.ShapedArray(input_data.shape, input_data.dtype)
compiled = f.lower(inp, _global_avals=True).compile()
inputs = [create_gda(global_input_shape, global_mesh, ip, input_data)
for ip in compiled.input_shardings]
gda_out = compiled(*inputs)
self.assertIsInstance(gda_out, global_device_array.GlobalDeviceArray)
self.assertArraysEqual(multihost_utils.process_allgather(gda_out),
input_data)
def test_pjit_gda_auto_sharding_multiple_calls_and_caching(self):
if xla_bridge.get_backend().runtime_type == 'stream_executor':
raise unittest.SkipTest('AutoSharding is not supported on stream_executor yet.')
global_mesh = jtu.create_global_mesh((4, 2), ('x', 'y'))
global_input_shape = (8, 2)
input_data = np.arange(
prod(global_input_shape), dtype=np.float32).reshape(global_input_shape)
with jax._src.config.parallel_functions_output_gda(True):
with global_mesh:
f = pjit(lambda x: x, in_axis_resources=AUTO,
out_axis_resources=AUTO)
inp = jax.ShapedArray(input_data.shape, input_data.dtype)
compiled = f.lower(inp, _global_avals=True).compile()
inputs = [create_gda(global_input_shape, global_mesh, ip, input_data)
for ip in compiled.input_shardings]
# `f` is first compiled in `compile_and_get_sharding`.
before_cache = pjit_lib._pjit_lower.cache_info()
gda_out1 = compiled(*inputs)
gda_out2 = f(gda_out1)
after_cache = pjit_lib._pjit_lower.cache_info()
self.assertEqual(before_cache.hits + 1, after_cache.hits)
self.assertEqual(before_cache.misses, after_cache.misses)
self.assertIsInstance(gda_out1, global_device_array.GlobalDeviceArray)
self.assertIsInstance(gda_out2, global_device_array.GlobalDeviceArray)
self.assertArraysEqual(multihost_utils.process_allgather(gda_out1),
input_data)
self.assertArraysEqual(multihost_utils.process_allgather(gda_out2),
input_data)
def test_xla_gda_sharding_mismatch(self):
if xla_bridge.get_backend().runtime_type == 'stream_executor':
raise unittest.SkipTest('AutoSharding is not supported on stream_executor yet.')
global_mesh = jtu.create_global_mesh((2, 2), ('x', 'y'))
global_input_shape = (4, 2)
input_data = np.arange(
prod(global_input_shape), dtype=np.float32).reshape(global_input_shape)
with jax._src.config.parallel_functions_output_gda(True):
with global_mesh:
f = pjit(lambda x: x, in_axis_resources=AUTO, out_axis_resources=AUTO)
inp = jax.ShapedArray(input_data.shape, input_data.dtype)
compiled = f.lower(inp, _global_avals=True).compile()
different_pspec = (P('y', 'x') if compiled.input_shardings[0] == P(('x',), ('y',))
else P('x', 'y'))
gda = create_gda(global_input_shape, global_mesh, different_pspec,
input_data)
with self.assertRaisesRegex(
ValueError, "GDA sharding does not match the input sharding."):
compiled(gda)
with self.assertRaisesRegex(
ValueError, "GDA sharding does not match the input sharding."):
f(gda)
def spec_regex(s):
return str(s).replace(r"(", r"\(").replace(r")", r"\)")
class PJitErrorTest(jtu.JaxTestCase):
@check_1d_2d_mesh(set_mesh=True)
def testNonDivisibleArgs(self, mesh, resources):
x = jnp.ones((3, 2))
spec = P(resources, None)
mesh_size = str(np.prod([dim[1] for dim in mesh], dtype=np.int64))
with self.assertRaisesRegex(ValueError,
r"One of pjit arguments.*" + spec_regex(spec) + r".*"
r"implies that the size of its dimension 0 should be "
r"divisible by " + mesh_size + r", but it is equal to 3"):
pjit(lambda x: x, in_axis_resources=spec, out_axis_resources=None)(x)
@check_1d_2d_mesh(set_mesh=True)
def testNonDivisibleOuts(self, mesh, resources):
x = jnp.ones((3, 2))
spec = P(resources, None)
mesh_size = str(np.prod([dim[1] for dim in mesh], dtype=np.int64))
with self.assertRaisesRegex(ValueError,
r"One of pjit outputs.*" + spec_regex(spec) + r".*"
r"implies that the size of its dimension 0 should be "
r"divisible by " + mesh_size + r", but it is equal to 3"):
pjit(lambda x: x, in_axis_resources=None, out_axis_resources=P(resources, None))(x)
@check_1d_2d_mesh(set_mesh=False)
@jtu.with_mesh([('z', 1)])
def testUndefinedResourcesArgs(self, mesh, resources):
x = jnp.ones((2, 2))
spec = P(resources,)
with self.assertRaisesRegex(ValueError,
r"One of pjit arguments.*" + spec_regex(spec) + r", "
r"but resource axis x is undefined."):
pjit(lambda x: x, in_axis_resources=spec, out_axis_resources=None)(x)
@check_1d_2d_mesh(set_mesh=False)
@jtu.with_mesh([('z', 1)])
def testUndefinedResourcesOuts(self, mesh, resources):
x = jnp.ones((2, 2))
spec = P(resources,)
with self.assertRaisesRegex(ValueError,
r"One of pjit outputs.*" + spec_regex(spec) + r", "
r"but resource axis x is undefined."):
pjit(lambda x: x, in_axis_resources=None, out_axis_resources=spec)(x)
@check_1d_2d_mesh(set_mesh=False)
@jtu.with_mesh([('z', 1)])
def testUndefinedResourcesConstraint(self, mesh, resources):
x = jnp.ones((2, 2))
spec = P(resources,)
with self.assertRaisesRegex(ValueError,
r"One of with_sharding_constraint arguments"
r".*" + spec_regex(spec) + r", but resource axis "
r"x is undefined."):
pjit(lambda x: with_sharding_constraint(x, spec),
in_axis_resources=None, out_axis_resources=None)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testRankTooLowArgs(self):
x = jnp.arange(2)
spec = P('x', 'y')
error = (r"One of pjit arguments.*" + spec_regex(spec) + r", which implies "
r"that it has a rank of at least 2, but it is 1")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: x.sum(), in_axis_resources=spec, out_axis_resources=None)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testRankTooLowArgsAxisResourcesNone(self):
x = jnp.arange(2)
spec = P(None, None)
error = (r"One of pjit arguments.*" + spec_regex(spec) + r", which implies "
r"that it has a rank of at least 2, but it is 1")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: x.sum(), in_axis_resources=spec, out_axis_resources=None)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testRankTooLowOuts(self):
x = jnp.arange(2)
spec = P('x', 'y')
error = (r"One of pjit outputs.*" + spec_regex(spec) + r", which implies "
r"that it has a rank of at least 2, but it is 0")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: x.sum(), in_axis_resources=None, out_axis_resources=spec)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testRankTooLowConstraint(self):
x = jnp.arange(2)
spec = P('x', 'y')
error = (r"One of with_sharding_constraint arguments " +
r"was given.*" + spec_regex(spec) + r", which implies "
r"that it has a rank of at least 2, but it is 1")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: with_sharding_constraint(x, spec),
in_axis_resources=None, out_axis_resources=None)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testRepeatedInResources(self):
x = jnp.arange(2)
for spec in [P('x', 'x'), P('x', ('y', 'x'))]:
error = (r"A single in_axis_resources specification can map every mesh "
r"axis to at most one positional dimension, but " +
spec_regex(spec) + " has duplicate entries for `x`")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: x, in_axis_resources=spec, out_axis_resources=None)(x)
@jtu.with_mesh([('x', 2), ('y', 1)])
def testRepeatedOutResources(self):
x = jnp.arange(2)
for spec in [P('x', 'x'), P('x', ('y', 'x'))]:
error = (r"A single out_axis_resources specification can map every mesh "
r"axis to at most one positional dimension, but " +
spec_regex(spec) + " has duplicate entries for `x`")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: x, in_axis_resources=None, out_axis_resources=spec)(x)
@jtu.with_mesh([('x', 2)])
def testInputShardsXMapAxis(self):
spec = P('x')
f = xmap(pjit(lambda x: x + 2, in_axis_resources=spec, out_axis_resources=None),
in_axes=['i', ...], out_axes=['i', ...], axis_resources={'i': 'x'})
x = jnp.arange(4).reshape((2, 2))
error = (r"pjit input has an axis resources specification of " +
spec_regex(spec) + r" that uses one or more mesh axes already used by "
r"xmap to partition a named axis appearing in its named_shape \(both "
r"use mesh axes `x`\)")
with self.assertRaisesRegex(JAXTypeError, error):
f(x)
@jtu.with_mesh([('x', 2)])
def testOutputShardsXMapAxis(self):
spec = P('x')
f = xmap(pjit(lambda x: x + 2, in_axis_resources=None, out_axis_resources=spec),
in_axes=['i', ...], out_axes=['i', ...], axis_resources={'i': 'x'})
x = jnp.arange(4).reshape((2, 2))
error = (r"pjit output has an axis resources specification of " +
spec_regex(spec) + r" that uses one or more mesh axes already used by "
r"xmap to partition a named axis appearing in its named_shape \(both "
r"use mesh axes `x`\)")
with self.assertRaisesRegex(JAXTypeError, error):
f(x)
@jtu.with_mesh([('x', 2)])
def testConstraintShardsXMapAxis(self):
spec = P('x')
f = xmap(lambda x: with_sharding_constraint(x, axis_resources=spec),
in_axes=['i', ...], out_axes=['i', ...], axis_resources={'i': 'x'})
x = jnp.arange(4).reshape((2, 2))
error = (r"with_sharding_constraint input has an axis resources specification of " +
spec_regex(spec) + r" that uses one or more mesh axes already used by "
r"xmap to partition a named axis appearing in its named_shape \(both "
r"use mesh axes `x`\)")
with self.assertRaisesRegex(JAXTypeError, error):
f(x)
@jtu.with_mesh([('x', 2)])
def testCatchesInnerXMapErrors(self):
f = pjit(xmap(lambda x, y: x, in_axes=(['i'], ['j']), out_axes=['i', 'j'],
axis_resources={'i': 'x', 'j': 'x'}),
in_axis_resources=None, out_axis_resources=None)
x = jnp.arange(4)
with self.assertRaises(JAXTypeError):
f(x, x)
def testEmptyMesh(self):
error = (r"pjit requires a non-empty mesh! Are you sure that it's defined "
r"at the call site?")
with self.assertRaisesRegex(RuntimeError, error):
pjit(lambda x: x, in_axis_resources=None, out_axis_resources=None)(jnp.arange(4))
@jtu.with_mesh([('x', 2)])
def testAxisResourcesMismatch(self):
x = jnp.ones([])
p = [None, None, None]
pjit(lambda x: x, (p,), p)([x, x, x]) # OK
error = re.escape(
"pjit in_axis_resources specification must be a tree prefix of the "
"positional arguments tuple passed to the `pjit`-decorated function. "
"In particular, pjit in_axis_resources must either be a None, a "
"PartitionSpec, or a tuple of length equal to the number of positional "
"arguments. But pjit in_axis_resources is the wrong length: got a "
"tuple or list of length 3 for an args tuple of length 2.")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x, y: x, p, p)(x, x)
Foo = namedtuple('Foo', ['x'])
error = "in_axis_resources is not a tuple.*might need to be wrapped"
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: x, Foo(None), Foo(None))(Foo(x))
pjit(lambda x: x, (Foo(None),), Foo(None))(Foo(x)) # OK w/ singleton tuple
# TODO(apaszke,mattjj): Disable implicit list casts and enable this
# error = ("it looks like pjit in_axis_resources might need to be wrapped in "
# "a singleton tuple.")
# with self.assertRaisesRegex(ValueError, error):
# pjit(lambda x, y: x, p, p)([x, x, x])
# TODO(apaszke): Disable implicit list casts and enable this
# error = re.escape(
# r"pjit in_axis_resources specification must be a tree prefix of the "
# r"corresponding value, got specification (None, None, None) for value "
# r"tree PyTreeDef(([*, *, *],)). Note that pjit in_axis_resources that "
# r"are non-trivial pytrees should always be wrapped in a tuple representing "
# r"the argument list. In particular, you're passing in a single argument "
# r"which means that pjit in_axis_resources might need to be wrapped in a "
# r"singleton tuple.")
# with self.assertRaisesRegex(ValueError, error):
# pjit(lambda x: x, p, p)([x, x, x]) # Error, but make sure we hint at singleton tuple
error = re.escape(
"pytree structure error: different numbers of pytree children at "
"key path\n"
" pjit out_axis_resources tree root\n"
"At that key path, the prefix pytree pjit out_axis_resources has a "
"subtree of type\n"
" <class 'list'>\n"
"with 2 children, but at the same key path the full pytree has a "
"subtree of the same type but with 3 children.")
with self.assertRaisesRegex(ValueError, error):
pjit(lambda x: x, (p,), [p, None])([x, x, x]) # Error, we raise a generic tree mismatch message
@jtu.with_mesh([('x', 2)])
def testNestedDifferentResources(self):
@partial(pjit, in_axis_resources=P('x'), out_axis_resources=None)
def f(x):
with maps.Mesh(np.array([jax.local_devices()[0]]), ('x')):
@partial(pjit, in_axis_resources=P('x'), out_axis_resources=None)
def h(x):
return x
return h(x)
xshape = (2, 5, 6)
x = jnp.arange(np.prod(xshape)).reshape(xshape)
with self.assertRaisesRegex(RuntimeError,
"Changing the physical mesh is not allowed.*"):
f(x)
class UtilTest(jtu.JaxTestCase):
def testOpShardingRoundTrip(self):
FakeDevice = namedtuple('FakeDevice', ['id'])
mesh_named_shape = OrderedDict([('a', 2), ('b', 3), ('c', 4), ('d', 7), ('e', 4)])
mesh_axes, mesh_shape = unzip2(mesh_named_shape.items())
devices = [FakeDevice(i) for i in range(np.prod(list(mesh_shape)))]
mesh = pxla.Mesh(np.array(devices).reshape(*mesh_shape), tuple(mesh_axes))
dims = 5
aval = jax.core.ShapedArray((len(devices),) * dims, jnp.float32)
def roundtrip(spec):
op_sharding = pjit_lib.get_aval_sharding_proto(aval, spec, mesh)
parsed_spec = pjit_lib.parse_flatten_op_sharding(op_sharding, mesh)[0].partitions
self.assertEqual(parsed_spec[:len(spec)], spec)
self.assertEqual(parsed_spec[len(spec):], ((),) * (len(parsed_spec) - len(spec)))
special_specs = [P()]
for spec in special_specs:
roundtrip(spec)
rng = self.rng()
for i in range(100):
spec = [()] * dims
for axis in rng.permutation(mesh_axes)[:rng.randint(low=1, high=len(mesh_axes) + 1)]:
spec[rng.choice(dims)] += (axis,)
roundtrip(P(*spec))
@parameterized.named_parameters(
("linear", {'x': 0, 'y': 1, 'z': 2}, P(('x',), ('y',), ('z',))),
("combine", {'x': 0, 'y': 0, 'z': 1}, P(('x', 'y'), ('z',))),
("skip", {'x': 0, 'y': 0, 'z': 2}, P(('x', 'y'), None, ('z',))),
("multi_skip", {'x': 0, 'y': 1, 'z': 3}, P(('x',), ('y',), None, ('z',))),
)
def test_array_mapping_to_axis_resources(self, inp, expected_out):
self.assertEqual(pxla.array_mapping_to_axis_resources(inp), expected_out)
def test_get_input_metadata_fully_replicated(self):
global_mesh = jtu.create_global_mesh((2, 2), ('x', 'y'))
global_in_aval1 = jax.core.ShapedArray((4, 4), jnp.int32)
global_in_aval2 = jax.core.ShapedArray((4, 4, 4), jnp.int32)
global_in_aval3 = jax.core.ShapedArray((), jnp.int32)
in_avals = [global_in_aval1, global_in_aval2, global_in_aval3]
_, out_indices, _ = pxla._get_input_metadata(
in_avals, global_mesh, [{}, {}, {}], [False, False, False])
self.assertLen(out_indices, len(in_avals))
self.assertTrue(all(len(out) == len(global_mesh.local_devices)
for out in out_indices))
self.assertTrue(all(len(i) == aval.ndim
for out, aval in safe_zip(out_indices, in_avals) for i in out))
self.assertTrue(all(i == (slice(None),) * aval.ndim
for out, aval in safe_zip(out_indices, in_avals) for i in out))
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
build_imagenet_data.py
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts ImageNet data to TFRecords file format with Example protos.
The raw ImageNet data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
...
where 'n01440764' is the unique synset label associated with
these images.
The training data set consists of 1000 sub-directories (i.e. labels)
each containing 1200 JPEG images for a total of 1.2M JPEG images.
The evaluation data set consists of 1000 sub-directories (i.e. labels)
each containing 50 JPEG images for a total of 50K JPEG images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of 1024 and 128 TFRecord files, respectively.
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
train_directory/train-00127-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
Each validation TFRecord file contains ~390 records. Each training TFRecord
file contains ~1250 records. Each record within the TFRecord file is a
serialized Example proto. The Example proto contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
  image/format: string, specifying the format, always 'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [1, 1000] where 0 is not used.
image/class/synset: string specifying the unique ID of the label,
e.g. 'n01440764'
image/class/text: string specifying the human-readable version of the label
e.g. 'red fox, Vulpes vulpes'
  image/object/bbox/xmin: list of floats specifying the 0+ human annotated
    bounding boxes
  image/object/bbox/xmax: list of floats specifying the 0+ human annotated
    bounding boxes
  image/object/bbox/ymin: list of floats specifying the 0+ human annotated
    bounding boxes
  image/object/bbox/ymax: list of floats specifying the 0+ human annotated
    bounding boxes
image/object/bbox/label: integer specifying the index in a classification
layer. The label ranges from [1, 1000] where 0 is not used. Note this is
always identical to the image label.
Note that the length of xmin is identical to the length of xmax, ymin and ymax
for each example.
Running this script using 16 threads may take around 2.5 hours on an HP Z420.
"""
from __future__ import absolute_import, division, print_function
import os
import random
import sys
import threading
from datetime import datetime
import numpy as np
import tensorflow as tf
from six.moves import xrange # pylint: disable=redefined-builtin
tf.app.flags.DEFINE_string('train_directory', '/tmp/',
'Training data directory')
tf.app.flags.DEFINE_string('validation_directory', '/tmp/',
'Validation data directory')
tf.app.flags.DEFINE_string('output_directory', '/tmp/',
'Output data directory')
tf.app.flags.DEFINE_integer('train_shards', 1024,
'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 128,
'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 8,
'Number of threads to preprocess the images.')
# The labels file contains the list of valid labels.
# Assumes that the file contains entries as such:
# n01440764
# n01443537
# n01484850
# where each line corresponds to a label expressed as a synset. We map
# each synset contained in the file to an integer (based on the alphabetical
# ordering). See below for details.
tf.app.flags.DEFINE_string('labels_file',
'imagenet_lsvrc_2015_synsets.txt',
'Labels file')
# This file contains the mapping from synset to human-readable label.
# Assumes each line of the file looks like:
#
# n02119247 black fox
# n02119359 silver fox
# n02119477 red fox, Vulpes fulva
#
# where each line corresponds to a unique mapping. Note that each line is
# formatted as <synset>\t<human readable label>.
tf.app.flags.DEFINE_string('imagenet_metadata_file',
'imagenet_metadata.txt',
'ImageNet metadata file')
# This file is the output of process_bounding_box.py
# Assumes each line of the file looks like:
#
# n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
#
# where each line corresponds to one bounding box annotation associated
# with an image. Each line can be parsed as:
#
# <JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
#
# Note that there might exist multiple bounding box annotations associated
# with an image file. (An illustrative parsing sketch follows the flag
# definitions below.)
tf.app.flags.DEFINE_string('bounding_box_file',
'./imagenet_2012_bounding_boxes.csv',
'Bounding box file')
FLAGS = tf.app.flags.FLAGS
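# Not part of the original script: a minimal, illustrative sketch of how the
# three metadata files described in the comments above could be loaded. The
# helper name `_load_metadata_sketch` is hypothetical; the script's actual
# loading logic lives in its own helper functions.
def _load_metadata_sketch(labels_file, metadata_file, bbox_file):
  """Returns (synset -> label, synset -> human text, filename -> bboxes)."""
  # Synsets are assigned 1-based integer labels in file order; label 0 is
  # left unused, matching the module docstring above.
  synsets = [l.strip() for l in open(labels_file)]
  synset_to_label = {synset: index + 1 for index, synset in enumerate(synsets)}
  # Each metadata line is formatted as '<synset>\t<human readable label>'.
  synset_to_human = {}
  for line in open(metadata_file):
    synset, human = line.strip().split('\t')
    synset_to_human[synset] = human
  # Each bounding box line is '<JPEG file name>,<xmin>,<ymin>,<xmax>,<ymax>'.
  file_to_bboxes = {}
  for line in open(bbox_file):
    parts = line.strip().split(',')
    file_to_bboxes.setdefault(parts[0], []).append([float(v) for v in parts[1:5]])
  return synset_to_label, synset_to_human, file_to_bboxes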
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _float_feature(value):
"""Wrapper for inserting float features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_buffer, label, synset, human, bbox,
height, width):
"""Build an Example proto for an example.
Args:
filename: string, path to an image file, e.g., '/path/to/example.JPG'
image_buffer: string, JPEG encoding of RGB image
label: integer, identifier for the ground truth for the network
synset: string, unique WordNet ID specifying the label, e.g., 'n02323233'
human: string, human-readable label, e.g., 'red fox, Vulpes vulpes'
    bbox: list of bounding boxes; each box is a list of floats
specifying [xmin, ymin, xmax, ymax]. All boxes are assumed to belong to
the same label as the image label.
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""
xmin = []
ymin = []
xmax = []
ymax = []
for b in bbox:
assert len(b) == 4
# pylint: disable=expression-not-assigned
[l.append(point) for l, point in zip([xmin, ymin, xmax, ymax], b)]
# pylint: enable=expression-not-assigned
colorspace = 'RGB'
channels = 3
image_format = 'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/colorspace': _bytes_feature(colorspace),
'image/channels': _int64_feature(channels),
'image/class/label': _int64_feature(label),
'image/class/synset': _bytes_feature(synset),
'image/class/text': _bytes_feature(human),
'image/object/bbox/xmin': _float_feature(xmin),
'image/object/bbox/xmax': _float_feature(xmax),
'image/object/bbox/ymin': _float_feature(ymin),
'image/object/bbox/ymax': _float_feature(ymax),
'image/object/bbox/label': _int64_feature([label] * len(xmin)),
'image/format': _bytes_feature(image_format),
'image/filename': _bytes_feature(os.path.basename(filename)),
'image/encoded': _bytes_feature(image_buffer)}))
return example
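# Not part of the original script: a minimal sketch showing how a serialized
# Example written by _convert_to_example could be parsed back with the TF 1.x
# API used in this file. The helper name `_parse_example_proto_sketch` and the
# choice of returned fields are illustrative only.
def _parse_example_proto_sketch(serialized_example):
  """Parses a serialized Example proto into image, label and bbox fields."""
  feature_map = {
      'image/encoded': tf.FixedLenFeature([], dtype=tf.string, default_value=''),
      'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64, default_value=-1),
      'image/class/synset': tf.FixedLenFeature([], dtype=tf.string, default_value=''),
      'image/class/text': tf.FixedLenFeature([], dtype=tf.string, default_value=''),
      # Bounding box coordinates are stored as variable-length float lists.
      'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
      'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),
      'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),
      'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32),
  }
  features = tf.parse_single_example(serialized_example, feature_map)
  return (features['image/encoded'], features['image/class/label'],
          features['image/object/bbox/xmin'])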
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that converts CMYK JPEG data to RGB JPEG data.
self._cmyk_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_jpeg(self._cmyk_data, channels=0)
self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def png_to_jpeg(self, image_data):
return self._sess.run(self._png_to_jpeg,
feed_dict={self._png_data: image_data})
def cmyk_to_rgb(self, image_data):
return self._sess.run(self._cmyk_to_rgb,
feed_dict={self._cmyk_data: image_data})
def decode_jpeg(self, image_data):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
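# Illustrative usage (editor's sketch): one ImageCoder is typically created per worker
# thread and reused for every file, e.g.
#   coder = ImageCoder()
#   rgb = coder.decode_jpeg(jpeg_bytes)  # numpy array of shape (height, width, 3)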
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
# File list from:
# https://groups.google.com/forum/embed/?place=forum/torch7#!topic/torch7/fOSTXHIESSU
return 'n02105855_2933.JPEG' in filename
def _is_cmyk(filename):
"""Determine if file contains a CMYK JPEG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a JPEG encoded with CMYK color space.
"""
# File list from:
# https://github.com/cytsai/ilsvrc-cmyk-image-list
blacklist = ['n01739381_1309.JPEG', 'n02077923_14822.JPEG',
'n02447366_23489.JPEG', 'n02492035_15739.JPEG',
'n02747177_10752.JPEG', 'n03018349_4028.JPEG',
'n03062245_4620.JPEG', 'n03347037_9675.JPEG',
'n03467068_12171.JPEG', 'n03529860_11437.JPEG',
'n03544143_17228.JPEG', 'n03633091_5218.JPEG',
'n03710637_5125.JPEG', 'n03961711_5286.JPEG',
'n04033995_2932.JPEG', 'n04258138_17003.JPEG',
'n04264628_27969.JPEG', 'n04336792_7448.JPEG',
'n04371774_5854.JPEG', 'n04596742_4225.JPEG',
'n07583066_647.JPEG', 'n13037406_4650.JPEG']
return filename.split('/')[-1] in blacklist
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
image_data = tf.gfile.FastGFile(filename, 'r').read()
# Clean the dirty data.
if _is_png(filename):
# 1 image is a PNG.
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
elif _is_cmyk(filename):
# 22 JPEG images are in CMYK colorspace.
print('Converting CMYK to RGB for %s' % filename)
image_data = coder.cmyk_to_rgb(image_data)
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
synsets, labels, humans, bboxes, num_shards):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
    thread_index: integer, unique batch index within [0, len(ranges)).
    ranges: list of pairs of integers specifying the range of files each
      batch analyzes in parallel.
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
humans: list of strings; each string is a human-readable label
    bboxes: list of bounding boxes for each image. Note that each entry in this
      list may contain 0 or more bounding boxes, one per bounding box
      annotation for the image.
num_shards: integer number of shards for this data set.
"""
# Each thread produces N shards where N = int(num_shards / num_threads).
  # For instance, if num_shards = 128 and num_threads = 2, then the first
# thread would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
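  # Worked example: with num_shards = 128 and num_threads = 2, each thread writes 64
  # shards, and shard_ranges splits this thread's [start, end) file range into 64
  # contiguous, roughly equal pieces (one per shard).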
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in xrange(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_directory, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
filename = filenames[i]
label = labels[i]
synset = synsets[i]
human = humans[i]
bbox = bboxes[i]
image_buffer, height, width = _process_image(filename, coder)
example = _convert_to_example(filename, image_buffer, label,
synset, human, bbox,
height, width)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
writer.close()
print('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print('%s [thread %d]: Wrote %d images to %d shards.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
def _process_image_files(name, filenames, synsets, labels, humans,
bboxes, num_shards):
"""Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
humans: list of strings; each string is a human-readable label
    bboxes: list of bounding boxes for each image. Note that each entry in this
      list may contain 0 or more bounding boxes, one per bounding box
      annotation for the image.
num_shards: integer number of shards for this data set.
"""
assert len(filenames) == len(synsets)
assert len(filenames) == len(labels)
assert len(filenames) == len(humans)
assert len(filenames) == len(bboxes)
  # Break all images into batches, where batch i covers files [ranges[i][0], ranges[i][1]).
spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int)
ranges = []
threads = []
for i in xrange(len(spacing) - 1):
ranges.append([spacing[i], spacing[i+1]])
# Launch a thread for each batch.
print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
sys.stdout.flush()
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a generic TensorFlow-based utility for converting all image codings.
coder = ImageCoder()
threads = []
for thread_index in xrange(len(ranges)):
args = (coder, thread_index, ranges, name, filenames,
synsets, labels, humans, bboxes, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(filenames)))
sys.stdout.flush()
def _find_image_files(data_dir, labels_file):
"""Build a list of all images files and labels in the data set.
Args:
data_dir: string, path to the root directory of images.
Assumes that the ImageNet data set resides in JPEG files located in
the following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
where 'n01440764' is the unique synset label associated with these images.
labels_file: string, path to the labels file.
      The list of valid labels is held in this file. Assumes that the file
contains entries as such:
n01440764
n01443537
n01484850
where each line corresponds to a label expressed as a synset. We map
each synset contained in the file to an integer (based on the alphabetical
ordering) starting with the integer 1 corresponding to the synset
contained in the first line.
The reason we start the integer labels at 1 is to reserve label 0 as an
unused background class.
Returns:
filenames: list of strings; each string is a path to an image file.
synsets: list of strings; each string is a unique WordNet ID.
labels: list of integer; each integer identifies the ground truth.
"""
print('Determining list of input files and labels from %s.' % data_dir)
challenge_synsets = [l.strip() for l in
tf.gfile.FastGFile(labels_file, 'r').readlines()]
labels = []
filenames = []
synsets = []
# Leave label index 0 empty as a background class.
label_index = 1
# Construct the list of JPEG files and labels.
for synset in challenge_synsets:
jpeg_file_path = '%s/%s/*.JPEG' % (data_dir, synset)
matching_files = tf.gfile.Glob(jpeg_file_path)
labels.extend([label_index] * len(matching_files))
synsets.extend([synset] * len(matching_files))
filenames.extend(matching_files)
if not label_index % 100:
print('Finished finding files in %d of %d classes.' % (
label_index, len(challenge_synsets)))
label_index += 1
# Shuffle the ordering of all image files in order to guarantee
# random ordering of the images with respect to label in the
# saved TFRecord files. Make the randomization repeatable.
shuffled_index = range(len(filenames))
random.seed(12345)
random.shuffle(shuffled_index)
filenames = [filenames[i] for i in shuffled_index]
synsets = [synsets[i] for i in shuffled_index]
labels = [labels[i] for i in shuffled_index]
print('Found %d JPEG files across %d labels inside %s.' %
(len(filenames), len(challenge_synsets), data_dir))
return filenames, synsets, labels
def _find_human_readable_labels(synsets, synset_to_human):
"""Build a list of human-readable labels.
Args:
synsets: list of strings; each string is a unique WordNet ID.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
Returns:
List of human-readable strings corresponding to each synset.
"""
humans = []
for s in synsets:
assert s in synset_to_human, ('Failed to find: %s' % s)
humans.append(synset_to_human[s])
return humans
def _find_image_bounding_boxes(filenames, image_to_bboxes):
"""Find the bounding boxes for a given image file.
Args:
filenames: list of strings; each string is a path to an image file.
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
Returns:
    List of bounding boxes for each image. Note that each entry in this
    list may contain 0 or more bounding boxes, one per bounding box
    annotation for the image.
"""
num_image_bbox = 0
bboxes = []
for f in filenames:
basename = os.path.basename(f)
if basename in image_to_bboxes:
bboxes.append(image_to_bboxes[basename])
num_image_bbox += 1
else:
bboxes.append([])
print('Found %d images with bboxes out of %d images' % (
num_image_bbox, len(filenames)))
return bboxes
def _process_dataset(name, directory, num_shards, synset_to_human,
image_to_bboxes):
"""Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
"""
filenames, synsets, labels = _find_image_files(directory, FLAGS.labels_file)
humans = _find_human_readable_labels(synsets, synset_to_human)
bboxes = _find_image_bounding_boxes(filenames, image_to_bboxes)
_process_image_files(name, filenames, synsets, labels,
humans, bboxes, num_shards)
def _build_synset_lookup(imagenet_metadata_file):
"""Build lookup for synset to human-readable label.
Args:
imagenet_metadata_file: string, path to file containing mapping from
synset to human-readable label.
Assumes each line of the file looks like:
n02119247 black fox
n02119359 silver fox
n02119477 red fox, Vulpes fulva
where each line corresponds to a unique mapping. Note that each line is
formatted as <synset>\t<human readable label>.
Returns:
Dictionary of synset to human labels, such as:
'n02119022' --> 'red fox, Vulpes vulpes'
"""
lines = tf.gfile.FastGFile(imagenet_metadata_file, 'r').readlines()
synset_to_human = {}
for l in lines:
if l:
parts = l.strip().split('\t')
assert len(parts) == 2
synset = parts[0]
human = parts[1]
synset_to_human[synset] = human
return synset_to_human
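# For example, given a metadata line "n02119477\tred fox, Vulpes fulva", the lookup
# would contain: synset_to_human['n02119477'] == 'red fox, Vulpes fulva'.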
def _build_bounding_box_lookup(bounding_box_file):
"""Build a lookup from image file to bounding boxes.
Args:
bounding_box_file: string, path to file with bounding boxes annotations.
Assumes each line of the file looks like:
n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
where each line corresponds to one bounding box annotation associated
with an image. Each line can be parsed as:
<JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
  Note that there might exist multiple bounding box annotations associated
with an image file. This file is the output of process_bounding_boxes.py.
Returns:
Dictionary mapping image file names to a list of bounding boxes. This list
contains 0+ bounding boxes.
"""
lines = tf.gfile.FastGFile(bounding_box_file, 'r').readlines()
images_to_bboxes = {}
num_bbox = 0
num_image = 0
for l in lines:
if l:
parts = l.split(',')
assert len(parts) == 5, ('Failed to parse: %s' % l)
filename = parts[0]
xmin = float(parts[1])
ymin = float(parts[2])
xmax = float(parts[3])
ymax = float(parts[4])
box = [xmin, ymin, xmax, ymax]
if filename not in images_to_bboxes:
images_to_bboxes[filename] = []
num_image += 1
images_to_bboxes[filename].append(box)
num_bbox += 1
print('Successfully read %d bounding boxes '
'across %d images.' % (num_bbox, num_image))
return images_to_bboxes
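# For example, the line "n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940" yields
#   images_to_bboxes['n00007846_64193.JPEG'] == [[0.0060, 0.2620, 0.7545, 0.9940]]
# and any further lines for the same file append additional [xmin, ymin, xmax, ymax]
# boxes to that list.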
def main(unused_argv):
assert not FLAGS.train_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
assert not FLAGS.validation_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with '
'FLAGS.validation_shards')
print('Saving results to %s' % FLAGS.output_directory)
# Build a map from synset to human-readable label.
synset_to_human = _build_synset_lookup(FLAGS.imagenet_metadata_file)
image_to_bboxes = _build_bounding_box_lookup(FLAGS.bounding_box_file)
# Run it!
_process_dataset('validation', FLAGS.validation_directory,
FLAGS.validation_shards, synset_to_human, image_to_bboxes)
_process_dataset('train', FLAGS.train_directory, FLAGS.train_shards,
synset_to_human, image_to_bboxes)
if __name__ == '__main__':
tf.app.run()
|
threading_comm.py
|
import threading
from queue import Queue
def creator(data, q):
"""
    Producer: example of passing data between threads
"""
print('Creating data and putting it on the queue')
print('\n')
for item in data:
evt = threading.Event()
q.put((item, evt))
        print('Waiting for the consumer to process the data')
evt.wait()
def consumer(q):
"""
    Consumer: example of passing data between threads
"""
while True:
data, evt = q.get()
print('Receive Original Data : {}'.format(data))
processed = data * 5
print('Receive Processed Data : {}'.format(processed))
print('\n')
evt.set()
q.task_done()
if __name__ == '__main__':
q = Queue()
data = [7, 14, 39, 59, 77, 1, 109, 99, 167, 920, 1035]
thread_one = threading.Thread(target=creator, args=(data, q))
    thread_two = threading.Thread(target=consumer, args=(q,), daemon=True)  # daemon so the process exits once the queue is drained
thread_one.start()
thread_two.start()
q.join()
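# Expected behaviour (editor's note): for each item the creator blocks on evt.wait()
# until the consumer has printed the original value and the processed value (item * 5)
# and called evt.set(), so the two threads advance in lock-step over the data list.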
|
engine.py
|
import copy
import json
import os
import platform
import queue
import shlex
import subprocess
import threading
import time
import traceback
from typing import Callable, Dict, List, Optional
from kivy.utils import platform as kivy_platform
from katrain.core.constants import (
OUTPUT_DEBUG,
OUTPUT_ERROR,
OUTPUT_EXTRA_DEBUG,
OUTPUT_KATAGO_STDERR,
DATA_FOLDER,
KATAGO_EXCEPTION,
)
from katrain.core.game_node import GameNode
from katrain.core.lang import i18n
from katrain.core.sgf_parser import Move
from katrain.core.utils import find_package_resource, json_truncate_arrays
class BaseEngine: # some common elements between analysis and contribute engine
RULESETS_ABBR = [
("jp", "japanese"),
("cn", "chinese"),
("ko", "korean"),
("aga", "aga"),
("tt", "tromp-taylor"),
("nz", "new zealand"),
("stone_scoring", "stone_scoring"),
]
RULESETS = {fromkey: name for abbr, name in RULESETS_ABBR for fromkey in [abbr, name]}
def __init__(self, katrain, config):
self.katrain = katrain
self.config = config
@staticmethod
def get_rules(ruleset):
if ruleset.strip().startswith("{"):
try:
ruleset = json.loads(ruleset)
except json.JSONDecodeError:
pass
if isinstance(ruleset, dict):
return ruleset
return KataGoEngine.RULESETS.get(str(ruleset).lower(), "japanese")
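    # e.g. get_rules("jp") and get_rules("japanese") both resolve to "japanese", while a
    # JSON string such as '{"koRule": "SIMPLE"}' is parsed and returned as a dict.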
def advance_showing_game(self):
pass # avoid transitional error
def status(self):
return "" # avoid transitional error
def get_engine_path(self, exe):
if not exe:
if kivy_platform == "win":
exe = "katrain/KataGo/katago.exe"
elif kivy_platform == "linux":
exe = "katrain/KataGo/katago"
else:
exe = find_package_resource("katrain/KataGo/katago-osx") # github actions built
if not os.path.isfile(exe) or "arm64" in platform.version().lower():
exe = "katago" # e.g. MacOS after brewing
if exe.startswith("katrain"):
exe = find_package_resource(exe)
exepath, exename = os.path.split(exe)
if exepath and not os.path.isfile(exe):
self.on_error(i18n._("Kata exe not found").format(exe=exe), "KATAGO-EXE")
return None
elif not exepath:
paths = os.getenv("PATH", ".").split(os.pathsep) + ["/opt/homebrew/bin/"]
exe_with_paths = [os.path.join(path, exe) for path in paths if os.path.isfile(os.path.join(path, exe))]
if not exe_with_paths:
self.on_error(i18n._("Kata exe not found in path").format(exe=exe), "KATAGO-EXE")
return None
exe = exe_with_paths[0]
return exe
def on_error(self, message, code, allow_popup):
print("ERROR", message, code)
class KataGoEngine(BaseEngine):
"""Starts and communicates with the KataGO analysis engine"""
def __init__(self, katrain, config):
super().__init__(katrain, config)
self.allow_recovery = self.config.get("allow_recovery", True) # if false, don't give popups
self.queries = {} # outstanding query id -> start time and callback
self.query_counter = 0
self.katago_process = None
self.base_priority = 0
self.override_settings = {"reportAnalysisWinratesAs": "BLACK"} # force these settings
self.analysis_thread = None
self.stderr_thread = None
self.write_stdin_thread = None
self.shell = False
self.write_queue = queue.Queue()
self.thread_lock = threading.Lock()
if config.get("altcommand", ""):
self.command = config["altcommand"]
self.shell = True
else:
model = find_package_resource(config["model"])
cfg = find_package_resource(config["config"])
exe = self.get_engine_path(config.get("katago", "").strip())
if not exe:
return
if not os.path.isfile(model):
self.on_error(i18n._("Kata model not found").format(model=model), code="KATAGO-FILES")
return # don't start
if not os.path.isfile(cfg):
self.on_error(i18n._("Kata config not found").format(config=cfg), code="KATAGO-FILES")
return # don't start
self.command = shlex.split(
f'"{exe}" analysis -model "{model}" -config "{cfg}" -analysis-threads {config["threads"]} -override-config "homeDataDir={os.path.expanduser(DATA_FOLDER)}"'
)
self.start()
def on_error(self, message, code=None, allow_popup=True):
self.katrain.log(message, OUTPUT_ERROR)
if self.allow_recovery and allow_popup:
self.katrain("engine_recovery_popup", message, code)
def start(self):
with self.thread_lock:
self.write_queue = queue.Queue()
try:
self.katrain.log(f"Starting KataGo with {self.command}", OUTPUT_DEBUG)
startupinfo = None
if hasattr(subprocess, "STARTUPINFO"):
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW # stop command box popups on win/pyinstaller
self.katago_process = subprocess.Popen(
self.command,
startupinfo=startupinfo,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=self.shell,
)
except (FileNotFoundError, PermissionError, OSError) as e:
self.on_error(i18n._("Starting Kata failed").format(command=self.command, error=e), code="c")
return # don't start
self.analysis_thread = threading.Thread(target=self._analysis_read_thread, daemon=True)
self.stderr_thread = threading.Thread(target=self._read_stderr_thread, daemon=True)
self.write_stdin_thread = threading.Thread(target=self._write_stdin_thread, daemon=True)
self.analysis_thread.start()
self.stderr_thread.start()
self.write_stdin_thread.start()
def on_new_game(self):
self.base_priority += 1
if not self.is_idle():
with self.thread_lock:
self.write_queue = queue.Queue()
self.terminate_queries(only_for_node=None, lock=False)
self.queries = {}
def terminate_queries(self, only_for_node=None, lock=True):
if lock:
with self.thread_lock:
return self.terminate_queries(only_for_node=only_for_node, lock=False)
for query_id, (_, _, _, _, node) in list(self.queries.items()):
if only_for_node is None or only_for_node is node:
self.terminate_query(query_id)
def terminate_query(self, query_id):
if query_id is not None:
self.send_query({"action": "terminate", "terminateId": query_id}, None, None)
self.queries.pop(query_id, None)
def restart(self):
self.queries = {}
self.shutdown(finish=False)
self.start()
def check_alive(self, os_error="", exception_if_dead=False, maybe_open_recovery=False):
ok = self.katago_process and self.katago_process.poll() is None
if not ok and exception_if_dead:
if self.katago_process:
code = self.katago_process and self.katago_process.poll()
if code == 3221225781:
died_msg = i18n._("Engine missing DLL")
else:
died_msg = i18n._("Engine died unexpectedly").format(error=f"{os_error} status {code}")
if code != 1: # deliberate exit
self.on_error(died_msg, code, allow_popup=maybe_open_recovery)
self.katago_process = None # return from threads
else:
self.katrain.log(i18n._("Engine died unexpectedly").format(error=os_error), OUTPUT_DEBUG)
return ok
def wait_to_finish(self):
while self.queries and self.katago_process and self.katago_process.poll() is None:
time.sleep(0.1)
def shutdown(self, finish=False):
process = self.katago_process
if finish and process:
self.wait_to_finish()
if process:
self.katago_process = None
self.katrain.log("Terminating KataGo process", OUTPUT_DEBUG)
process.terminate()
self.katrain.log("Terminated KataGo process", OUTPUT_DEBUG)
if finish is not None: # don't care if exiting app
for t in [self.write_stdin_thread, self.analysis_thread, self.stderr_thread]:
if t:
t.join()
def is_idle(self):
return not self.queries and self.write_queue.empty()
def queries_remaining(self):
return len(self.queries) + int(not self.write_queue.empty())
def _read_stderr_thread(self):
while self.katago_process is not None:
try:
line = self.katago_process.stderr.readline()
if line:
if b"Uncaught exception" in line or b"what()" in line: # linux=what
msg = f"KataGo Engine Failed: {line.decode(errors='ignore')[9:].strip()}"
self.on_error(msg, KATAGO_EXCEPTION)
return
try:
self.katrain.log(line.decode(errors="ignore").strip(), OUTPUT_KATAGO_STDERR)
except Exception as e:
print("ERROR in processing KataGo stderr:", line, "Exception", e)
elif not self.check_alive(exception_if_dead=True):
return
except Exception as e:
self.katrain.log(f"Exception in reading stderr: {e}", OUTPUT_DEBUG)
return
def _analysis_read_thread(self):
while self.katago_process is not None:
try:
line = self.katago_process.stdout.readline().strip()
if self.katago_process and not line:
if not self.check_alive(exception_if_dead=True, maybe_open_recovery=True):
return
except OSError as e:
self.check_alive(os_error=str(e), exception_if_dead=True, maybe_open_recovery=True)
return
if b"Uncaught exception" in line:
msg = f"KataGo Engine Failed: {line.decode(errors='ignore')}"
self.on_error(msg, KATAGO_EXCEPTION)
return
if not line:
continue
try:
analysis = json.loads(line)
if "id" not in analysis:
self.katrain.log(f"Error without ID {analysis} received from KataGo", OUTPUT_ERROR)
continue
query_id = analysis["id"]
if query_id not in self.queries:
self.katrain.log(
f"Query result {query_id} discarded -- recent new game or node reset?", OUTPUT_DEBUG
)
continue
callback, error_callback, start_time, next_move, _ = self.queries[query_id]
if "error" in analysis:
del self.queries[query_id]
if error_callback:
error_callback(analysis)
elif not (next_move and "Illegal move" in analysis["error"]): # sweep
self.katrain.log(f"{analysis} received from KataGo", OUTPUT_ERROR)
elif "warning" in analysis:
self.katrain.log(f"{analysis} received from KataGo", OUTPUT_DEBUG)
elif "terminateId" in analysis:
self.katrain.log(f"{analysis} received from KataGo", OUTPUT_DEBUG)
else:
partial_result = analysis.get("isDuringSearch", False)
if not partial_result:
del self.queries[query_id]
time_taken = time.time() - start_time
results_exist = not analysis.get("noResults", False)
self.katrain.log(
f"[{time_taken:.1f}][{query_id}][{'....' if partial_result else 'done'}] KataGo analysis received: {len(analysis.get('moveInfos',[]))} candidate moves, {analysis['rootInfo']['visits'] if results_exist else 'n/a'} visits",
OUTPUT_DEBUG,
)
self.katrain.log(json_truncate_arrays(analysis), OUTPUT_EXTRA_DEBUG)
try:
if callback and results_exist:
callback(analysis, partial_result)
except Exception as e:
self.katrain.log(f"Error in engine callback for query {query_id}: {e}", OUTPUT_ERROR)
traceback.print_exc()
if getattr(self.katrain, "update_state", None): # easier mocking etc
self.katrain.update_state()
except Exception as e:
self.katrain.log(f"Unexpected exception {e} while processing KataGo output {line}", OUTPUT_ERROR)
traceback.print_exc()
def _write_stdin_thread(self): # flush only in a thread since it returns only when the other program reads
while self.katago_process is not None:
try:
query, callback, error_callback, next_move, node = self.write_queue.get(block=True, timeout=0.1)
except queue.Empty:
continue
with self.thread_lock:
if "id" not in query:
self.query_counter += 1
query["id"] = f"QUERY:{str(self.query_counter)}"
if query.get("action") != "terminate":
self.queries[query["id"]] = (callback, error_callback, time.time(), next_move, node)
self.katrain.log(f"Sending query {query['id']}: {json.dumps(query)}", OUTPUT_DEBUG)
try:
self.katago_process.stdin.write((json.dumps(query) + "\n").encode())
self.katago_process.stdin.flush()
except OSError as e:
self.katrain.log(f"Exception in writing to katago: {e}", OUTPUT_DEBUG)
return # some other thread will take care of this
def send_query(self, query, callback, error_callback, next_move=None, node=None):
self.write_queue.put((query, callback, error_callback, next_move, node))
def request_analysis(
self,
analysis_node: GameNode,
callback: Callable,
error_callback: Optional[Callable] = None,
visits: int = None,
analyze_fast: bool = False,
time_limit=True,
find_alternatives: bool = False,
region_of_interest: Optional[List] = None,
priority: int = 0,
ownership: Optional[bool] = None,
next_move: Optional[GameNode] = None,
extra_settings: Optional[Dict] = None,
report_every: Optional[float] = None,
):
nodes = analysis_node.nodes_from_root
moves = [m for node in nodes for m in node.moves]
initial_stones = [m for node in nodes for m in node.placements]
clear_placements = [m for node in nodes for m in node.clear_placements]
if clear_placements: # TODO: support these
self.katrain.log(f"Not analyzing node {analysis_node} as there are AE commands in the path", OUTPUT_DEBUG)
return
if next_move:
moves.append(next_move)
if ownership is None:
ownership = self.config["_enable_ownership"] and not next_move
if visits is None:
visits = self.config["max_visits"]
if analyze_fast and self.config.get("fast_visits"):
visits = self.config["fast_visits"]
size_x, size_y = analysis_node.board_size
if find_alternatives:
avoid = [
{
"moves": list(analysis_node.analysis["moves"].keys()),
"player": analysis_node.next_player,
"untilDepth": 1,
}
]
elif region_of_interest:
xmin, xmax, ymin, ymax = region_of_interest
avoid = [
{
"moves": [
Move((x, y)).gtp()
for x in range(0, size_x)
for y in range(0, size_y)
if x < xmin or x > xmax or y < ymin or y > ymax
],
"player": player,
"untilDepth": 1, # tried a large number here, or 2, but this seems more natural
}
for player in "BW"
]
else:
avoid = []
settings = copy.copy(self.override_settings)
if time_limit:
settings["maxTime"] = self.config["max_time"]
if self.config.get("wide_root_noise", 0.0) > 0.0: # don't send if 0.0, so older versions don't error
settings["wideRootNoise"] = self.config["wide_root_noise"]
query = {
"rules": self.get_rules(analysis_node.ruleset),
"priority": self.base_priority + priority,
"analyzeTurns": [len(moves)],
"maxVisits": visits,
"komi": analysis_node.komi,
"boardXSize": size_x,
"boardYSize": size_y,
"includeOwnership": ownership and not next_move,
"includeMovesOwnership": ownership and not next_move,
"includePolicy": not next_move,
"initialStones": [[m.player, m.gtp()] for m in initial_stones],
"initialPlayer": analysis_node.initial_player,
"moves": [[m.player, m.gtp()] for m in moves],
"overrideSettings": {**settings, **(extra_settings or {})},
}
if report_every is not None:
query["reportDuringSearchEvery"] = report_every
if avoid:
query["avoidMoves"] = avoid
self.send_query(query, callback, error_callback, next_move, analysis_node)
analysis_node.analysis_visits_requested = max(analysis_node.analysis_visits_requested, visits)
|
app.py
|
#!/usr/bin/python
"""
app.py - Program to execute command on Multiple Linux Servers
Dependency - cryptography, paramiko
Input: comma-separated list of hostnames, and a command to run
Output: stdout of the command from each server
"""
import sys, os, string, threading
import traceback
import paramiko # Great implementation of SSH in Python
uname = "root" # Connecting as root (Easy to test with Docker)
key = paramiko.RSAKey.from_private_key_file("./private.pem") # Same Private key for all the system
lock = threading.Lock()
"""
Executes command on the remote system and prints the output on the screen
Params : host, command
Output : Output of the command execution with details
"""
def executeCommand(host, command):
    try:
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(host, username=uname, pkey=key)
        stdin, stdout, stderr = ssh.exec_command(command)
        stdin.flush()
        with lock:
            print("Host: " + host)
            # readlines() returns a list of lines; join them before printing
            print("\n" + "".join(stdout.readlines()))
            print("\n" + "".join(stderr.readlines()))
        ssh.close()
    except Exception:
        print(traceback.format_exc())
    return
def main():
    threads = []
    if len(sys.argv) < 2 or not sys.argv[1].strip():
        print("usage: python app.py <comma-separated list of hosts>")
        sys.exit(1)
    host = str(sys.argv[1])
    command = input('Enter the command to execute: ')
hosts = [i.strip() for i in host.split(',')]
for h in hosts:
t = threading.Thread(target=executeCommand, args=(h,command))
t.start()
threads.append(t)
for t in threads:
t.join()
if __name__ == "__main__":
sys.exit(main())
|
nullinux.py
|
#!/usr/bin/env python3
from __future__ import print_function
import sys
import argparse
import datetime
from time import sleep
from ipparser import ipparser
from threading import Thread, activeCount
if sys.version_info[0] < 3:
from commands import getoutput
else:
from subprocess import getoutput
class nullinux():
known_users = ['Administrator', 'Guest', 'krbtgt', 'root', 'bin']
domain_sid = ""
acquired_users = []
def __init__(self, username, password, verbose, output_file):
self.username = username
self.password = password
self.verbose = verbose
self.output_file = output_file
def enum_os(self, target):
cmd = "smbclient //{}/IPC$ -U {}%{} -t 1 -c exit".format(target,self.username, self.password)
for line in getoutput(cmd).splitlines():
if "Domain=" in line:
# OS info is no longer enumerated in newer Windows servers
print_success("{}: {}".format(target, line))
elif "NT_STATUS_LOGON_FAILURE" in line:
print_failure("{}: Authentication Failed".format(target))
return False
return True
def get_dom_sid(self, target):
print("\n\033[1;34m[*]\033[1;m Enumerating Domain Information for: {}".format(target))
cmd = "rpcclient -c lsaquery -U {}%{} {}".format(self.username, self.password, target)
for line in getoutput(cmd).splitlines():
if "Domain Name:" in line:
print_success(line)
elif "Domain Sid:" in line:
self.domain_sid = line.split(":")[1].strip()
print_success("Domain SID: {}".format(self.domain_sid))
if not self.domain_sid:
print_failure("Could not attain Domain SID")
def create_userfile(self):
openfile = open(self.output_file, 'a')
for user in self.acquired_users:
openfile.write('{}\n'.format(user))
openfile.close()
def enum_shares(self, target):
count = 0
acquired_shares = []
smbclient_types = ['Disk', 'IPC', 'Printer']
print("\n\033[1;34m[*]\033[1;m Enumerating Shares for: {}".format(target))
cmd = "smbclient -L {} -U {}%{} -t 2".format(target, self.username, self.password)
for line in getoutput(cmd).splitlines():
if count == 0: #Print Enum Share Heading
print(" {:26} {}".format("Shares", "Comments"))
print(" " + "-" * 43)
count += 1
for t in smbclient_types: #Check if output in known share types
if t in line:
try:
if 'IPC$' in line:
print(" \\\{}\{}".format(target, "IPC$"))
acquired_shares.append("IPC$")
else:
share = line.split(t)[0].strip()
comment = line.split(t)[1].strip()
print(" \\\{}\{:15} {}".format(target, share, comment))
acquired_shares.append(share)
except KeyboardInterrupt:
print("\n[!] Key Event Detected...\n\n")
sys.exit(0)
except:
pass
if acquired_shares:
#Enumerate dir of each new share
for s in acquired_shares:
self.enum_dir(target, s)
else:
print(" ")
print_failure("No Shares Detected")
def share_header(self, target, share):
print("\n ", end='')
print_status("Enumerating: \\\%s\%s" % (target, share))
def enum_dir(self, target, share):
header_count = 0
cmd = "smbclient //{}/\'{}\' -t 3 -U {}%{} -c dir".format(target, share, self.username, self.password)
for line in getoutput(cmd).splitlines():
if "NT_STATUS" in line or "_ACCESS_DENIED" in line:
if self.verbose:
if header_count == 0:
header_count += 1
self.share_header(target, share)
print(" ", end='')
print_failure(line)
elif "Domain=" in line or "blocks available" in line or "WARNING" in line or "failed:" in line or not line:
pass
else:
if header_count == 0:
header_count += 1
self.share_header(target, share)
print(" "+line)
def enum_querydispinfo(self, target):
print("\n\033[1;34m[*]\033[1;m Enumerating querydispinfo for: {}".format(target))
cmd = "rpcclient -c querydispinfo -U {}%{} {}".format(self.username, self.password, target)
for line in getoutput(cmd).splitlines():
try:
user_account = line.split("Name:")[0].split("Account:")[1].strip()
print(" " + user_account)
if user_account not in self.acquired_users:
self.acquired_users.append(user_account)
except KeyboardInterrupt:
print("\n[!] Key Event Detected...\n\n")
sys.exit(0)
except:
pass
def enum_enumdomusers(self, target):
print("\n\033[1;34m[*]\033[1;m Enumerating enumdomusers for: {}".format(target))
cmd = "rpcclient -c enumdomusers -U {}%{} {}".format(self.username, self.password, target)
for line in getoutput(cmd).splitlines():
try:
user_account = line.split("[")[1].split("]")[0].strip()
print(" "+user_account)
if user_account not in self.acquired_users:
self.acquired_users.append(user_account)
except KeyboardInterrupt:
print("\n[!] Key Event Detected...\n\n")
sys.exit(0)
except:
pass
def enum_lsa(self, target):
print("\n\033[1;34m[*]\033[1;m Enumerating LSA for: {}".format(target))
cmd = "rpcclient -c lsaenumsid -U {}%{} {}".format(self.username, self.password, target)
output = getoutput(cmd)
for line in output.splitlines():
try:
if "S-1-5-21" in line:
user_sid = "rpcclient -c 'lookupsids {}' -U {}%{} {}".format(line, self.username, self.password, target)
for x in getoutput(user_sid).splitlines():
user_account = x.split("\\")[1].split("(")[0].strip()
count = int(x.split("(")[1].split(")")[0].strip())
if count == 1:
if self.verbose:
print(" "+x)
else:
print(" "+user_account)
if user_account not in self.acquired_users:
self.acquired_users.append(user_account)
elif count > 1 and "*unknown*\*unknown*" not in line:
if self.verbose:
print(" {:35} (Network/LocalGroup)".format(x))
else:
print(" {:35} (Network/Local Group)".format(user_account))
except KeyboardInterrupt:
print("\n[!] Key Event Detected...\n\n")
sys.exit(0)
except:
pass
def rid_cycling(self, target, ridrange, max_threads):
print("\n\033[1;34m[*]\033[1;m Performing RID Cycling for: {}".format(target))
if not self.domain_sid:
print_failure("RID Failed: Could not attain Domain SID")
return False
# Handle custom RID range input
try:
r = ridrange.split("-")
rid_range = list(range(int(r[0]), int(r[1])+1))
except:
print_failure("Error parsing custom RID range, reverting to default")
rid_range = list(range(500, 551))
for rid in rid_range:
try:
Thread(target=self.rid_thread, args=(rid,target,), daemon=True).start()
except:
pass
while activeCount() > max_threads:
sleep(0.001)
while activeCount() > 1:
sleep(0.001)
def rid_thread(self, rid, target):
cmd = "rpcclient -c \"lookupsids {}-{}\" -U {}%{} {}".format(self.domain_sid, rid, self.username, self.password,target)
for line in getoutput(cmd).splitlines():
if "S-1-5-21" in line:
# Split output to get username/group name
user_account = line.split("\\")[1].split("(")[0].strip()
count = int(line.split("(")[1].split(")")[0].strip())
if count == 1:
if self.verbose:
print(" " + line)
else:
print(" " + user_account)
if user_account not in self.acquired_users:
self.acquired_users.append(user_account)
elif count > 1 and "*unknown*\*unknown*" not in line:
if self.verbose:
print(" {:35} (Network/LocalGroup)".format(line))
else:
print(" {:35} (Network/LocalGroup)".format(user_account))
def enum_known_users(self, target):
print("\n\033[1;34m[*]\033[1;m Testing {} for Known Users".format(target))
for user in self.known_users:
cmd = "rpcclient -c \"lookupnames {}\" -U {}%{} {}".format(user, self.username, self.password, target)
for line in getoutput(cmd).splitlines():
if "S-1-5" in line:
try:
user_account = line.split(" ")[0].strip()
if self.verbose:
print(" " + line)
else:
print(" " + user_account)
if user_account not in self.acquired_users and int(line.split("User:")[1]) == 1:
self.acquired_users.append(user_account)
except KeyboardInterrupt:
print("\n[!] Key Event Detected...\n\n")
sys.exit(0)
except:
pass
def enum_dom_groups(self, target):
print("\n\033[1;34m[*]\033[1;m Enumerating Group Memberships for: {}".format(target))
cmd = "rpcclient -c enumdomgroups -U {}%{} {}".format(self.username, self.password, target)
for line in getoutput(cmd).splitlines():
if "rid:" in line:
try:
group = line.split("[")[1].split("]")[0].strip()
print_success("Group: %s" % (group))
self.enum_group_mem(target, group)
except KeyboardInterrupt:
print("\n[!] Key Event Detected...\n\n")
sys.exit(0)
except:
pass
def enum_group_mem(self, target, group):
cmd = "net rpc group members \'{}\' -U {}%{} -I {}".format(group, self.username, self.password, target)
for line in getoutput(cmd).splitlines():
try:
user_account = line.split("\\")[1].strip()
print(" " + user_account)
if user_account not in self.acquired_users:
self.acquired_users.append(user_account)
except KeyboardInterrupt:
print("\n[!] Key Event Detected...\n\n")
sys.exit(0)
except:
pass
def print_success(msg):
print('\033[1;32m[+]\033[0m {}'.format(msg))
def print_status(msg):
print('\033[1;34m[*]\033[0m {}'.format(msg))
def print_failure(msg):
print('\033[1;31m[-]\033[0m {}'.format(msg))
def time_stamp():
return datetime.datetime.now().strftime('%m-%d-%Y %H:%M')
def nullinux_enum(args, scan, target):
scan.enum_os(target)
if args.users:
scan.enum_shares(target)
if args.shares:
if not scan.domain_sid:
scan.get_dom_sid(target)
scan.enum_querydispinfo(target)
scan.enum_enumdomusers(target)
if not args.quick:
scan.enum_lsa(target)
scan.rid_cycling(target, args.rid_range, args.max_threads)
scan.enum_known_users(target)
scan.enum_dom_groups(target)
def main(args):
print("\n Starting nullinux v{} | {}\n\n".format(version, time_stamp()))
scan = nullinux('\"{}\"'.format(args.username), '\"{}\"'.format(args.password), args.verbose, args.output_file)
for t in args.target:
try:
if args.rid_only:
scan.get_dom_sid(t)
scan.rid_cycling(t, args.rid_range, args.max_threads)
else:
nullinux_enum(args, scan, t)
except Exception as e:
print("\n[*] Main Error: {}\n\n".format(e))
if args.users:
print("\n\033[1;34m[*]\033[1;m {} unique user(s) identified".format(len(scan.acquired_users)))
if scan.acquired_users:
print("\033[1;32m[+]\033[1;m Writing users to file: {}\n".format(args.output_file))
scan.create_userfile()
if __name__ == '__main__':
try:
version = '5.4.1'
args = argparse.ArgumentParser(description=("""
nullinux | v{0}
-----------------------------------
SMB null-session enumeration tool to gather OS,
user, share, and domain information.
usage:
nullinux -users -quick DC1.demo.local,10.0.1.1
nullinux -rid -range 500-600 10.0.0.1
nullinux -shares -U 'Domain\\User' -P 'Password1' 10.0.0.1""").format(version), formatter_class=argparse.RawTextHelpFormatter, usage=argparse.SUPPRESS)
args.add_argument('-v', dest="verbose", action='store_true', help="Verbose output")
args.add_argument('-o', dest="output_file", type=str, default="./nullinux_users.txt", help="Output users to the specified file")
auth = args.add_argument_group("Authentication")
auth.add_argument('-u', '-U', dest='username', type=str, default="", help='Username')
auth.add_argument('-p', '-P', dest='password', type=str, default="", help='Password')
enum = args.add_argument_group("Enumeration")
enum.add_argument('-shares', dest="shares", action='store_false', help="Enumerate shares only")
enum.add_argument('-users', dest="users", action='store_false', help="Enumerate users only")
enum.add_argument('-q', '-quick', dest="quick", action='store_true', help="Fast user enumeration")
enum.add_argument('-r', '-rid', dest="rid_only", action='store_true', help="Perform RID cycling only")
enum.add_argument('-range', dest='rid_range', type=str, default="500-550", help='Set Custom RID cycling range (Default: \'500-550\')')
enum.add_argument('-T', dest='max_threads', type=int, default=15, help='Max threads for RID cycling (Default: 15)')
args.add_argument(dest='target', nargs='+', help='Target server')
args = args.parse_args()
args.target = ipparser(args.target[0])
main(args)
except KeyboardInterrupt:
print("\n[!] Key Event Detected...\n\n")
sys.exit(0)
|
create_server.py
|
import sqlite3
import sys
import time
import subprocess
import os
# import requests
import threading
import ParticleCloud
import scheduler
import PCA9555
import logging
# import logging_tree
# from flask import Flask, render_template, request
import cherrypy
import json
conf = {
'/': {
'tools.sessions.on': True,
'tools.staticdir.root': os.path.abspath(os.getcwd())
},
'/static': {
'tools.staticdir.on': True,
'tools.staticdir.dir': '../static',
'tools.staticdir.index': 'html/index.html'
}
}
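# (Editor's note) With this config, requests to /static/ are served from the ../static
# directory, resolved against tools.staticdir.root (the current working directory), with
# html/index.html as the directory index.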
log = logging.getLogger('')
def schedule_daemon():
while True:
print "Checking events"
event = tasklist.check_event()
if event != None:
print "Theres an event!"
growerapp.relay_set(event[0],event[1],event[2])
time.sleep(10)
class CherryServer():
@cherrypy.expose
def index(self):
# return "Hello world"
raise cherrypy.HTTPRedirect("/static")
def _cp_dispatch(self, vpath):
logging.warning('Value of vpath is %s, and number of elements is: %d, and request type is: %s' % (vpath,len(vpath),str(cherrypy.request.method.upper()) ) )
# logging.warning(cherrypy.request.method.upper() )
# if len(vpath) > 1:
# if vpath[1] == "javascripts":
# # logging.warning("Trying to load a javascript file")
# cherrypy.request.params['name'] = vpath.pop()
# return self
# if vpath[1] == "users":
# logging.warning("Trying to load list of users %d" % (1,))
# cherrypy.request.params['userId'] = vpath.pop()
# return self.users
# if len(vpath) == 3:
# cherrypy.request.params['artist'] = vpath.pop(0) # /band name/
# vpath.pop(0) # /albums/
# cherrypy.request.params['title'] = vpath.pop(0) # /album title/
# return self.users
# return vpath
# return vpath
@cherrypy.expose
def users(self,userId=None):
# logging_tree.printout()
return "UserId =" + str(userId)
@cherrypy.expose
@cherrypy.tools.json_out()
def SetRelay(self,devname,relay,val):
logging.debug('SetRelay Call: Devname: %s Relay: %d, Val: %d' % (devname,int(relay), int(val) ) )
# growerapp.relay_set(int(relay),int(val),devname )
io.relay_set(int(relay),int(val) )
return json.dumps({"response" : "1"})
@cherrypy.expose
@cherrypy.tools.json_out()
def AddTask(self,devname,relay,val,hour,minute):
logging.debug('AddTask Call: Relay: %d, Val: %d, Hour: %d, Minute: %d' % (int(relay), int(val), int(hour), int(minute) ) )
tasklist.add_to_table(devname,int(hour),int(minute),int(relay),int(val) )
@cherrypy.expose
@cherrypy.tools.json_out()
def RemoveTask(self,devname,hour,minute):
logging.debug('RemoveTask Call: Hour: %d, Minute: %d' % (int(hour), int(minute) ) )
tasklist.remove_event(devname,int(hour),int(minute) )
@cherrypy.expose
@cherrypy.tools.json_out()
def PrintSchedule(self,devname):
responseData = tasklist.print_dev_table(devname)
print "length of response data + " + str(len(responseData))
if len(responseData) == 0:
return "<p>No Events Schedule for Device: " + str(devname) + "</p>"
html = "<tr> <th> Hour </th> <th> Min </th> <th> Relay </th> <th> Val </th> </tr>"
for i in xrange(0,len(responseData)):
html = html + "<tr> <th>" + str(responseData[i][0]) + "</th> <th>" + str(responseData[i][1]) + "</th> <th>" + str(responseData[i][2]) + "</th> <th>" + str(responseData[i][3])
html = html + "</tr>"
return html
@cherrypy.expose
@cherrypy.tools.json_out()
def GetRelayStatus(self):
# data = growerapp.relay_status()
data = io.relay_status()
logging.debug('GetRelayStatus Call: Data: %s' % str(data) )
return data
@cherrypy.expose
@cherrypy.tools.json_out()
def ShowDevices(self):
responseData = growerapp.getDeviceList()
responseJSON = {}
# for i in xrange(0,len(responseData)):
# responseJSON.update({i:responseData[i]})
return responseData
@cherrypy.expose
@cherrypy.tools.json_out()
def SelectDevice(self,name):
rname = growerapp.setCurrentDevice(name)
logging.debug('SelectDevice Call: Data: %s' % str(rname) )
if __name__ == '__main__':
logging.basicConfig(filename='../example.log', level=logging.WARN, format='%(asctime)s - %(levelname)s - %(message)s')
logging.getLogger('cherrypy').propagate = False
# logging_tree.printout()
# tasklist = scheduler.Scheduler()
# growerapp = ParticleCloud.Controller()
# growerapp.login()
io = PCA9555.PCA9555()
# t = threading.Thread(target=schedule_daemon)
# t.daemon = True
# t.start()
cherrypy.config.update({'server.socket_host': '0.0.0.0','server.socket_port': 80})
cherrypy.quickstart(CherryServer(),'/',conf)
# growerapp.logout()
|
actor_max_memory_test.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import parl
import unittest
import time
import threading
from parl.remote.master import Master
from parl.remote.worker import Worker
from parl.remote.client import disconnect
from parl.remote.monitor import ClusterMonitor
from multiprocessing import Process
@parl.remote_class(max_memory=350)
class Actor(object):
def __init__(self, x=10):
self.x = x
self.data = []
def add_500mb(self):
self.data.append(os.urandom(500 * 1024**2))
self.x += 1
return self.x
from parl.utils import logger
class TestMaxMemory(unittest.TestCase):
def tearDown(self):
disconnect()
    # On Windows, multiprocessing.Process cannot run a bound method of a class, but a static method works.
@staticmethod
def actor(cluster_addr):
parl.connect(cluster_addr)
actor1 = Actor()
time.sleep(10)
actor1.add_500mb()
def test_max_memory(self):
port = 3001
master = Master(port=port)
th = threading.Thread(target=master.run)
th.start()
time.sleep(5)
cluster_addr = 'localhost:{}'.format(port)
worker = Worker(cluster_addr, 1)
cluster_monitor = ClusterMonitor(cluster_addr)
time.sleep(5)
parl.connect(cluster_addr)
actor = Actor()
time.sleep(20)
self.assertEqual(1, cluster_monitor.data['clients'][0]['actor_num'])
del actor
time.sleep(10)
p = Process(target=self.actor, args=(cluster_addr, ))
p.start()
for _ in range(6):
x = cluster_monitor.data['clients'][0]['actor_num']
if x == 0:
break
else:
time.sleep(10)
if x == 1:
raise ValueError("Actor max memory test failed.")
self.assertEqual(0, cluster_monitor.data['clients'][0]['actor_num'])
p.terminate()
worker.exit()
master.exit()
if __name__ == '__main__':
unittest.main()
|
test_recv_save_op.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import signal
import time
import shutil
import unittest
from multiprocessing import Process
import numpy as np
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.op import Operator
from paddle.fluid.framework import Program, program_guard
from paddle.fluid.transpiler.details import VarStruct, VarsDistributed
from dist_test_utils import *
from paddle.fluid.transpiler.distribute_transpiler import DistributedMode
def run_pserver(pserver_id):
remove_ps_flag(os.getpid())
scope = fluid.core.Scope()
program = Program()
with fluid.scope_guard(scope):
with program_guard(program, startup_program=Program()):
# create table parameter in scope
place = fluid.CPUPlace()
# create and initialize Param Variable
param = scope.var('table').get_tensor()
param_array = np.ones((5, 8)).astype("float32")
for i in range(len(param_array)):
param_array[i] *= param_array[i] * i + pserver_id * 10 + 1
param.set(param_array, place)
optimize_block = program._create_block(program.global_block().idx)
program.global_block().append_op(
type="listen_and_serv",
inputs={'X': []},
outputs={},
attrs={
"optimize_blocks": [optimize_block],
"endpoint": '127.0.0.1:0',
"Fanin": 1,
"distributed_mode": DistributedMode.SYNC,
"grad_to_block_id": []
})
exe = fluid.Executor(place)
exe.run(program)
class TestListenAndServOp(unittest.TestCase):
def setUp(self):
self.ps_timeout = 5
def _start_pserver(self, pserver_id, pserver_func):
p = Process(target=pserver_func, args=(pserver_id, ))
p.daemon = True
p.start()
return p
def _wait_ps_ready(self, pid):
start_left_time = self.ps_timeout
sleep_time = 0.5
while True:
assert start_left_time >= 0, "wait ps ready failed"
time.sleep(sleep_time)
try:
# the listen_and_serv_op would touch a file which contains the listen port
# on the /tmp directory until it was ready to process all the RPC call.
os.stat("/tmp/paddle.%d.port" % pid)
return
except os.error:
start_left_time -= sleep_time
def _get_pserver_port(self, pid):
with open("/tmp/paddle.%d.port" % pid, 'r') as f:
port = int(f.read().strip())
return port
def _run_nce_op_two_pserver(self, place, port0, port1, model_file):
scope = fluid.core.Scope()
program = Program()
with fluid.scope_guard(scope):
with program_guard(program, startup_program=Program()):
emaps = ['127.0.0.1:' + str(port0), '127.0.0.1:' + str(port1)]
# create and run recv and save operator
remote_recv_op = Operator(
"recv_save",
trainer_id=0,
shape=[10, 8],
slice_shapes=["5,8", "5,8"],
slice_varnames=["table", "table"],
remote_varnames=['table', 'table'],
endpoints=emaps,
file_path=model_file)
remote_recv_op.run(scope, place)
def _load_slice_var(self, model_file):
load_prog = fluid.Program()
load_block = load_prog.global_block()
origin = load_block.create_var(
name="var.origin",
type=fluid.core.VarDesc.VarType.LOD_TENSOR,
shape=[10, 8],
dtype="float32",
persistable=True)
slice0 = load_block.create_var(
name="var.slice0",
type=fluid.core.VarDesc.VarType.LOD_TENSOR,
shape=[3, 8],
dtype="float32",
persistable=True)
slice1 = load_block.create_var(
name="var.slice1",
type=fluid.core.VarDesc.VarType.LOD_TENSOR,
shape=[5, 8],
dtype="float32",
persistable=True)
load_block.append_op(
type='load',
inputs={},
outputs={'Out': [origin]},
attrs={'file_path': model_file})
load_block.append_op(
type='load',
inputs={},
outputs={'Out': [slice0]},
attrs={
'file_path': model_file,
'seek': 2 * 8,
'shape': slice0.shape
})
load_block.append_op(
type='load',
inputs={},
outputs={'Out': [slice1]},
attrs={
'file_path': model_file,
'seek': 5 * 8,
'shape': slice1.shape
})
exe = fluid.Executor(place=fluid.CPUPlace())
exe.run(load_prog)
origin_var = fluid.global_scope().find_var("var.origin")
slice0_var = fluid.global_scope().find_var("var.slice0")
slice1_var = fluid.global_scope().find_var("var.slice1")
origin = np.array(origin_var.get_tensor())
slice0 = np.array(slice0_var.get_tensor())
slice1 = np.array(slice1_var.get_tensor())
np.testing.assert_equal(origin[2:5], slice0)
np.testing.assert_equal(origin[5:10], slice1)
def _save_by_io_persistables(self, place, port0, port1, dirname, var_name):
exe = fluid.Executor(place=place)
vars_overview = VarsDistributed()
orig_var = VarStruct(
name=var_name,
type=fluid.core.VarDesc.VarType.LOD_TENSOR,
shape=[10, 8],
dtype="float32",
lod_level=0,
persistable=True)
slice_0_var = VarStruct(
name=var_name,
type=fluid.core.VarDesc.VarType.LOD_TENSOR,
shape=[5, 8],
dtype="float32",
lod_level=0,
persistable=True)
slice_1_var = VarStruct(
name=var_name,
type=fluid.core.VarDesc.VarType.LOD_TENSOR,
shape=[5, 8],
dtype="float32",
lod_level=0,
persistable=True)
vars_overview.add_distributed_var(
origin_var=orig_var,
slice_var=slice_0_var,
block_id=0,
offset=0,
is_slice=True,
vtype="RemotePrefetch",
endpoint="{}:{}".format("127.0.0.1", port0))
vars_overview.add_distributed_var(
origin_var=orig_var,
slice_var=slice_1_var,
block_id=1,
offset=40,
is_slice=True,
vtype="RemotePrefetch",
endpoint="{}:{}".format("127.0.0.1", port1))
program = Program()
program._is_distributed = True
program._is_chief = True
program._parameters_on_pservers = vars_overview
fluid.io.save_persistables(exe, dirname, program)
def test_recv_save_op_remote(self):
# run pserver on CPU in sync mode
p0 = self._start_pserver(0, run_pserver)
self._wait_ps_ready(p0.pid)
port0 = self._get_pserver_port(p0.pid)
p1 = self._start_pserver(1, run_pserver)
self._wait_ps_ready(p1.pid)
port1 = self._get_pserver_port(p1.pid)
places = [core.CPUPlace()]
param_dir = "./model_for_test_recv_save_op/"
param_name = "table"
for place in places:
self._save_by_io_persistables(place, port0, port1, param_dir,
param_name)
# raise SIGTERM to pserver
os.kill(p0.pid, signal.SIGINT)
p0.join()
os.kill(p1.pid, signal.SIGINT)
p1.join()
self._load_slice_var(param_dir + param_name)
shutil.rmtree(param_dir)
if __name__ == '__main__':
unittest.main()
|
interact.py
|
from TTS.text2speech import tts_class
from multiprocessing import Process
import faiss
import time
import sqlite3
import csv
import random
import copy
import tensorflow_hub as hub
import tensorflow_text  # imported for its side effect of registering TF ops that some TF Hub text models require
import math
import numpy as np
import pickle
from Retriever.Retrieve import retrieve
import Utils.functions as utils
from ReRanker.rerank import rank_and_choose
from Generator.generator import generate as DialoGPT_Generate
from Classifier.model.dialog_acts import Encoder as Classifier
from Sentence_Encoder.meta_response_encoder_fast import encode as response_encode
from Sentence_Encoder.meta_query_encoder_fast import encode as query_encode
import Sentence_Encoder.encoder_client as encoder_client
import tensorflow as tf
from transformers import GPT2Tokenizer, GPT2LMHeadModel, GPT2Config
import torch.nn.functional as F
import torch.nn as nn
import torch as T
import os
import sys
import argparse
import logging
logging.getLogger("tensorflow").setLevel(logging.CRITICAL)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
logging.basicConfig(level=logging.CRITICAL)
parser = argparse.ArgumentParser(description="Chatbot")
parser.add_argument('--voice', dest='voice', action='store_true')
parser.add_argument('--no-voice', dest='voice', action='store_false')
parser.set_defaults(voice=True)
flags = parser.parse_args()
device = "cuda"
with open("Retriever/Faiss_index/thread_idx.pkl", 'rb') as fp:
idx = pickle.load(fp)
index = faiss.read_index('Retriever/Faiss_index/large.index')
# LOAD DATABASE
conn = sqlite3.connect('Retriever/Database/reddit.db')
c = conn.cursor()
# LOAD SCRIPTS
with open('Scripted/Processed_Scripts/Bot_Profile.pkl', 'rb') as fp:
bot_profile = pickle.load(fp)
bot_queries = [k for k, v in bot_profile.items()]
with open('Scripted/Processed_Scripts/Chatterbot.pkl', 'rb') as fp:
chatterbot = pickle.load(fp)
chatterbot_queries = [k for k, v in chatterbot.items()]
# LOAD SCRIPT EMBEDDINGS
with open('Scripted/Processed_Scripts/embedded_bot_queries.pkl', 'rb') as fp:
bot_queries_embd = pickle.load(fp)
with open('Scripted/Processed_Scripts/embedded_chatterbot_queries.pkl', 'rb') as fp:
chatterbot_queries_embd = pickle.load(fp)
# Load Dialog Acts Classifier
with open("Classifier/data/processed_data.pkl", "rb") as fp:
data = pickle.load(fp)
labels2idx = data["labels2idx"]
idx2labels = {v: k for k, v in labels2idx.items()}
with T.no_grad():
dialog_act_classifier = Classifier(
D=bot_queries_embd.shape[-1], classes_num=len(labels2idx)).cuda()
checkpoint = T.load("Classifier/Model_Backup/model.pt")
dialog_act_classifier.load_state_dict(checkpoint['model_state_dict'])
dialog_act_classifier = dialog_act_classifier.eval()
# Load TTS model
with T.no_grad():
text2speech = tts_class()
# LOAD DialoGPT Generator
with T.no_grad():
tokenizer = GPT2Tokenizer.from_pretrained('Generator/DialoGPT/Configs/')
weights = T.load('Generator/DialoGPT/Parameters/medium_ft.pkl')
weights_reverse = T.load('Generator/DialoGPT/Parameters/small_reverse.pkl')
cfg = GPT2Config.from_json_file('Generator/DialoGPT/Configs/config.json')
model = GPT2LMHeadModel(cfg)
model_reverse = GPT2LMHeadModel(cfg)
# fix misused key value
weights["lm_head.weight"] = weights["lm_head.decoder.weight"]
weights.pop("lm_head.decoder.weight", None)
weights_reverse["lm_head.weight"] = weights_reverse["lm_head.decoder.weight"]
weights_reverse.pop("lm_head.decoder.weight", None)
model.load_state_dict(weights)
model.to('cuda')
model.eval()
model_reverse.load_state_dict(weights_reverse)
model_reverse.to('cuda')
model_reverse.eval()
with tf.device("/cpu:0"):
# Hub Models
ConvRT_model = encoder_client.EncoderClient(
"Sentence_Encoder/Embeddings/ConvRT", use_extra_context=True)
USE_QA_model = hub.load("Sentence_Encoder/Embeddings/USE_QA/")
# %%
command_codes = ["<PASS>", "<JOKE>", "<GENERATE>",
"<INITIATE>", "<TIL>", "<STORY>", "<SHOWER>", "<STOP>"]
code_map = {"<INITIATE>": ["Scripted/Random_Reddit_Data/nostupidq.csv",
"Scripted/Random_Reddit_Data/jokesq.csv",
"Scripted/Random_Reddit_Data/showerthoughtsq.csv",
"Scripted/Random_Reddit_Data/tilq.csv"],
"<TIL>": ["Scripted/Random_Reddit_Data/tilq.csv"],
"<SHOWER>": ["Scripted/Random_Reddit_Data/showerthoughtsq.csv"],
"<STORY>": ["Scripted/Random_Reddit_Data/writingpromptsa.csv"],
"<JOKE>": ["Scripted/Random_Reddit_Data/jokesq.csv"]}
def random_response(candidates, conversation_history, p=None):
loop = 5
if p is None:
response = random.choice(candidates)
else:
response = np.random.choice(candidates, p=p)
i = 0
while response in conversation_history:
if p is None:
response = random.choice(candidates)
else:
response = np.random.choice(candidates, p=p)
i += 1
if i > loop:
break
return response
# %%
def load_random_reddit(directory, conversation_history):
candidates = []
with open(directory, newline='') as csvfile:
csv_reader = csv.DictReader(csvfile)
for i, row in enumerate(csv_reader):
if 'writing' in directory:
parent_id = str(row['parent_id'])[3:]
thread_id = str(row['link_id'])[3:]
if parent_id == thread_id:
candidate = str(row["body"])
else:
candidate = str(row["title"])
if 'joke' in directory:
candidate += ".... "+str(row['selftext'])
candidates.append(candidate)
return random_response(candidates, conversation_history)
# extract top candidates (queries or responses)
def top_candidates(candidates, scores, top=1):
sorted_score_idx = np.flip(np.argsort(scores), axis=-1)
candidates = [candidates[i] for i in sorted_score_idx.tolist()]
scores = [scores[i] for i in sorted_score_idx.tolist()]
return candidates[0:top], scores[0:top], sorted_score_idx.tolist()
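# Illustrative example for the helper above (added comment, not part of the original script):
#   top_candidates(["a", "b", "c"], [0.1, 0.9, 0.5], top=2)
#   -> (["b", "c"], [0.9, 0.5], [1, 2, 0])   i.e. candidates and scores sorted by descending score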
# %%
def generate(texts, past):
candidates, _ = DialoGPT_Generate(texts, model, tokenizer)
return candidates, past
# START DOING STUFF
conversation_history = []
past = None
stop_flag = 0
print("\n")
while True:
    utterance = input("Say Something: ")  # e.g. "hello how are ya today"
utils.delay_print("\nThinking......")
candidates = []
temp_candidates = []
temp_scores = []
if not conversation_history:
query_context = []
response_context = [""]
else:
if len(conversation_history) > 5:
truncated_history = copy.deepcopy(conversation_history[-5:])
else:
truncated_history = copy.deepcopy(conversation_history)
response_context = [conversation_history[-1]]
        # ConveRT needs the context reversed; not sure about USE QA, but assuming it does not
query_context = [stuff for stuff in truncated_history]
query_encoding = query_encode([utterance], USE_QA_model, ConvRT_model, [query_context])
if conversation_history:
if len(conversation_history) > 5:
truncated_history = conversation_history[-5:]
else:
truncated_history = conversation_history
generated_responses, past = generate(truncated_history+[utterance], past)
else:
generated_responses, past = generate([utterance], past)
bot_cosine_scores = utils.cosine_similarity_nd(query_encoding, bot_queries_embd)
bot_queries_, bot_cosine_scores_, _ = top_candidates(bot_queries, bot_cosine_scores, top=1)
active_codes = []
bot_candidates = bot_profile[bot_queries_[0]]
filtered_bot_candidates = []
for candidate in bot_candidates:
flag = 0
for code in command_codes:
if code in candidate:
active_codes.append(code)
candidate = candidate.replace(code, "")
filtered_bot_candidates.append(candidate)
flag = 1
break
if flag == 0:
candidates.append(candidate)
filtered_bot_candidates.append(candidate)
active_codes.append("")
with T.no_grad():
logits = dialog_act_classifier(T.tensor(query_encoding).to(device))
_, sorted_idx = T.sort(logits, dim=-1, descending=True)
sorted_idx = sorted_idx.squeeze(0)
sorted_idx = sorted_idx[0:2].cpu().tolist()
labels = [idx2labels[i] for i in sorted_idx]
# print(labels)
"""
Possible Dialog Acts:
['nonsense', 'dev_command', 'open_question_factual', 'appreciation', 'other_answers', 'statement', \
'respond_to_apology', 'pos_answer', 'closing', 'comment', 'neg_answer', 'yes_no_question', 'command', \
'hold', 'NULL', 'back-channeling', 'abandon', 'opening', 'other', 'complaint', 'opinion', 'apology', \
'thanking', 'open_question_opinion']
"""
if bot_cosine_scores_[0] >= 0.75:
response, id = rank_and_choose(USE_QA_model, ConvRT_model,
tokenizer,
model_reverse,
utterance,
query_encoding,
filtered_bot_candidates,
response_context,
conversation_history)
code = active_codes[id]
if code in code_map:
directories = code_map[code]
directory = random.choice(directories)
response += " "+load_random_reddit(directory, conversation_history)
elif code == "<GENERATE>":
response, _ = rank_and_choose(USE_QA_model, ConvRT_model,
tokenizer,
model_reverse,
utterance,
query_encoding,
generated_responses,
response_context,
conversation_history)
elif code == "<STOP>":
stop_flag = 1
elif stop_flag != 1:
mode = "DEFAULT"
bias = None
if 'open_question_factual' in labels \
or ('yes_no_question' in labels and 'NULL' not in labels) \
or 'open_question_opinion' in labels or 'command' in labels:
bias = 0.07 # biases towards retrieval
elif "apology" in labels:
mode = "BREAK"
candidates = ["Apology accepted.", "No need to apologize.",
"No worries.", "You are forgiven"]
response, _ = rank_and_choose(USE_QA_model, ConvRT_model,
tokenizer,
model_reverse,
utterance,
query_encoding,
candidates,
response_context,
conversation_history)
elif "abandon" in labels or "nonsense" in labels:
mode = np.random.choice(["BREAK", "INITIATE"], p=[0.6, 0.4])
if mode == "BREAK":
candidates = ["what?", "Can you rephrase what you mean?",
"What do you mean exactly?"]
response, _ = rank_and_choose(USE_QA_model, ConvRT_model,
tokenizer,
model_reverse,
utterance,
query_encoding,
generated_responses+candidates,
response_context,
conversation_history)
else:
directories = code_map['<INITIATE>']
directory = random.choice(directories)
response = load_random_reddit(directory, conversation_history)
elif 'hold' in labels:
mode = "BREAK"
candidates = ["Do you want to add something more?",
"I think you want to say something more."]
response, _ = rank_and_choose(USE_QA_model, ConvRT_model,
tokenizer,
model_reverse,
utterance,
query_encoding,
generated_responses+candidates,
response_context,
conversation_history)
elif 'closing' in labels:
mode = "BREAK"
candidates = ["Nice talking to you.", "Goodbye.", "See you later."]
response, _ = rank_and_choose(USE_QA_model, ConvRT_model,
tokenizer,
model_reverse,
utterance,
query_encoding,
candidates,
response_context,
conversation_history)
stop_flag = 1
elif 'opening' in labels:
mode = "BREAK"
response, _ = rank_and_choose(USE_QA_model, ConvRT_model,
tokenizer,
model_reverse,
utterance,
query_encoding,
generated_responses,
response_context,
conversation_history)
stop_flag = 1
elif 'thanking' in labels:
mode = np.random.choice(["BREAK", "INITIATE"], p=[0.6, 0.4])
if mode == "BREAK":
candidates = ["No need to mention", "You are welcome."]
response, _ = rank_and_choose(USE_QA_model, ConvRT_model,
tokenizer,
model_reverse,
utterance,
query_encoding,
generated_responses+candidates,
response_context,
conversation_history)
else:
directories = code_map['<INITIATE>']
directory = random.choice(directories)
response = load_random_reddit(directory, conversation_history)
elif 'apology' in labels:
mode = "BREAK"
candidates = ["Apology accepted.", "Apology granted",
"No Worries!", "No need to apologize."]
response, _ = rank_and_choose(USE_QA_model, ConvRT_model,
tokenizer,
model_reverse,
utterance,
query_encoding,
generated_responses+candidates,
response_context,
conversation_history)
        elif 'respond_to_apology' in labels\
                or 'pos_answer' in labels or 'neg_answer' in labels\
                or 'appreciation' in labels or 'back-channeling' in labels:
mode = np.random.choice(["BREAK", "INITIATE"], p=[0.6, 0.4])
if mode == "BREAK":
response, _ = rank_and_choose(USE_QA_model, ConvRT_model,
tokenizer,
model_reverse,
utterance,
query_encoding,
generated_responses,
response_context,
conversation_history)
else:
directories = code_map['<INITIATE>']
directory = random.choice(directories)
response = load_random_reddit(directory, conversation_history)
if mode != "BREAK":
chatterbot_cosine_scores = utils.cosine_similarity_nd(
query_encoding, chatterbot_queries_embd)
chatterbot_queries_, chatterbot_cosine_scores_, _ = top_candidates(
chatterbot_queries, chatterbot_cosine_scores, top=1)
candidates += chatterbot[chatterbot_queries_[0]]
#print("\n\nABOUT TO BE RETRIEVED\n\n")
retrieved_candidates = retrieve(
conn, c, idx, index, query_encoding, query_context)
#print("\n\nABOUT TO BE RETRIEVED\n\n")
if bias is not None:
biases = [0.0 for _ in candidates]
for _ in generated_responses:
biases.append(0.0)
for _ in retrieved_candidates:
biases.append(bias)
biases = np.asarray(biases, np.float32)
else:
biases = None
candidates += generated_responses + retrieved_candidates
response, _ = rank_and_choose(USE_QA_model, ConvRT_model,
tokenizer,
model_reverse,
utterance,
query_encoding,
candidates,
response_context,
conversation_history,
bias=biases)
print("\n")
if len(str(response).split(" ")) <= 100:
if flags.voice:
entry = utils.simple_preprocess(str(response).lower(),
for_speech=True,
return_tokenized=True)
entry = " ".join(entry)
wavefiles = text2speech.process(entry)
def f1():
utils.delay_print("Bot: "+response)
def f2():
text2speech.play(wavefiles)
p1 = Process(target=f1)
p2 = Process(target=f2)
p1.start()
p2.start()
p1.join()
p2.join()
else:
utils.delay_print("Bot: "+response)
else:
utils.delay_print("Bot: "+response, t=0.01)
print("\n")
conversation_history.append(utterance)
conversation_history.append(response)
if stop_flag == 1:
break
# break
|
susi_loop.py
|
"""
Processing logic of susi_linux
"""
import time
import os
import re
import logging
import queue
from threading import Thread, Timer, current_thread
from datetime import datetime
from urllib.parse import urljoin
import speech_recognition as sr
import requests
import json_config
import json
import speech_recognition
from speech_recognition import Recognizer, Microphone
# from requests.exceptions import ConnectionError
import susi_python as susi
from .hardware_components.lights import lights
from .internet_test import internet_on
from .action_scheduler import ActionScheduler
from .player import player
from susi_config import SusiConfig
from .speech import TTS
logger = logging.getLogger(__name__)
try:
import RPi.GPIO as GPIO
except ImportError:
logger.warning("This device doesn't have GPIO port")
GPIO = None
class SusiLoop():
"""The main SUSI loop dealing with hotword detection, voice recognition,
server communication, action processing, etc"""
def __init__(self, renderer=None):
if GPIO:
try:
GPIO.setmode(GPIO.BCM)
GPIO.setup(27, GPIO.OUT)
GPIO.setup(22, GPIO.OUT)
except RuntimeError as e:
logger.error(e)
thread1 = Thread(target=self.server_checker, name="ServerCheckerThread")
thread1.daemon = True
thread1.start()
recognizer = Recognizer()
# this was False in the old state machine, but reading the API docs
# https://github.com/Uberi/speech_recognition/blob/master/reference/library-reference.rst
# it seems that True is actually better!
recognizer.dynamic_energy_threshold = True
recognizer.energy_threshold = 2000
self.recognizer = recognizer
self.susi = susi
self.renderer = renderer
self.server_url = "https://127.0.0.1:4000"
self.action_schduler = ActionScheduler()
self.action_schduler.start()
self.event_queue = queue.Queue()
self.idle = True
self.supported_languages = None
try:
res = requests.get('http://ip-api.com/json').json()
self.susi.update_location(
longitude=res['lon'], latitude=res['lat'],
country_name=res['country'], country_code=res['countryCode'])
except ConnectionError as e:
logger.error(e)
self.susi_config = SusiConfig()
self.lang = self.susi_config.get('language')
self.path_base = self.susi_config.get('path.base')
self.sound_detection = os.path.abspath(
os.path.join(self.path_base,
self.susi_config.get('path.sound.detection')))
self.sound_problem = os.path.abspath(
os.path.join(self.path_base,
self.susi_config.get('path.sound.problem')))
self.sound_error_recognition = os.path.abspath(
os.path.join(self.path_base,
self.susi_config.get('path.sound.error.recognition')))
self.sound_error_timeout = os.path.abspath(
os.path.join(self.path_base,
self.susi_config.get('path.sound.error.timeout')))
if self.susi_config.get('susi.mode') == 'authenticated':
try:
susi.sign_in(email=self.susi_config.get('susi.user'),
password=self.susi_config.get('susi.pass'))
except Exception as e:
                logger.error('Some error occurred in login. Check your login details with susi-config.\n%s', e)
if self.susi_config.get('hotword.engine') == 'Snowboy':
from .hotword_engine.snowboy_detector import SnowboyDetector
hotword_model = "susi.pmdl"
if self.susi_config.get('hotword.model'):
logger.debug("Using configured hotword model: " + self.susi_config.get('hotword.model'))
                hotword_model = self.susi_config.get('hotword.model')
self.hotword_detector = SnowboyDetector(model=hotword_model)
elif self.susi_config.get('hotword.engine') == 'PocketSphinx':
from .hotword_engine.sphinx_detector import PocketSphinxDetector
self.hotword_detector = PocketSphinxDetector()
elif self.susi_config.get('hotword.engine') == 'None':
self.hotword_detector = None
else:
raise ValueError(f"Unrecognized value for hotword.engine: {self.susi_config.get('hotword.engine')}")
if self.susi_config.get('wakebutton') == 'enabled':
logger.info("Susi has the wake button enabled")
if self.susi_config.get('device') == 'RaspberryPi':
logger.info("Susi runs on a RaspberryPi")
from .hardware_components.rpi_wake_button import RaspberryPiWakeButton
self.wake_button = RaspberryPiWakeButton()
else:
logger.warning("Susi is not running on a RaspberryPi")
self.wake_button = None
else:
logger.warning("Susi has the wake button disabled")
self.wake_button = None
stt = self.susi_config.get('stt')
if stt == 'google' or stt == 'watson' or stt == 'bing':
# for internet based services we assume any language supported
self.supported_languages = None
elif stt == 'pocketsphinx':
ps_data_dir = os.path.join(os.path.dirname(os.path.realpath(speech_recognition.__file__)), "pocketsphinx-data")
self.supported_languages = [ f.name for f in os.scandir(ps_data_dir) if f.is_dir() ]
logger.debug(f"Found supported languages for PocketSphinx: {self.supported_languages}")
elif stt == 'deepspeech-local':
ds_data_dir = os.path.join(os.path.dirname(os.path.realpath(speech_recognition.__file__)), "deepspeech-data")
self.supported_languages = [ f.name for f in os.scandir(ds_data_dir) if f.is_dir() ]
logger.debug(f"Found supported languages for DeepSpeech: {self.supported_languages}")
elif stt == 'vosk':
vosk_data_dir = os.path.join(os.path.dirname(os.path.realpath(speech_recognition.__file__)), "vosk-data")
self.vosk_base_model_dir = vosk_data_dir
self.supported_languages = [ f.name for f in os.scandir(vosk_data_dir) if f.is_dir() ]
logger.debug(f"Found supported languages for Vosk: {self.supported_languages}")
            if self.lang not in self.supported_languages:
                self.lang = "en"
from vosk import Model
self.vosk_model = Model(f"{vosk_data_dir}/{self.lang}")
else:
self.supported_languages = None
logger.warn(f"Unknown stt setting: {stt}")
if self.susi_config.get('stt') == 'deepspeech-local':
self.microphone = Microphone(sample_rate=16000)
else:
self.microphone = Microphone()
if self.hotword_detector is not None:
self.hotword_detector.subject.subscribe(
on_next=lambda x: self.hotword_detected_callback())
if self.wake_button is not None:
self.wake_button.subject.subscribe(
on_next=lambda x: self.hotword_detected_callback())
if self.renderer is not None:
self.renderer.subject.subscribe(
on_next=lambda x: self.hotword_detected_callback())
if self.action_schduler is not None:
self.action_schduler.subject.subscribe(
on_next=lambda x: self.queue_event(x))
def queue_event(self, event):
""" queue a delayed event"""
self.event_queue.put(event)
def hotword_listener(self):
""" thread function for listening to the hotword"""
# this function never returns ...
self.hotword_detector.start()
def server_checker(self):
""" thread function for checking the used server being alive"""
response_one = None
test_params = {
'q': 'Hello',
'timezoneOffset': int(time.timezone / 60)
}
while response_one is None:
try:
logger.debug("checking for local server")
url = urljoin(self.server_url, '/susi/chat.json')
                response_one = requests.get(url, test_params)  # plain requests returns the Response directly (no .result())
api_endpoint = self.server_url
susi.use_api_endpoint(api_endpoint)
except AttributeError:
time.sleep(10)
continue
except ConnectionError:
time.sleep(10)
continue
def start(self, background = False):
""" start processing of audio events """
if self.hotword_detector is not None:
hotword_thread = Thread(target=self.hotword_listener, name="HotwordDetectorThread")
hotword_thread.daemon = True
hotword_thread.start()
if background:
queue_loop_thread = Thread(target=self.queue_loop, name="QueueLoopThread")
queue_loop_thread.daemon = True
queue_loop_thread.start()
else:
self.queue_loop()
def queue_loop(self):
while True:
# block until events are available
ev = self.event_queue.get(block = True)
logger.debug("Got event from event queue, trying to deal with it")
# wait until idle
while True:
logger.debug("Waiting to become idle for planned action")
if not self.idle:
time.sleep(1)
continue
logger.debug("We are idle now ...")
self.idle = False
self.deal_with_answer(ev)
# back from processing
player.restore_softvolume()
if GPIO:
try:
GPIO.output(27, False)
GPIO.output(22, False)
except RuntimeError:
pass
self.idle = True
break
def notify_renderer(self, message, payload=None):
""" notify program renderer """
if self.renderer is not None:
self.renderer.receive_message(message, payload)
def hotword_detected_callback(self):
"""
Callback when the hotword is detected. Does the full processing
logic formerly contained in different states
"""
logger.debug("Entering hotword callback")
# don't do anything if we are already busy
if not self.idle:
logger.debug("Callback called while already busy, returning immediately from callback")
return
logger.debug("We are idle, so work on it!")
self.idle = False
# beep
player.beep(self.sound_detection)
if GPIO:
GPIO.output(22, True)
audio = None
logger.debug("notify renderer for listening")
self.notify_renderer('listening')
with self.microphone as source:
try:
logger.debug("listening to voice command")
audio = self.recognizer.listen(source, timeout=10.0, phrase_time_limit=5)
except sr.WaitTimeoutError:
logger.debug("timeout reached waiting for voice command")
self.deal_with_error('ListenTimeout')
logger.debug("delaying idle setting for 0.05s")
Timer(interval=0.05, function=self.set_idle).start()
return
if GPIO:
GPIO.output(22, False)
lights.off()
lights.think()
try:
logger.debug("Converting audio to text")
value = self.recognize_audio(audio=audio, recognizer=self.recognizer)
logger.debug("recognize_audio => %s", value)
self.notify_renderer('recognized', value)
if self.deal_with_answer(value):
pass
else:
logger.error("Error dealing with answer")
except sr.UnknownValueError as e:
logger.error("UnknownValueError from SpeechRecognition: %s", e)
self.deal_with_error('RecognitionError')
logger.debug("delaying idle setting for 0.05s")
Timer(interval=0.05, function=self.set_idle).start()
return
def set_idle(self):
logger.debug("Switching to idle mode")
self.notify_renderer('idle')
self.idle = True
def __speak(self, text):
"""Method to set the default TTS for the Speaker"""
tts = self.susi_config.get('tts')
if tts == 'google':
TTS.speak_google_tts(text)
elif tts == 'flite':
logger.info("Using flite for TTS") # indication for using an offline music player
TTS.speak_flite_tts(text)
elif tts == 'watson':
TTS.speak_watson_tts(text)
else:
raise ValueError("unknown key for tts", tts)
def recognize_audio(self, recognizer, audio):
"""Use the configured STT method to convert spoken audio to text"""
stt = self.susi_config.get('stt')
lang = self.susi_config.get('language')
# Try to adjust language to what is available
# None indicates any language supported, so use it as is
if self.supported_languages is not None:
if len(self.supported_languages) == 0:
raise ValueError(f"No supported language for the current STT {stt}")
if "en-US" in self.supported_languages:
default = "en-US"
else:
default = self.supported_languages[0]
            if lang not in self.supported_languages:
                if len(lang) < 2:
                    logger.warning(f"Unsupported language code {lang}, using {default}")
                    lang = default
                else:
                    langshort = lang[0:2].lower()
                    for l in self.supported_languages:
                        if langshort == l[0:2].lower():
                            logger.debug(f"Using language code {l} instead of {lang}")
                            lang = l
                            break
                # We should now have a proper language code in lang; if not, warn and reset
                if lang not in self.supported_languages:
                    logger.warning(f"Unsupported language code {lang}, using {default}")
                    lang = default
logger.info("Trying to recognize audio with %s in language: %s", stt, lang)
if stt == 'google':
return recognizer.recognize_google(audio, language=lang)
elif stt == 'watson':
username = self.susi_config.get('watson.stt.user')
password = self.susi_config.get('watson.stt.pass')
return recognizer.recognize_ibm(
username=username, password=password, language=lang, audio_data=audio)
        elif stt == 'pocketsphinx':
return recognizer.recognize_sphinx(audio, language=lang)
elif stt == 'bing':
api_key = self.susi_config.get('bing.api')
return recognizer.recognize_bing(audio_data=audio, key=api_key, language=lang)
elif stt == 'deepspeech-local':
return recognizer.recognize_deepspeech(audio, language=lang)
elif stt == 'vosk':
# TODO language support not implemented, we always use
# the first language
recognizer.vosk_model = self.vosk_model
ret = json.loads(recognizer.recognize_vosk(audio, language=lang))
if ("text" in ret):
return ret["text"]
else:
logger.error("Cannot detect text")
return ""
else:
logger.error(f"Unknown STT setting: {stt}")
logger.error("Using DeepSpeech!")
return recognizer.recognize_deepspeech(audio, language=lang)
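    # Illustrative note (added): the fallback above matches a configured code to an installed
    # model by its two-letter prefix, e.g. with supported_languages == ["en-US", "de"]:
    #   "de-DE" -> "de"      (prefix match)
    #   "fr-FR" -> "en-US"   (no match, fall back to the default)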
def deal_with_error(self, payload=None):
"""deal with errors happening during processing of audio events"""
if payload == 'RecognitionError':
logger.debug("ErrorState Recognition Error")
self.notify_renderer('error', 'recognition')
lights.speak()
player.say(self.sound_error_recognition)
lights.off()
elif payload == 'ConnectionError':
self.notify_renderer('error', 'connection')
self.susi_config.set('tts', 'flite')
self.susi_config.set('stt', 'pocketsphinx')
print("Internet Connection not available")
lights.speak()
lights.off()
logger.info("Changed to offline providers")
elif payload == 'ListenTimeout':
self.notify_renderer('error', 'timeout')
lights.speak()
player.say(self.sound_error_timeout)
lights.off()
else:
print("Error: {} \n".format(payload))
self.notify_renderer('error')
lights.speak()
player.say(self.sound_problem)
lights.off()
def deal_with_answer(self, payload=None):
"""processing logic - how to deal with answers from the server"""
try:
no_answer_needed = False
if isinstance(payload, str):
logger.debug("Sending payload to susi server: %s", payload)
reply = self.susi.ask(payload)
else:
logger.debug("Executing planned action response: %s", payload)
reply = payload
if GPIO:
GPIO.output(27, True)
self.notify_renderer('speaking', payload={'susi_reply': reply})
if 'planned_actions' in reply.keys():
logger.debug("planning action: ")
for plan in reply['planned_actions']:
logger.debug("plan = " + str(plan))
# plan answers look like this:
# plan = {'planned_actions': [{'language': 'en', 'answer': 'ALARM', 'plan_delay': 300001,
# 'plan_date': '2020-01-09T02:05:10.377Z'}], 'language': 'en', 'answer': 'alarm set for in 5 minutes'}
                    # we use time.time as the scheduler's timefunc, so convert both the delay
                    # and the absolute time to the same format: float seconds since the epoch.
                    # Unfortunately datetime.fromisoformat in the standard library does not
                    # parse the trailing 'Z' suffix, see
                    # https://discuss.python.org/t/parse-z-timezone-suffix-in-datetime/2220
                    # so we replace it manually with +00:00.
                    # We send both the delay and the absolute time in case one of the two is
                    # missing; the scheduler prefers the delay value.
plan_date_sec = datetime.fromisoformat(re.sub('Z$', '+00:00', plan['plan_date'])).timestamp()
self.action_schduler.add_event(int(plan['plan_delay']) / 1000, plan_date_sec, plan)
# first responses WITHOUT answer key!
# {'answer': 'Audio volume is now 10 percent.', 'volume': '10'}
if 'volume' in reply.keys():
no_answer_needed = True
player.volume(reply['volume'])
player.say(self.sound_detection)
if 'media_action' in reply.keys():
action = reply['media_action']
if action == 'pause':
no_answer_needed = True
player.pause()
lights.off()
lights.wakeup()
elif action == 'resume':
no_answer_needed = True
player.resume()
elif action == 'restart':
no_answer_needed = True
player.restart()
elif action == 'next':
no_answer_needed = True
player.next()
elif action == 'previous':
no_answer_needed = True
player.previous()
elif action == 'shuffle':
no_answer_needed = True
player.shuffle()
else:
logger.error('Unknown media action: %s', action)
# {'stop': <susi_python.models.StopAction object at 0x7f4641598d30>}
if 'stop' in reply.keys():
no_answer_needed = True
player.stop()
if 'answer' in reply.keys():
logger.info('Susi: %s', reply['answer'])
lights.off()
lights.speak()
self.__speak(reply['answer'])
lights.off()
else:
if not no_answer_needed and 'identifier' not in reply.keys():
lights.off()
lights.speak()
self.__speak("I don't have an answer to this")
lights.off()
if 'language' in reply.keys():
answer_lang = reply['language']
if answer_lang != self.susi_config.get("language"):
logger.info("Switching language to: %s", answer_lang)
# switch language
self.susi_config.set('language', answer_lang)
# TODO
# for vosk we need to update self.vosk_model = Model(f"{self.vosk_model_base}/{answer_lang}")
# given that the language is supported!
# answer to "play ..."
# {'identifier': 'ytd-04854XqcfCY', 'answer': 'Playing Queen - We Are The Champions (Official Video)'}
if 'identifier' in reply.keys():
url = reply['identifier']
logger.debug("Playing " + url)
if url[:3] == 'ytd':
player.playytb(url[4:])
else:
player.play(url)
if 'table' in reply.keys():
table = reply['table']
for h in table.head:
print('%s\t' % h, end='')
self.__speak(h)
print()
for datum in table.data[0:4]:
for value in datum:
print('%s\t' % value, end='')
self.__speak(value)
print()
if 'rss' in reply.keys():
rss = reply['rss']
entities = rss['entities']
count = rss['count']
for entity in entities[0:count]:
logger.debug(entity.title)
self.__speak(entity.title)
except ConnectionError:
self.deal_with_error('ConnectionError')
return False
except Exception as e:
logger.error('Unknown error: %s', e)
return False
return True
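# Added sketch (not part of the original module and not called above): the 'Z'-suffix handling
# used in deal_with_answer, as a standalone helper for planned-action timestamps.
def _plan_date_to_epoch(iso_string):
    """Convert an ISO timestamp such as '2020-01-09T02:05:10.377Z' to seconds since the epoch."""
    return datetime.fromisoformat(re.sub('Z$', '+00:00', iso_string)).timestamp()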
|
pgyr.py
|
# Written by Max Serrano
# Sistop-2020-1
# Exercise: cats and mice (gatos y ratones)
# Language: Python version 3.7
import threading
import time
import random
mutex1 = threading.Semaphore(1)  # protects the plate list (l_platos)
mutex2 = threading.Semaphore(1)  # protects the "comer" (eating) counter
platos = threading.Semaphore(0)  # guards the plates
l_platos = []  # plates of food that have been generated and stored
gyr = []  # cats and mice that are currently eating
comer = 0  # tracks whether someone is eating
c_comen = 1  # only one can eat at a time
comiendo = threading.Semaphore(0)  # signals who is eating
p_vacio = 0  # plates that are already empty
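# (Added note) Synchronization scheme: mutex1 guards the shared lists l_platos/gyr, mutex2 guards
# the 'comer' counter, 'platos' counts plates available to eat, and 'comiendo' signals that a
# plate has actually been eaten.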
class plato:
def __init__(self):
self.plato = random.random()
print ("plato con comida # %1.3f" % self.plato)
time.sleep(self.plato*1/3)
def vacio(self):
numero=self.plato
time.sleep(self.plato*2/3)
return(numero)
class mushus:
def __init__(self):
self.mushus = random.random()
print ("Generando al gato # %1.3f" % self.mushus)
time.sleep(self.mushus)
def numero(self):
numero=self.mushus
return(numero)
class ratones:
def __init__(self):
self.ratones = random.random()
print ("Generando al raton # %1.3f" % self.ratones)
time.sleep(self.ratones)
def numero(self):
numero=self.ratones
return(numero)
def hay_plato():
global comer
global c_comen
global p_vacio
while True:
numero = plato().vacio()
        evento = plato()  # a plate with food
mutex2.acquire()
        if comer < 0:  # the eating counter went negative
comer=0
if comer == c_comen:
mutex2.release()
            if (p_vacio != 0):  # report plates that are already empty
                print("Someone ate the plate (%1.3f)" % p_vacio)
comiendo.acquire()
else:
mutex2.release()
print ("Alguien quiere comer (%1.3f)" % numero)
comer += 1
mutex1.acquire()
l_platos.append(evento)
if (len(gyr) != 0):
            animales = gyr.pop()  # remove the animal that has already eaten
mutex1.release()
        platos.release()  # release the semaphore: a plate is now available
def gato():
global comer
global c_comen
global p_vacio
while True:
numero = plato().vacio()
        evento = mushus()  # generate a cat
        animal = mushus().numero()
        platos.acquire()  # take the plate semaphore in order to eat
mutex2.acquire()
        if comer == c_comen:  # if they are allowed to eat
            print("\tI am a cat (%1.3f) and I am going to eat this one (%1.3f)" % (animal, numero))
            p_vacio = numero  # record which plate is now empty
comiendo.release()
mutex2.release()
mutex1.acquire()
comer -= 1
gyr.append(evento)#se agrega al gato generado
plat = l_platos.pop()#se saca el plato vacio
mutex1.release()
def raton():
global comer
global c_comen
global p_vacio
while True:
numero = plato().vacio()
        evento = ratones()  # generate a mouse
        animal = ratones().numero()
        platos.acquire()  # take the plate semaphore in order to eat
mutex2.acquire()
        if comer == c_comen:  # if they are allowed to eat
            print("\t\tI am the MOUSE (%1.3f) and I am going to eat this one (%1.3f)" % (animal, numero))
            p_vacio = numero  # record which plate was eaten
comiendo.release()
mutex2.release()
mutex1.acquire()
comer -= 1
gyr.append(evento)#se agrega al raton
plat = l_platos.pop()#se saca el plato vacio
mutex1.release()
# start the threads
threading.Thread(target=hay_plato, args=[]).start()
threading.Thread(target=gato, args=[]).start()
threading.Thread(target=raton, args=[]).start()
|
mainloop.py
|
import gyro
import gpstest as gps
import ArduinoSlave as mc
import threading
import time
import servotest as servo
from model_static import sail
from os import path
def log_sensor_data():
    prev = time.time()
    output = "sensorlogs/"
    # Round down to the previous 5 minutes.
    prev = prev - prev % 300
    if (not path.exists(output + "{:.0f}.txt".format(prev))):
        with open(output + "{:.0f}.txt".format(prev), 'w') as f:
            f.write("windangle windspeed sailpos rudderpos\tgyrox gyroy gyroz accelx accely accelz accelxnorm accelynorm accelznorm xrot yrot temperature\tlat lon\n")
    with open(output + "{:.0f}.txt".format(prev), 'a') as f:
        line = ""
        for i in mc.get_values():
            line += str(i) + " "
        line += "\t"
        line += gyro.get_values() + "\t"
        line += gps.get_values()
        f.write(line + '\n')
gyro_t = threading.Thread(target=gyro.__sense__, daemon=True)
gps_t = threading.Thread(target=gps.__sense__, daemon=True)
mc_t = threading.Thread(target=mc.__sense__, daemon=True)
gyro_t.start()
gps_t.start()
mc_t.start()
manual = True
while (1):
    time.sleep(5)
    # gyro.printvalues()
    # gpstest.print_pos()
    if (manual):
        x = mc.getx()
        y = mc.gety()
        servo.set_rudder(y/255*100)
        # servo.set_sail(x/255*100)
        log_sensor_data()
    else:
        wind_dir = mc.get_values()[0]
        boat_angle = gyro.get_angle()
        target_lat = -75.477791
        target_lon = 40.613953
        [r, s] = sail(gps.lat, gps.lon, target_lat, target_lon, wind_dir, boat_angle)
        servo.set_rudder(r/18*10)
        # servo.set_sail(s)
|
part_test.py
|
import sys
import threading
sys.path.append('../../common')
from env_indigo import *
def outPrint(str, pid, output):
#output = None
if output == None:
print(str)
else:
old_out = output[pid]
output[pid] = '{0}\n{1}'.format(old_out, str)
def insertSmi(db, pid, input_smi, output=None):
index = 0
wrongStructures = 0
#outPrint('Inserting molecules from:{1}'.format(pid, input_smi), pid, output)
smi_path = joinPathPy(os.path.join('molecules', input_smi), __file__)
for mol in indigo.iterateSmilesFile(smi_path):
try:
db.insert(mol)
        except BingoException as e:
#outPrint('Structure {0} excluded: {1}'.format(index, getIndigoExceptionText(e)), pid, output)
wrongStructures += 1
index += 1
if index % 1000 == 0:
print('Structures inserted: {0}'.format(index))
#outPrint('Finished indexing {1} structures. {2} wrong structures excluded'.format(pid, index, wrongStructures), pid, output)
def makeSearchSim(db, pid, query, min, max, options, output=None ):
#outPrint('\n\nSimSearch with metric {0}'.format(options.encode('ascii')), pid, output)
search = db.searchSim(query, min, max, options)
cnt = 0
while search.next():
#outPrint('Mol #{0} with sim value {1}'.format(search.getCurrentId(), search.getCurrentSimilarityValue()), pid, output)
cnt = cnt + 1;
#f1=open('sim_out.txt', 'a+')
#f1.write('PID {0}) Total count in db #{1} {2}\n'.format(pid, db, cnt))
outPrint('PID {0}) Total count {1}'.format(pid, cnt), pid, output)
def makeSearchSub(db, pid, query, options, output=None):
#outPrint('\n\nSubSearch:'.format(db), pid, output)
search = db.searchSub(query, options)
cnt = 0
while search.next():
#outPrint('Mol #{0}'.format(search.getCurrentId()), pid, output)
cnt = cnt + 1
#f1=open('sub_out.txt', 'a+')
#f1.write('PID {0}) Total count in db #{1} {2}\n'.format(pid, db, cnt))
outPrint('PID {0}) Total count {1}'.format(pid, cnt), pid, output)
def makeSearchExact(db, pid, query, options, output=None):
#outPrint('ExactSearch:'.format(db), pid, output)
search = db.searchExact(query, options)
cnt = 0
while search.next():
#outPrint('Mol #{0}'.format(search.getCurrentId()), pid, output)
cnt = cnt + 1
#f1=open('./exact_out.txt', 'a+')
#f1.write('PID {0}) Total count in db #{1} {2}\n'.format(pid, db, cnt))
outPrint('PID {0}) Total count {1}'.format(pid, cnt), pid, output)
def partCreate():
bingo = Bingo.createDatabaseFile(indigo, joinPathPy('mol_part_db', __file__), 'molecule', 'mt_size:2000')
insertSmi(bingo, 0, 'sample_100000.smi')
bingo.close()
def partTest(size, type = 'sub'):
bingo = Bingo.loadDatabaseFile(indigo, joinPathPy('mol_part_db', __file__), '')
index = 0
for m in indigo.iterateSDFile(joinPathPy('molecules/rand_queries_small.sdf', __file__)):
try:
print('\nQuery #{0}'.format(index + 1))
outputs = ['' for i in range(size + 1)]
threads = []
if type == 'sub':
qmol = indigo.loadQueryMolecule(m.rawData())
threads.append(threading.Thread(target=makeSearchSub, args=(bingo, 0, qmol, '', outputs)))
for i in range(1, size + 1):
threads.append(threading.Thread(target=makeSearchSub, args=(bingo, i, qmol, 'part:{0}/{1}'.format(i, size), outputs)))
elif type == 'exact':
qmol = indigo.loadMolecule(m.rawData())
threads.append(threading.Thread(target=makeSearchExact, args=(bingo, 0, qmol, '', outputs)))
for i in range(1, size + 1):
threads.append(threading.Thread(target=makeSearchExact, args=(bingo, i, qmol, 'part:{0}/{1}'.format(i, size), outputs)))
else:
qmol = indigo.loadMolecule(m.rawData())
threads.append(threading.Thread(target=makeSearchSim, args=(bingo, 0, qmol, 0.5, 1, '', outputs)))
for i in range(1, size + 1):
threads.append(threading.Thread(target=makeSearchSim, args=(bingo, i, qmol, 0.5, 1, 'part:{0}/{1}'.format(i, size), outputs)))
for t in threads:
t.start()
for t in threads:
t.join()
for out in outputs:
print(out)
except BingoException as e:
            print('Query {0} fail: {1}'.format(index + 1, getIndigoExceptionText(e)))
index += 1
bingo.close()
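# Added sketch (assumption, not used by the tests above): with the 'part:i/size' option each
# thread searches one partition of the database, so the per-partition hit counts are expected
# to add up to the count of the unpartitioned search stored at output index 0.
def partition_counts_consistent(outputs):
    import re
    counts = []
    for out in outputs:
        m = re.search(r'Total count (\d+)', out)
        if m:
            counts.append(int(m.group(1)))
    return len(counts) > 1 and counts[0] == sum(counts[1:])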
indigo = Indigo()
print('\n2 Database creating..\n')
partCreate()
print('\n Partial similarity search:\n')
partTest(3, 'sim')
print('\n Partial substructure search:\n')
partTest(3, 'sub')
print('\n Partial exact search:\n')
partTest(3, 'exact')
|
setup.py
|
#!/usr/bin/env python
#encoding: utf8
import os
import re
import sys
from setuptools import setup
from setuptools import find_packages
from setuptools.command.test import test as TestCommand
try:
import colorama
colorama.init()
from colorama import Fore
RESET = Fore.RESET
GREEN = Fore.GREEN
RED = Fore.RED
except ImportError, e:
RESET = ''
GREEN = ''
RED = ''
v = open(os.path.join(os.path.dirname(__file__), 'spyne', '__init__.py'), 'r')
VERSION = re.match(r".*__version__ = '(.*?)'", v.read(), re.S).group(1)
LONG_DESC = """Spyne aims to save the protocol implementers the hassle of
implementing their own remote procedure call api and the application programmers
the hassle of jumping through hoops just to expose their services using multiple
protocols and transports.
"""
try:
os.stat('CHANGELOG.rst')
LONG_DESC += "\n\n" + open('CHANGELOG.rst', 'r').read()
except OSError:
pass
SHORT_DESC="""A transport and architecture agnostic rpc library that focuses on
exposing public services with a well-defined API."""
def call_test(f, a, tests):
import spyne.test
from glob import glob
from itertools import chain
from multiprocessing import Process, Queue
tests_dir = os.path.dirname(spyne.test.__file__)
a.extend(chain(*[glob("%s/%s" % (tests_dir, test)) for test in tests]))
queue = Queue()
p = Process(target=_wrapper(f), args=[a, queue])
p.start()
p.join()
ret = queue.get()
if ret == 0:
print tests, "OK"
else:
print tests, "FAIL"
return ret
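# (Added note) call_test runs each batch of tests in a separate multiprocessing.Process so that
# one batch's global state (logging config, the twisted reactor, etc.) cannot leak into the
# next; the child's exit status is passed back through the Queue.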
def _wrapper(f):
def _(args, queue):
try:
retval = f(args)
except TypeError: # it's a pain to call trial.
sys.argv = ['trial']
sys.argv.extend(args)
retval = f()
queue.put(retval)
return _
_ctr = 0
def call_pytest(*tests):
global _ctr
import pytest
_ctr += 1
file_name = 'test_result.%d.xml' % _ctr
if os.path.isfile(file_name):
os.unlink(file_name)
return call_test(pytest.main, ['-v', '--tb=short', '--junitxml=%s' % file_name], tests)
def call_trial(*tests):
from twisted.scripts.trial import usage
from twisted.scripts.trial import Options
from twisted.scripts.trial import _makeRunner
from twisted.scripts.trial import _getSuite
def run():
config = Options()
config.parseOptions()
trialRunner = _makeRunner(config)
suite = _getSuite(config)
test_result = trialRunner.run(suite)
return int(not test_result.wasSuccessful())
return call_test(run, [], tests)
class RunTests(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
print "running tests"
ret = 0
ret = call_pytest('interface', 'model', 'protocol',
'test_null_server.py', 'test_service.py',
'test_soft_validation.py', 'test_util.py') or ret
ret = call_pytest('test_sqlalchemy.py') or ret
ret = call_pytest('interop/test_httprpc.py') or ret
ret = call_pytest('interop/test_soap_client_http.py') or ret
ret = call_pytest('interop/test_soap_client_zeromq.py') or ret
ret = call_pytest('interop/test_suds.py') or ret
ret = call_trial('interop/test_soap_client_http_twisted.py') or ret
if ret == 0:
print GREEN + "All that glisters is not gold." + RESET
else:
print RED + "Something is rotten in the state of Denmark." + RESET
raise SystemExit(ret)
test_reqs = [
'pytest', 'werkzeug', 'sqlalchemy', 'suds',
'pyparsing<1.99', 'lxml>=2.3', 'pyyaml', 'pyzmq', 'twisted', 'colorama',
'msgpack-python', 'psycopg2', 'webtest',
]
setup(
name='spyne',
packages=find_packages(),
version=VERSION,
description=SHORT_DESC,
long_description=LONG_DESC,
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Operating System :: OS Independent',
'Natural Language :: English',
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
keywords=('soap', 'wsdl', 'wsgi', 'zeromq', 'rest', 'rpc', 'json', 'http',
'msgpack', 'xml', 'django', 'pyramid', 'postgresql', 'sqlalchemy',
'werkzeug', 'twisted', 'yaml'),
author='Burak Arslan',
author_email='[email protected]',
maintainer='Burak Arslan',
maintainer_email='[email protected]',
url='http://spyne.io',
license='LGPL-2.1',
zip_safe=False,
install_requires=[
'pytz',
],
entry_points={
'console_scripts': [
'sort_wsdl=spyne.test.sort_wsdl:main',
]
},
tests_require = test_reqs,
cmdclass = {'test': RunTests},
)
|
test_logging.py
|
# Copyright 2001-2019 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2019 Vinay Sajip. All Rights Reserved.
"""
import logging
import logging.handlers
import logging.config
import codecs
import configparser
import copy
import datetime
import pathlib
import pickle
import io
import gc
import json
import os
import queue
import random
import re
import socket
import struct
import sys
import tempfile
from test.support.script_helper import assert_python_ok, assert_python_failure
from test import support
from test.support import os_helper
from test.support import socket_helper
from test.support import threading_helper
from test.support import warnings_helper
from test.support.logging_helper import TestHandler
import textwrap
import threading
import time
import unittest
import warnings
import weakref
import asyncore
from http.server import HTTPServer, BaseHTTPRequestHandler
import smtpd
from urllib.parse import urlparse, parse_qs
from socketserver import (ThreadingUDPServer, DatagramRequestHandler,
ThreadingTCPServer, StreamRequestHandler)
try:
import win32evtlog, win32evtlogutil, pywintypes
except ImportError:
win32evtlog = win32evtlogutil = pywintypes = None
try:
import zlib
except ImportError:
pass
class BaseTest(unittest.TestCase):
"""Base class for logging tests."""
log_format = "%(name)s -> %(levelname)s: %(message)s"
expected_log_pat = r"^([\w.]+) -> (\w+): (\d+)$"
message_num = 0
def setUp(self):
"""Setup the default logging stream to an internal StringIO instance,
so that we can examine log output as we want."""
self._threading_key = threading_helper.threading_setup()
logger_dict = logging.getLogger().manager.loggerDict
logging._acquireLock()
try:
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.saved_loggers = saved_loggers = logger_dict.copy()
self.saved_name_to_level = logging._nameToLevel.copy()
self.saved_level_to_name = logging._levelToName.copy()
self.logger_states = logger_states = {}
for name in saved_loggers:
logger_states[name] = getattr(saved_loggers[name],
'disabled', None)
finally:
logging._releaseLock()
# Set two unused loggers
self.logger1 = logging.getLogger("\xab\xd7\xbb")
self.logger2 = logging.getLogger("\u013f\u00d6\u0047")
self.root_logger = logging.getLogger("")
self.original_logging_level = self.root_logger.getEffectiveLevel()
self.stream = io.StringIO()
self.root_logger.setLevel(logging.DEBUG)
self.root_hdlr = logging.StreamHandler(self.stream)
self.root_formatter = logging.Formatter(self.log_format)
self.root_hdlr.setFormatter(self.root_formatter)
if self.logger1.hasHandlers():
hlist = self.logger1.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
if self.logger2.hasHandlers():
hlist = self.logger2.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
self.root_logger.addHandler(self.root_hdlr)
self.assertTrue(self.logger1.hasHandlers())
self.assertTrue(self.logger2.hasHandlers())
def tearDown(self):
"""Remove our logging stream, and restore the original logging
level."""
self.stream.close()
self.root_logger.removeHandler(self.root_hdlr)
while self.root_logger.handlers:
h = self.root_logger.handlers[0]
self.root_logger.removeHandler(h)
h.close()
self.root_logger.setLevel(self.original_logging_level)
logging._acquireLock()
try:
logging._levelToName.clear()
logging._levelToName.update(self.saved_level_to_name)
logging._nameToLevel.clear()
logging._nameToLevel.update(self.saved_name_to_level)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
manager = logging.getLogger().manager
manager.disable = 0
loggerDict = manager.loggerDict
loggerDict.clear()
loggerDict.update(self.saved_loggers)
logger_states = self.logger_states
for name in self.logger_states:
if logger_states[name] is not None:
self.saved_loggers[name].disabled = logger_states[name]
finally:
logging._releaseLock()
self.doCleanups()
threading_helper.threading_cleanup(*self._threading_key)
def assert_log_lines(self, expected_values, stream=None, pat=None):
"""Match the collected log lines against the regular expression
self.expected_log_pat, and compare the extracted group values to
the expected_values list of tuples."""
stream = stream or self.stream
pat = re.compile(pat or self.expected_log_pat)
actual_lines = stream.getvalue().splitlines()
self.assertEqual(len(actual_lines), len(expected_values))
for actual, expected in zip(actual_lines, expected_values):
match = pat.search(actual)
if not match:
self.fail("Log line does not match expected pattern:\n" +
actual)
self.assertEqual(tuple(match.groups()), expected)
s = stream.read()
if s:
self.fail("Remaining output at end of log stream:\n" + s)
def next_message(self):
"""Generate a message consisting solely of an auto-incrementing
integer."""
self.message_num += 1
return "%d" % self.message_num
class BuiltinLevelsTest(BaseTest):
"""Test builtin levels and their inheritance."""
def test_flat(self):
# Logging levels in a flat logger namespace.
m = self.next_message
ERR = logging.getLogger("ERR")
ERR.setLevel(logging.ERROR)
INF = logging.LoggerAdapter(logging.getLogger("INF"), {})
INF.setLevel(logging.INFO)
DEB = logging.getLogger("DEB")
DEB.setLevel(logging.DEBUG)
# These should log.
ERR.log(logging.CRITICAL, m())
ERR.error(m())
INF.log(logging.CRITICAL, m())
INF.error(m())
INF.warning(m())
INF.info(m())
DEB.log(logging.CRITICAL, m())
DEB.error(m())
DEB.warning(m())
DEB.info(m())
DEB.debug(m())
# These should not log.
ERR.warning(m())
ERR.info(m())
ERR.debug(m())
INF.debug(m())
self.assert_log_lines([
('ERR', 'CRITICAL', '1'),
('ERR', 'ERROR', '2'),
('INF', 'CRITICAL', '3'),
('INF', 'ERROR', '4'),
('INF', 'WARNING', '5'),
('INF', 'INFO', '6'),
('DEB', 'CRITICAL', '7'),
('DEB', 'ERROR', '8'),
('DEB', 'WARNING', '9'),
('DEB', 'INFO', '10'),
('DEB', 'DEBUG', '11'),
])
def test_nested_explicit(self):
# Logging levels in a nested namespace, all explicitly set.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
# These should log.
INF_ERR.log(logging.CRITICAL, m())
INF_ERR.error(m())
# These should not log.
INF_ERR.warning(m())
INF_ERR.info(m())
INF_ERR.debug(m())
self.assert_log_lines([
('INF.ERR', 'CRITICAL', '1'),
('INF.ERR', 'ERROR', '2'),
])
def test_nested_inherited(self):
# Logging levels in a nested namespace, inherited from parent loggers.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
INF_UNDEF = logging.getLogger("INF.UNDEF")
INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
UNDEF = logging.getLogger("UNDEF")
# These should log.
INF_UNDEF.log(logging.CRITICAL, m())
INF_UNDEF.error(m())
INF_UNDEF.warning(m())
INF_UNDEF.info(m())
INF_ERR_UNDEF.log(logging.CRITICAL, m())
INF_ERR_UNDEF.error(m())
# These should not log.
INF_UNDEF.debug(m())
INF_ERR_UNDEF.warning(m())
INF_ERR_UNDEF.info(m())
INF_ERR_UNDEF.debug(m())
self.assert_log_lines([
('INF.UNDEF', 'CRITICAL', '1'),
('INF.UNDEF', 'ERROR', '2'),
('INF.UNDEF', 'WARNING', '3'),
('INF.UNDEF', 'INFO', '4'),
('INF.ERR.UNDEF', 'CRITICAL', '5'),
('INF.ERR.UNDEF', 'ERROR', '6'),
])
def test_nested_with_virtual_parent(self):
# Logging levels when some parent does not exist yet.
m = self.next_message
INF = logging.getLogger("INF")
GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
CHILD = logging.getLogger("INF.BADPARENT")
INF.setLevel(logging.INFO)
# These should log.
GRANDCHILD.log(logging.FATAL, m())
GRANDCHILD.info(m())
CHILD.log(logging.FATAL, m())
CHILD.info(m())
# These should not log.
GRANDCHILD.debug(m())
CHILD.debug(m())
self.assert_log_lines([
('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
('INF.BADPARENT.UNDEF', 'INFO', '2'),
('INF.BADPARENT', 'CRITICAL', '3'),
('INF.BADPARENT', 'INFO', '4'),
])
def test_regression_22386(self):
"""See issue #22386 for more information."""
self.assertEqual(logging.getLevelName('INFO'), logging.INFO)
self.assertEqual(logging.getLevelName(logging.INFO), 'INFO')
def test_issue27935(self):
fatal = logging.getLevelName('FATAL')
self.assertEqual(fatal, logging.FATAL)
def test_regression_29220(self):
"""See issue #29220 for more information."""
logging.addLevelName(logging.INFO, '')
self.addCleanup(logging.addLevelName, logging.INFO, 'INFO')
self.assertEqual(logging.getLevelName(logging.INFO), '')
self.assertEqual(logging.getLevelName(logging.NOTSET), 'NOTSET')
self.assertEqual(logging.getLevelName('NOTSET'), logging.NOTSET)
class BasicFilterTest(BaseTest):
"""Test the bundled Filter class."""
def test_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
filter_ = logging.Filter("spam.eggs")
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filter_)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filter_)
def test_callable_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
def filterfunc(record):
parts = record.name.split('.')
prefix = '.'.join(parts[:2])
return prefix == 'spam.eggs'
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filterfunc)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filterfunc)
def test_empty_filter(self):
f = logging.Filter()
r = logging.makeLogRecord({'name': 'spam.eggs'})
self.assertTrue(f.filter(r))
#
# First, we define our levels. There can be as many as you want - the only
# limitations are that they should be integers, the lowest should be > 0 and
# larger values mean less information being logged. If you need specific
# level values which do not fit into these limitations, you can use a
# mapping dictionary to convert between your application levels and the
# logging system.
#
SILENT = 120
TACITURN = 119
TERSE = 118
EFFUSIVE = 117
SOCIABLE = 116
VERBOSE = 115
TALKATIVE = 114
GARRULOUS = 113
CHATTERBOX = 112
BORING = 111
LEVEL_RANGE = range(BORING, SILENT + 1)
#
# Next, we define names for our levels. You don't need to do this - in which
# case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
SILENT : 'Silent',
TACITURN : 'Taciturn',
TERSE : 'Terse',
EFFUSIVE : 'Effusive',
SOCIABLE : 'Sociable',
VERBOSE : 'Verbose',
TALKATIVE : 'Talkative',
GARRULOUS : 'Garrulous',
CHATTERBOX : 'Chatterbox',
BORING : 'Boring',
}
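# (Added note) CustomLevelsAndFiltersTest.setUp() below registers these names via
# logging.addLevelName(level, name), so %(levelname)s renders e.g. 'Silent' instead of the
# default 'Level 120'.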
class GarrulousFilter(logging.Filter):
"""A filter which blocks garrulous messages."""
def filter(self, record):
return record.levelno != GARRULOUS
class VerySpecificFilter(logging.Filter):
"""A filter which blocks sociable and taciturn messages."""
def filter(self, record):
return record.levelno not in [SOCIABLE, TACITURN]
class CustomLevelsAndFiltersTest(BaseTest):
"""Test various filtering possibilities with custom logging levels."""
# Skip the logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
for k, v in my_logging_levels.items():
logging.addLevelName(k, v)
def log_at_all_levels(self, logger):
for lvl in LEVEL_RANGE:
logger.log(lvl, self.next_message())
def test_logger_filter(self):
# Filter at logger level.
self.root_logger.setLevel(VERBOSE)
# Levels >= 'Verbose' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
def test_handler_filter(self):
# Filter at handler level.
self.root_logger.handlers[0].setLevel(SOCIABLE)
try:
# Levels >= 'Sociable' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
finally:
self.root_logger.handlers[0].setLevel(logging.NOTSET)
def test_specific_filters(self):
# Set a specific filter object on the handler, and then add another
# filter object on the logger itself.
handler = self.root_logger.handlers[0]
specific_filter = None
garr = GarrulousFilter()
handler.addFilter(garr)
try:
self.log_at_all_levels(self.root_logger)
first_lines = [
# Notice how 'Garrulous' is missing
('Boring', '1'),
('Chatterbox', '2'),
('Talkative', '4'),
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
]
self.assert_log_lines(first_lines)
specific_filter = VerySpecificFilter()
self.root_logger.addFilter(specific_filter)
self.log_at_all_levels(self.root_logger)
self.assert_log_lines(first_lines + [
# Not only 'Garrulous' is still missing, but also 'Sociable'
# and 'Taciturn'
('Boring', '11'),
('Chatterbox', '12'),
('Talkative', '14'),
('Verbose', '15'),
('Effusive', '17'),
('Terse', '18'),
('Silent', '20'),
])
finally:
if specific_filter:
self.root_logger.removeFilter(specific_filter)
handler.removeFilter(garr)
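# Illustrative sketch, not run by the suite: the same handler-level plus
# logger-level filter combination exercised in test_specific_filters above,
# applied to a hypothetical 'example.filters' logger. A record must pass the
# logger's filters to reach any handler at all, and then each handler's own
# filters before it is emitted, so the two filters are effectively ANDed.
def _example_combined_filters():
    logger = logging.getLogger('example.filters')
    logger.setLevel(BORING)                  # let every custom level through
    handler = logging.StreamHandler()
    handler.addFilter(GarrulousFilter())     # handler drops GARRULOUS records
    logger.addFilter(VerySpecificFilter())   # logger drops SOCIABLE/TACITURN
    logger.addHandler(handler)
    logger.log(GARRULOUS, 'dropped by the handler filter')
    logger.log(SOCIABLE, 'dropped by the logger filter')
    logger.log(TERSE, 'passes both filters and is emitted')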
class HandlerTest(BaseTest):
def test_name(self):
h = logging.Handler()
h.name = 'generic'
self.assertEqual(h.name, 'generic')
h.name = 'anothergeneric'
self.assertEqual(h.name, 'anothergeneric')
self.assertRaises(NotImplementedError, h.emit, None)
def test_builtin_handlers(self):
# We can't actually *use* too many handlers in the tests,
# but we can try instantiating them with various options
if sys.platform in ('linux', 'darwin'):
for existing in (True, False):
fd, fn = tempfile.mkstemp()
os.close(fd)
if not existing:
os.unlink(fn)
h = logging.handlers.WatchedFileHandler(fn, delay=True)
if existing:
dev, ino = h.dev, h.ino
self.assertEqual(dev, -1)
self.assertEqual(ino, -1)
r = logging.makeLogRecord({'msg': 'Test'})
h.handle(r)
# Now remove the file.
os.unlink(fn)
self.assertFalse(os.path.exists(fn))
# The next call should recreate the file.
h.handle(r)
self.assertTrue(os.path.exists(fn))
else:
self.assertEqual(h.dev, -1)
self.assertEqual(h.ino, -1)
h.close()
if existing:
os.unlink(fn)
if sys.platform == 'darwin':
sockname = '/var/run/syslog'
else:
sockname = '/dev/log'
try:
h = logging.handlers.SysLogHandler(sockname)
self.assertEqual(h.facility, h.LOG_USER)
self.assertTrue(h.unixsocket)
h.close()
except OSError: # syslogd might not be available
pass
for method in ('GET', 'POST', 'PUT'):
if method == 'PUT':
self.assertRaises(ValueError, logging.handlers.HTTPHandler,
'localhost', '/log', method)
else:
h = logging.handlers.HTTPHandler('localhost', '/log', method)
h.close()
h = logging.handlers.BufferingHandler(0)
r = logging.makeLogRecord({})
self.assertTrue(h.shouldFlush(r))
h.close()
h = logging.handlers.BufferingHandler(1)
self.assertFalse(h.shouldFlush(r))
h.close()
def test_path_objects(self):
"""
Test that Path objects are accepted as filename arguments to handlers.
See Issue #27493.
"""
fd, fn = tempfile.mkstemp()
os.close(fd)
os.unlink(fn)
pfn = pathlib.Path(fn)
cases = (
(logging.FileHandler, (pfn, 'w')),
(logging.handlers.RotatingFileHandler, (pfn, 'a')),
(logging.handlers.TimedRotatingFileHandler, (pfn, 'h')),
)
if sys.platform in ('linux', 'darwin'):
cases += ((logging.handlers.WatchedFileHandler, (pfn, 'w')),)
for cls, args in cases:
h = cls(*args)
self.assertTrue(os.path.exists(fn))
h.close()
os.unlink(fn)
@unittest.skipIf(os.name == 'nt', 'WatchedFileHandler not appropriate for Windows.')
def test_race(self):
# Issue #14632 refers.
def remove_loop(fname, tries):
for _ in range(tries):
try:
os.unlink(fname)
self.deletion_time = time.time()
except OSError:
pass
time.sleep(0.004 * random.randint(0, 4))
del_count = 500
log_count = 500
self.handle_time = None
self.deletion_time = None
for delay in (False, True):
fd, fn = tempfile.mkstemp('.log', 'test_logging-3-')
os.close(fd)
remover = threading.Thread(target=remove_loop, args=(fn, del_count))
remover.daemon = True
remover.start()
h = logging.handlers.WatchedFileHandler(fn, delay=delay)
f = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s')
h.setFormatter(f)
try:
for _ in range(log_count):
time.sleep(0.005)
r = logging.makeLogRecord({'msg': 'testing' })
try:
self.handle_time = time.time()
h.handle(r)
except Exception:
print('Deleted at %s, '
'opened at %s' % (self.deletion_time,
self.handle_time))
raise
finally:
remover.join()
h.close()
if os.path.exists(fn):
os.unlink(fn)
# The implementation relies on os.register_at_fork existing, but we test
# based on os.fork existing because that is what users and this test use.
    # This helps ensure that when fork exists (the important concept), the
    # register_at_fork mechanism is also present and used.
@unittest.skipIf(not hasattr(os, 'fork'), 'Test requires os.fork().')
def test_post_fork_child_no_deadlock(self):
"""Ensure child logging locks are not held; bpo-6721 & bpo-36533."""
class _OurHandler(logging.Handler):
def __init__(self):
super().__init__()
self.sub_handler = logging.StreamHandler(
stream=open('/dev/null', 'wt'))
def emit(self, record):
self.sub_handler.acquire()
try:
self.sub_handler.emit(record)
finally:
self.sub_handler.release()
self.assertEqual(len(logging._handlers), 0)
refed_h = _OurHandler()
self.addCleanup(refed_h.sub_handler.stream.close)
refed_h.name = 'because we need at least one for this test'
self.assertGreater(len(logging._handlers), 0)
self.assertGreater(len(logging._at_fork_reinit_lock_weakset), 1)
test_logger = logging.getLogger('test_post_fork_child_no_deadlock')
test_logger.addHandler(refed_h)
test_logger.setLevel(logging.DEBUG)
locks_held__ready_to_fork = threading.Event()
fork_happened__release_locks_and_end_thread = threading.Event()
def lock_holder_thread_fn():
logging._acquireLock()
try:
refed_h.acquire()
try:
# Tell the main thread to do the fork.
locks_held__ready_to_fork.set()
# If the deadlock bug exists, the fork will happen
# without dealing with the locks we hold, deadlocking
# the child.
                    # Wait for a successful fork or an unreasonable amount of
                    # time before releasing our locks. To avoid a timing-based
                    # test we'd need a signal from os.fork() telling us when it
                    # has actually happened. Given this is a regression test
                    # for a fixed issue, the less reliable timing-based
                    # detection of a regression is an acceptable simplification.
# The test will always take at least this long. :(
fork_happened__release_locks_and_end_thread.wait(0.5)
finally:
refed_h.release()
finally:
logging._releaseLock()
lock_holder_thread = threading.Thread(
target=lock_holder_thread_fn,
name='test_post_fork_child_no_deadlock lock holder')
lock_holder_thread.start()
locks_held__ready_to_fork.wait()
pid = os.fork()
if pid == 0:
# Child process
try:
test_logger.info(r'Child process did not deadlock. \o/')
finally:
os._exit(0)
else:
# Parent process
test_logger.info(r'Parent process returned from fork. \o/')
fork_happened__release_locks_and_end_thread.set()
lock_holder_thread.join()
support.wait_process(pid, exitcode=0)
class BadStream(object):
def write(self, data):
raise RuntimeError('deliberate mistake')
class TestStreamHandler(logging.StreamHandler):
def handleError(self, record):
self.error_record = record
class StreamWithIntName(object):
level = logging.NOTSET
name = 2
class StreamHandlerTest(BaseTest):
def test_error_handling(self):
h = TestStreamHandler(BadStream())
r = logging.makeLogRecord({})
old_raise = logging.raiseExceptions
try:
h.handle(r)
self.assertIs(h.error_record, r)
h = logging.StreamHandler(BadStream())
with support.captured_stderr() as stderr:
h.handle(r)
msg = '\nRuntimeError: deliberate mistake\n'
self.assertIn(msg, stderr.getvalue())
logging.raiseExceptions = False
with support.captured_stderr() as stderr:
h.handle(r)
self.assertEqual('', stderr.getvalue())
finally:
logging.raiseExceptions = old_raise
def test_stream_setting(self):
"""
Test setting the handler's stream
"""
h = logging.StreamHandler()
stream = io.StringIO()
old = h.setStream(stream)
self.assertIs(old, sys.stderr)
actual = h.setStream(old)
self.assertIs(actual, stream)
# test that setting to existing value returns None
actual = h.setStream(old)
self.assertIsNone(actual)
def test_can_represent_stream_with_int_name(self):
h = logging.StreamHandler(StreamWithIntName())
self.assertEqual(repr(h), '<StreamHandler 2 (NOTSET)>')
# -- The following section could be moved into a server_helper.py module
# -- if it proves to be of wider utility than just test_logging
class TestSMTPServer(smtpd.SMTPServer):
"""
This class implements a test SMTP server.
:param addr: A (host, port) tuple which the server listens on.
You can specify a port value of zero: the server's
*port* attribute will hold the actual port number
                 used, which you can then use in client connections.
:param handler: A callable which will be called to process
incoming messages. The handler will be passed
the client address tuple, who the message is from,
a list of recipients and the message data.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
:param sockmap: A dictionary which will be used to hold
:class:`asyncore.dispatcher` instances used by
:func:`asyncore.loop`. This avoids changing the
:mod:`asyncore` module's global state.
"""
def __init__(self, addr, handler, poll_interval, sockmap):
smtpd.SMTPServer.__init__(self, addr, None, map=sockmap,
decode_data=True)
self.port = self.socket.getsockname()[1]
self._handler = handler
self._thread = None
self.poll_interval = poll_interval
def process_message(self, peer, mailfrom, rcpttos, data):
"""
Delegates to the handler passed in to the server's constructor.
Typically, this will be a test case method.
:param peer: The client (host, port) tuple.
:param mailfrom: The address of the sender.
:param rcpttos: The addresses of the recipients.
:param data: The message.
"""
self._handler(peer, mailfrom, rcpttos, data)
def start(self):
"""
Start the server running on a separate daemon thread.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
        t.daemon = True
t.start()
def serve_forever(self, poll_interval):
"""
Run the :mod:`asyncore` loop until normal termination
conditions arise.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
"""
asyncore.loop(poll_interval, map=self._map)
def stop(self):
"""
Stop the thread by closing the server instance.
Wait for the server thread to terminate.
"""
self.close()
threading_helper.join_thread(self._thread)
self._thread = None
asyncore.close_all(map=self._map, ignore_all=True)
class ControlMixin(object):
"""
This mixin is used to start a server on a separate thread, and
shut it down programmatically. Request handling is simplified - instead
of needing to derive a suitable RequestHandler subclass, you just
provide a callable which will be passed each received request to be
processed.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request. This handler is called on the
server thread, effectively meaning that requests are
processed serially. While not quite Web scale ;-),
this should be fine for testing applications.
:param poll_interval: The polling interval in seconds.
"""
def __init__(self, handler, poll_interval):
self._thread = None
self.poll_interval = poll_interval
self._handler = handler
self.ready = threading.Event()
def start(self):
"""
Create a daemon thread to run the server, and start it.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
        t.daemon = True
t.start()
def serve_forever(self, poll_interval):
"""
Run the server. Set the ready flag before entering the
service loop.
"""
self.ready.set()
super(ControlMixin, self).serve_forever(poll_interval)
def stop(self):
"""
Tell the server thread to stop, and wait for it to do so.
"""
self.shutdown()
if self._thread is not None:
threading_helper.join_thread(self._thread)
self._thread = None
self.server_close()
self.ready.clear()
class TestHTTPServer(ControlMixin, HTTPServer):
"""
An HTTP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval in seconds.
:param log: Pass ``True`` to enable log messages.
"""
def __init__(self, addr, handler, poll_interval=0.5,
log=False, sslctx=None):
class DelegatingHTTPRequestHandler(BaseHTTPRequestHandler):
def __getattr__(self, name, default=None):
if name.startswith('do_'):
return self.process_request
raise AttributeError(name)
def process_request(self):
self.server._handler(self)
def log_message(self, format, *args):
if log:
super(DelegatingHTTPRequestHandler,
self).log_message(format, *args)
HTTPServer.__init__(self, addr, DelegatingHTTPRequestHandler)
ControlMixin.__init__(self, handler, poll_interval)
self.sslctx = sslctx
def get_request(self):
try:
sock, addr = self.socket.accept()
if self.sslctx:
sock = self.sslctx.wrap_socket(sock, server_side=True)
except OSError as e:
# socket errors are silenced by the caller, print them here
sys.stderr.write("Got an error:\n%s\n" % e)
raise
return sock, addr
class TestTCPServer(ControlMixin, ThreadingTCPServer):
"""
A TCP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a single
parameter - the request - in order to process the request.
:param poll_interval: The polling interval in seconds.
    :param bind_and_activate: If True (the default), binds the server and
                              starts it listening. If False, you need to call
                              :meth:`server_bind` and :meth:`server_activate`
                              at some later time before calling :meth:`start`,
                              so that the server will set up the socket and
                              listen on it.
"""
allow_reuse_address = True
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingTCPRequestHandler(StreamRequestHandler):
def handle(self):
self.server._handler(self)
ThreadingTCPServer.__init__(self, addr, DelegatingTCPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
def server_bind(self):
super(TestTCPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
class TestUDPServer(ControlMixin, ThreadingUDPServer):
"""
A UDP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval for shutdown requests,
in seconds.
    :param bind_and_activate: If True (the default), binds the server and
                              starts it listening. If False, you need to call
                              :meth:`server_bind` and :meth:`server_activate`
                              at some later time before calling :meth:`start`,
                              so that the server will set up the socket and
                              listen on it.
"""
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingUDPRequestHandler(DatagramRequestHandler):
def handle(self):
self.server._handler(self)
def finish(self):
data = self.wfile.getvalue()
if data:
try:
super(DelegatingUDPRequestHandler, self).finish()
except OSError:
if not self.server._closed:
raise
ThreadingUDPServer.__init__(self, addr,
DelegatingUDPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
self._closed = False
def server_bind(self):
super(TestUDPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
def server_close(self):
super(TestUDPServer, self).server_close()
self._closed = True
if hasattr(socket, "AF_UNIX"):
class TestUnixStreamServer(TestTCPServer):
address_family = socket.AF_UNIX
class TestUnixDatagramServer(TestUDPServer):
address_family = socket.AF_UNIX
# - end of server_helper section
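# Usage sketch for the helpers above (illustrative only, not part of the test
# suite): each server takes a handler callable, is started on a daemon thread,
# waited on via its `ready` event, and torn down with stop(). The handler
# argument here is whatever callable the caller wants invoked per request.
def _example_server_helper_lifecycle(handler):
    server = TestTCPServer(('localhost', 0), handler, poll_interval=0.01)
    server.start()
    server.ready.wait()           # the serving thread has entered its loop
    port = server.port            # the OS-assigned port, usable by clients
    server.stop()                 # shuts down the loop and joins the thread
    return port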
class SMTPHandlerTest(BaseTest):
# bpo-14314, bpo-19665, bpo-34092: don't wait forever
TIMEOUT = support.LONG_TIMEOUT
def test_basic(self):
sockmap = {}
server = TestSMTPServer((socket_helper.HOST, 0), self.process_message, 0.001,
sockmap)
server.start()
addr = (socket_helper.HOST, server.port)
h = logging.handlers.SMTPHandler(addr, 'me', 'you', 'Log',
timeout=self.TIMEOUT)
self.assertEqual(h.toaddrs, ['you'])
self.messages = []
r = logging.makeLogRecord({'msg': 'Hello \u2713'})
self.handled = threading.Event()
h.handle(r)
self.handled.wait(self.TIMEOUT)
server.stop()
self.assertTrue(self.handled.is_set())
self.assertEqual(len(self.messages), 1)
peer, mailfrom, rcpttos, data = self.messages[0]
self.assertEqual(mailfrom, 'me')
self.assertEqual(rcpttos, ['you'])
self.assertIn('\nSubject: Log\n', data)
self.assertTrue(data.endswith('\n\nHello \u2713'))
h.close()
def process_message(self, *args):
self.messages.append(args)
self.handled.set()
class MemoryHandlerTest(BaseTest):
"""Tests for the MemoryHandler."""
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr)
self.mem_logger = logging.getLogger('mem')
self.mem_logger.propagate = 0
self.mem_logger.addHandler(self.mem_hdlr)
def tearDown(self):
self.mem_hdlr.close()
BaseTest.tearDown(self)
def test_flush(self):
# The memory handler flushes to its target handler based on specific
# criteria (message count and message level).
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
# This will flush because the level is >= logging.WARNING
self.mem_logger.warning(self.next_message())
lines = [
('DEBUG', '1'),
('INFO', '2'),
('WARNING', '3'),
]
self.assert_log_lines(lines)
for n in (4, 14):
for i in range(9):
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
# This will flush because it's the 10th message since the last
# flush.
self.mem_logger.debug(self.next_message())
lines = lines + [('DEBUG', str(i)) for i in range(n, n + 10)]
self.assert_log_lines(lines)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
def test_flush_on_close(self):
"""
Test that the flush-on-close configuration works as expected.
"""
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
self.mem_logger.removeHandler(self.mem_hdlr)
# Default behaviour is to flush on close. Check that it happens.
self.mem_hdlr.close()
lines = [
('DEBUG', '1'),
('INFO', '2'),
]
self.assert_log_lines(lines)
# Now configure for flushing not to be done on close.
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr,
False)
self.mem_logger.addHandler(self.mem_hdlr)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines) # no change
self.mem_logger.info(self.next_message())
self.assert_log_lines(lines) # no change
self.mem_logger.removeHandler(self.mem_hdlr)
self.mem_hdlr.close()
# assert that no new lines have been added
self.assert_log_lines(lines) # no change
def test_race_between_set_target_and_flush(self):
class MockRaceConditionHandler:
def __init__(self, mem_hdlr):
self.mem_hdlr = mem_hdlr
def removeTarget(self):
self.mem_hdlr.setTarget(None)
def handle(self, msg):
t = threading.Thread(target=self.removeTarget)
t.daemon = True
t.start()
target = MockRaceConditionHandler(self.mem_hdlr)
self.mem_hdlr.setTarget(target)
for _ in range(10):
time.sleep(0.005)
self.mem_logger.info("not flushed")
self.mem_logger.warning("flushed")
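# Illustrative sketch (not run by the suite) of the flushing rules the tests
# above rely on: a MemoryHandler buffers records and flushes them to its
# target either when the buffer reaches `capacity` records or when a record
# at or above `flushLevel` arrives; by default it also flushes on close().
def _example_memory_handler():
    target = logging.StreamHandler()
    mem = logging.handlers.MemoryHandler(capacity=3, flushLevel=logging.ERROR,
                                         target=target)
    example_logger = logging.getLogger('example.memory')
    example_logger.addHandler(mem)
    example_logger.warning('buffered: below capacity and below flushLevel')
    example_logger.error('ERROR >= flushLevel, so the buffer is flushed now')
    mem.close()                   # flushes anything still buffered by default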
class ExceptionFormatter(logging.Formatter):
"""A special exception formatter."""
def formatException(self, ei):
return "Got a [%s]" % ei[0].__name__
class ConfigFileTest(BaseTest):
"""Reading logging config from a .ini-style config file."""
check_no_resource_warning = warnings_helper.check_no_resource_warning
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1 adds a little to the standard configuration.
config1 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1a moves the handler to the root.
config1a = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config2 has a subtle configuration error that should be reported
config2 = config1.replace("sys.stdout", "sys.stbout")
# config3 has a less subtle configuration error
config3 = config1.replace("formatter=form1", "formatter=misspelled_name")
# config4 specifies a custom formatter class to be loaded
config4 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=NOTSET
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
class=""" + __name__ + """.ExceptionFormatter
format=%(levelname)s:%(name)s:%(message)s
datefmt=
"""
# config5 specifies a custom handler class to be loaded
config5 = config1.replace('class=StreamHandler', 'class=logging.StreamHandler')
# config6 uses ', ' delimiters in the handlers and formatters sections
config6 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1, hand2
[formatters]
keys=form1, form2
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[handler_hand2]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stderr,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
[formatter_form2]
format=%(message)s
datefmt=
"""
# config7 adds a compiler logger, and uses kwargs instead of args.
config7 = """
[loggers]
keys=root,parser,compiler
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_compiler]
level=DEBUG
handlers=
propagate=1
qualname=compiler
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
kwargs={'stream': sys.stdout,}
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
    # config8 is used to check for resource warnings
config8 = r"""
[loggers]
keys=root
[handlers]
keys=file
[formatters]
keys=
[logger_root]
level=DEBUG
handlers=file
[handler_file]
class=FileHandler
level=DEBUG
args=("{tempfile}",)
"""
disable_test = """
[loggers]
keys=root
[handlers]
keys=screen
[formatters]
keys=
[logger_root]
level=DEBUG
handlers=screen
[handler_screen]
level=DEBUG
class=StreamHandler
args=(sys.stdout,)
formatter=
"""
def apply_config(self, conf, **kwargs):
file = io.StringIO(textwrap.dedent(conf))
logging.config.fileConfig(file, **kwargs)
def test_config0_ok(self):
# A simple config file which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config0_using_cp_ok(self):
# A simple config file which overrides the default settings.
with support.captured_stdout() as output:
file = io.StringIO(textwrap.dedent(self.config0))
cp = configparser.ConfigParser()
cp.read_file(file)
logging.config.fileConfig(cp)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config file defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config3_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config file specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_ok(self):
self.test_config1_ok(config=self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config8_ok(self):
def cleanup(h1, fn):
h1.close()
os.remove(fn)
with self.check_no_resource_warning():
fd, fn = tempfile.mkstemp(".log", "test_logging-X-")
os.close(fd)
            # Replace single backslashes with double backslashes on Windows
            # to avoid a unicode-escape error during string formatting.
if os.name == "nt":
fn = fn.replace("\\", "\\\\")
config8 = self.config8.format(tempfile=fn)
self.apply_config(config8)
self.apply_config(config8)
handler = logging.root.handlers[0]
self.addCleanup(cleanup, handler, fn)
def test_logger_disabling(self):
self.apply_config(self.disable_test)
logger = logging.getLogger('some_pristine_logger')
self.assertFalse(logger.disabled)
self.apply_config(self.disable_test)
self.assertTrue(logger.disabled)
self.apply_config(self.disable_test, disable_existing_loggers=False)
self.assertFalse(logger.disabled)
def test_config_set_handler_names(self):
test_config = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
handlers=hand1
[handler_hand1]
class=StreamHandler
formatter=form1
[formatter_form1]
format=%(levelname)s ++ %(message)s
"""
self.apply_config(test_config)
self.assertEqual(logging.getLogger().handlers[0].name, 'hand1')
def test_defaults_do_no_interpolation(self):
"""bpo-33802 defaults should not get interpolated"""
ini = textwrap.dedent("""
[formatters]
keys=default
[formatter_default]
[handlers]
keys=console
[handler_console]
class=logging.StreamHandler
args=tuple()
[loggers]
keys=root
[logger_root]
formatter=default
handlers=console
""").strip()
fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.ini')
try:
os.write(fd, ini.encode('ascii'))
os.close(fd)
logging.config.fileConfig(
fn,
defaults=dict(
version=1,
disable_existing_loggers=False,
formatters={
"generic": {
"format": "%(asctime)s [%(process)d] [%(levelname)s] %(message)s",
"datefmt": "[%Y-%m-%d %H:%M:%S %z]",
"class": "logging.Formatter"
},
},
)
)
finally:
os.unlink(fn)
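# Side note, sketched here for clarity and not used by the tests: fileConfig
# evaluates the args=/kwargs= lines of a [handler_*] section in the logging
# package's namespace, which is why expressions such as (sys.stdout,) and
# {'stream': sys.stdout,} in the configs above resolve correctly.
def _example_fileconfig_args_eval():
    # Roughly what fileConfig does with "args=(sys.stdout,)" before calling
    # the handler class.
    args = eval("(sys.stdout,)", vars(logging))
    return logging.StreamHandler(*args)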
class SocketHandlerTest(BaseTest):
"""Test for SocketHandler objects."""
server_class = TestTCPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a TCP server to receive log messages, and a SocketHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sock_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_socket, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.SocketHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Semaphore(0)
def tearDown(self):
"""Shutdown the TCP server."""
try:
if self.sock_hdlr:
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
if self.server:
self.server.stop()
finally:
BaseTest.tearDown(self)
def handle_socket(self, request):
conn = request.connection
while True:
chunk = conn.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack(">L", chunk)[0]
chunk = conn.recv(slen)
while len(chunk) < slen:
chunk = chunk + conn.recv(slen - len(chunk))
obj = pickle.loads(chunk)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.release()
def test_output(self):
# The log message sent to the SocketHandler is properly received.
if self.server_exception:
self.skipTest(self.server_exception)
logger = logging.getLogger("tcp")
logger.error("spam")
self.handled.acquire()
logger.debug("eggs")
self.handled.acquire()
self.assertEqual(self.log_output, "spam\neggs\n")
def test_noserver(self):
if self.server_exception:
self.skipTest(self.server_exception)
# Avoid timing-related failures due to SocketHandler's own hard-wired
# one-second timeout on socket.create_connection() (issue #16264).
self.sock_hdlr.retryStart = 2.5
# Kill the server
self.server.stop()
# The logging call should try to connect, which should fail
try:
raise RuntimeError('Deliberate mistake')
except RuntimeError:
self.root_logger.exception('Never sent')
self.root_logger.error('Never sent, either')
now = time.time()
self.assertGreater(self.sock_hdlr.retryTime, now)
time.sleep(self.sock_hdlr.retryTime - now + 0.001)
self.root_logger.error('Nor this')
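# Sketch (simplified, not used by the tests): the framing unpacked in
# handle_socket() above mirrors what SocketHandler.makePickle() sends, namely
# a 4-byte big-endian length prefix followed by a pickled dict of the record's
# attributes. The real makePickle() also normalises msg/args/exc_info first.
def _example_socket_frame(record):
    payload = pickle.dumps(record.__dict__, 1)
    return struct.pack('>L', len(payload)) + payload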
def _get_temp_domain_socket():
fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.sock')
os.close(fd)
    # We just need a name - the file must not exist, or we'd get an
    # 'address already in use' error when the server binds to it.
os.remove(fn)
return fn
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixSocketHandlerTest(SocketHandlerTest):
"""Test for SocketHandler with unix sockets."""
if hasattr(socket, "AF_UNIX"):
server_class = TestUnixStreamServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SocketHandlerTest.setUp(self)
def tearDown(self):
SocketHandlerTest.tearDown(self)
os_helper.unlink(self.address)
class DatagramHandlerTest(BaseTest):
"""Test for DatagramHandler."""
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a DatagramHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sock_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.DatagramHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the UDP server."""
try:
if self.server:
self.server.stop()
if self.sock_hdlr:
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
        slen = struct.pack('>L', 0)  # used only to measure the 4-byte length prefix
packet = request.packet[len(slen):]
obj = pickle.loads(packet)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.set()
def test_output(self):
# The log message sent to the DatagramHandler is properly received.
if self.server_exception:
self.skipTest(self.server_exception)
logger = logging.getLogger("udp")
logger.error("spam")
self.handled.wait()
self.handled.clear()
logger.error("eggs")
self.handled.wait()
self.assertEqual(self.log_output, "spam\neggs\n")
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixDatagramHandlerTest(DatagramHandlerTest):
"""Test for DatagramHandler using Unix sockets."""
if hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
DatagramHandlerTest.setUp(self)
def tearDown(self):
DatagramHandlerTest.tearDown(self)
os_helper.unlink(self.address)
class SysLogHandlerTest(BaseTest):
"""Test for SysLogHandler using UDP."""
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a SysLogHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sl_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.SysLogHandler
if isinstance(server.server_address, tuple):
self.sl_hdlr = hcls((server.server_address[0], server.port))
else:
self.sl_hdlr = hcls(server.server_address)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sl_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the server."""
try:
if self.server:
self.server.stop()
if self.sl_hdlr:
self.root_logger.removeHandler(self.sl_hdlr)
self.sl_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
self.log_output = request.packet
self.handled.set()
def test_output(self):
if self.server_exception:
self.skipTest(self.server_exception)
# The log message sent to the SysLogHandler is properly received.
logger = logging.getLogger("slh")
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m\x00')
self.handled.clear()
self.sl_hdlr.append_nul = False
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m')
self.handled.clear()
self.sl_hdlr.ident = "h\xe4m-"
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>h\xc3\xa4m-sp\xc3\xa4m')
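# Note, as a small sketch rather than a test: the b'<11>' prefix asserted
# above is the syslog priority, computed as facility * 8 + severity.
# SysLogHandler maps ERROR records to LOG_ERR under the default LOG_USER
# facility, giving 1 * 8 + 3 == 11.
def _example_syslog_priority():
    h = logging.handlers.SysLogHandler
    return h.LOG_USER * 8 + h.LOG_ERR    # == 11 for logger.error(...)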
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixSysLogHandlerTest(SysLogHandlerTest):
"""Test for SysLogHandler with Unix sockets."""
if hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SysLogHandlerTest.setUp(self)
def tearDown(self):
SysLogHandlerTest.tearDown(self)
os_helper.unlink(self.address)
@unittest.skipUnless(socket_helper.IPV6_ENABLED,
'IPv6 support required for this test.')
class IPv6SysLogHandlerTest(SysLogHandlerTest):
"""Test for SysLogHandler with IPv6 host."""
server_class = TestUDPServer
address = ('::1', 0)
def setUp(self):
self.server_class.address_family = socket.AF_INET6
super(IPv6SysLogHandlerTest, self).setUp()
def tearDown(self):
self.server_class.address_family = socket.AF_INET
super(IPv6SysLogHandlerTest, self).tearDown()
class HTTPHandlerTest(BaseTest):
"""Test for HTTPHandler."""
def setUp(self):
"""Set up an HTTP server to receive log messages, and a HTTPHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.handled = threading.Event()
def handle_request(self, request):
self.command = request.command
self.log_data = urlparse(request.path)
if self.command == 'POST':
try:
rlen = int(request.headers['Content-Length'])
self.post_data = request.rfile.read(rlen)
            except Exception:
self.post_data = None
request.send_response(200)
request.end_headers()
self.handled.set()
def test_output(self):
# The log message sent to the HTTPHandler is properly received.
logger = logging.getLogger("http")
root_logger = self.root_logger
root_logger.removeHandler(self.root_logger.handlers[0])
for secure in (False, True):
addr = ('localhost', 0)
if secure:
try:
import ssl
except ImportError:
sslctx = None
else:
here = os.path.dirname(__file__)
localhost_cert = os.path.join(here, "keycert.pem")
sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
sslctx.load_cert_chain(localhost_cert)
context = ssl.create_default_context(cafile=localhost_cert)
else:
sslctx = None
context = None
self.server = server = TestHTTPServer(addr, self.handle_request,
0.01, sslctx=sslctx)
server.start()
server.ready.wait()
host = 'localhost:%d' % server.server_port
secure_client = secure and sslctx
self.h_hdlr = logging.handlers.HTTPHandler(host, '/frob',
secure=secure_client,
context=context,
credentials=('foo', 'bar'))
self.log_data = None
root_logger.addHandler(self.h_hdlr)
for method in ('GET', 'POST'):
self.h_hdlr.method = method
self.handled.clear()
msg = "sp\xe4m"
logger.error(msg)
self.handled.wait()
self.assertEqual(self.log_data.path, '/frob')
self.assertEqual(self.command, method)
if method == 'GET':
d = parse_qs(self.log_data.query)
else:
d = parse_qs(self.post_data.decode('utf-8'))
self.assertEqual(d['name'], ['http'])
self.assertEqual(d['funcName'], ['test_output'])
self.assertEqual(d['msg'], [msg])
self.server.stop()
self.root_logger.removeHandler(self.h_hdlr)
self.h_hdlr.close()
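# Sketch (illustrative only): HTTPHandler.emit() URL-encodes the dict returned
# by mapLogRecord(), by default the record's __dict__, which is why the test
# above can recover 'name', 'funcName' and 'msg' with parse_qs from either the
# query string (GET) or the request body (POST).
def _example_http_payload(record):
    from urllib.parse import urlencode
    h = logging.handlers.HTTPHandler('localhost', '/frob')
    return urlencode(h.mapLogRecord(record))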
class MemoryTest(BaseTest):
"""Test memory persistence of logger objects."""
def setUp(self):
"""Create a dict to remember potentially destroyed objects."""
BaseTest.setUp(self)
self._survivors = {}
def _watch_for_survival(self, *args):
"""Watch the given objects for survival, by creating weakrefs to
them."""
for obj in args:
key = id(obj), repr(obj)
self._survivors[key] = weakref.ref(obj)
    def _assert_survival(self):
"""Assert that all objects watched for survival have survived."""
# Trigger cycle breaking.
gc.collect()
dead = []
for (id_, repr_), ref in self._survivors.items():
if ref() is None:
dead.append(repr_)
if dead:
self.fail("%d objects should have survived "
"but have been destroyed: %s" % (len(dead), ", ".join(dead)))
def test_persistent_loggers(self):
# Logger objects are persistent and retain their configuration, even
# if visible references are destroyed.
self.root_logger.setLevel(logging.INFO)
foo = logging.getLogger("foo")
self._watch_for_survival(foo)
foo.setLevel(logging.DEBUG)
self.root_logger.debug(self.next_message())
foo.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
])
del foo
# foo has survived.
        self._assert_survival()
# foo has retained its settings.
bar = logging.getLogger("foo")
bar.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
('foo', 'DEBUG', '3'),
])
class EncodingTest(BaseTest):
def test_encoding_plain_file(self):
        # A FileHandler opened with an explicit encoding should round-trip non-ASCII data.
log = logging.getLogger("test")
fd, fn = tempfile.mkstemp(".log", "test_logging-1-")
os.close(fd)
# the non-ascii data we write to the log.
data = "foo\x80"
try:
handler = logging.FileHandler(fn, encoding="utf-8")
log.addHandler(handler)
try:
# write non-ascii data to the log.
log.warning(data)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
f = open(fn, encoding="utf-8")
try:
self.assertEqual(f.read().rstrip(), data)
finally:
f.close()
finally:
if os.path.isfile(fn):
os.remove(fn)
def test_encoding_cyrillic_unicode(self):
log = logging.getLogger("test")
# Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
message = '\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
# Ensure it's written in a Cyrillic encoding
writer_class = codecs.getwriter('cp1251')
writer_class.encoding = 'cp1251'
stream = io.BytesIO()
writer = writer_class(stream, 'strict')
handler = logging.StreamHandler(writer)
log.addHandler(handler)
try:
log.warning(message)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
s = stream.getvalue()
# Compare against what the data should be when encoded in CP-1251
self.assertEqual(s, b'\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n')
class WarningsTest(BaseTest):
def test_warnings(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
warnings.filterwarnings("always", category=UserWarning)
stream = io.StringIO()
h = logging.StreamHandler(stream)
logger = logging.getLogger("py.warnings")
logger.addHandler(h)
warnings.warn("I'm warning you...")
logger.removeHandler(h)
s = stream.getvalue()
h.close()
self.assertGreater(s.find("UserWarning: I'm warning you...\n"), 0)
# See if an explicit file uses the original implementation
a_file = io.StringIO()
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
a_file, "Dummy line")
s = a_file.getvalue()
a_file.close()
self.assertEqual(s,
"dummy.py:42: UserWarning: Explicit\n Dummy line\n")
def test_warnings_no_handlers(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
            # confirm our assumption: the 'py.warnings' logger has no handlers yet
logger = logging.getLogger("py.warnings")
self.assertEqual(logger.handlers, [])
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42)
self.assertEqual(len(logger.handlers), 1)
self.assertIsInstance(logger.handlers[0], logging.NullHandler)
def formatFunc(format, datefmt=None):
return logging.Formatter(format, datefmt)
class myCustomFormatter:
def __init__(self, fmt, datefmt=None):
pass
def handlerFunc():
return logging.StreamHandler()
class CustomHandler(logging.StreamHandler):
pass
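# Brief sketch, illustrative and unused by the tests: dictConfig treats a '()'
# key in a formatter or handler section as a user-supplied factory, either a
# callable (as here) or a dotted-path string, and passes the section's
# remaining keys to it as keyword arguments, as config4a below demonstrates
# with ExceptionFormatter, formatFunc and handlerFunc.
def _example_dictconfig_factory():
    logging.config.dictConfig({
        'version': 1,
        'formatters': {'form_example': {'()': formatFunc,
                                        'format': '%(levelname)s:%(message)s'}},
        'handlers': {'hand_example': {'()': handlerFunc}},
        'root': {'handlers': ['hand_example'], 'level': 'INFO'},
    })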
class ConfigDictTest(BaseTest):
"""Reading logging config from a dictionary."""
check_no_resource_warning = warnings_helper.check_no_resource_warning
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config1 adds a little to the standard configuration.
config1 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config1a moves the handler to the root. Used with config8a
config1a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config2 has a subtle configuration error that should be reported
config2 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdbout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config1 but with a misspelt level on a handler
config2a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NTOSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config1 but with a misspelt level on a logger
config2b = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WRANING',
},
}
# config3 has a less subtle configuration error
config3 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'misspelled_name',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config4 specifies a custom formatter class to be loaded
config4 = {
'version': 1,
'formatters': {
'form1' : {
'()' : __name__ + '.ExceptionFormatter',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# As config4 but using an actual callable rather than a string
config4a = {
'version': 1,
'formatters': {
'form1' : {
'()' : ExceptionFormatter,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form2' : {
'()' : __name__ + '.formatFunc',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form3' : {
'()' : formatFunc,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
'hand2' : {
'()' : handlerFunc,
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# config5 specifies a custom handler class to be loaded
config5 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config6 specifies a custom handler class to be loaded
# but has bad arguments
config6 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'9' : 'invalid parameter name',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
    # config7 does not define compiler.parser but defines compiler.lexer
# so compiler.parser should be disabled after applying it
config7 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.lexer' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8 defines both compiler and compiler.lexer
# so compiler.parser should not be disabled (since
# compiler is defined)
config8 = {
'version': 1,
'disable_existing_loggers' : False,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8a disables existing loggers
config8a = {
'version': 1,
'disable_existing_loggers' : True,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
config9 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'WARNING',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'NOTSET',
},
}
config9a = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'WARNING',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
config9b = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'INFO',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
# As config1 but with a filter added
config10 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'filters' : {
'filt1' : {
'name' : 'compiler.parser',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'filters' : ['filt1'],
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'filters' : ['filt1'],
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# As config1 but using cfg:// references
config11 = {
'version': 1,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config11 but missing the version key
config12 = {
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config11 but using an unsupported version
config13 = {
'version': 2,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config0, but with properties
config14 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'.': {
'foo': 'bar',
'terminator': '!\n',
}
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
out_of_order = {
"version": 1,
"formatters": {
"mySimpleFormatter": {
"format": "%(asctime)s (%(name)s) %(levelname)s: %(message)s",
"style": "$"
}
},
"handlers": {
"fileGlobal": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "mySimpleFormatter"
},
"bufferGlobal": {
"class": "logging.handlers.MemoryHandler",
"capacity": 5,
"formatter": "mySimpleFormatter",
"target": "fileGlobal",
"level": "DEBUG"
}
},
"loggers": {
"mymodule": {
"level": "DEBUG",
"handlers": ["bufferGlobal"],
"propagate": "true"
}
}
}
# Configuration with custom logging.Formatter subclass as '()' key and 'validate' set to False
custom_formatter_class_validate = {
'version': 1,
'formatters': {
'form1': {
'()': __name__ + '.ExceptionFormatter',
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
# Configuration with custom logging.Formatter subclass as 'class' key and 'validate' set to False
custom_formatter_class_validate2 = {
'version': 1,
'formatters': {
'form1': {
'class': __name__ + '.ExceptionFormatter',
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
# Configuration with custom class that is not inherited from logging.Formatter
custom_formatter_class_validate3 = {
'version': 1,
'formatters': {
'form1': {
'class': __name__ + '.myCustomFormatter',
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
# Configuration with custom function and 'validate' set to False
custom_formatter_with_function = {
'version': 1,
'formatters': {
'form1': {
'()': formatFunc,
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
def apply_config(self, conf):
logging.config.dictConfig(conf)
def test_config0_ok(self):
# A simple config which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config2a_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2a)
def test_config2b_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2b)
def test_config3_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config4a_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4a)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_failure(self):
self.assertRaises(Exception, self.apply_config, self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertTrue(logger.disabled)
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
# Same as test_config_7_ok but don't disable old loggers.
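    # dictConfig() disables existing loggers that are not named in the new
    # configuration unless disable_existing_loggers is set to False.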
def test_config_8_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
            # Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_8a_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8a)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_9_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config9)
logger = logging.getLogger("compiler.parser")
# Nothing will be output since both handler and logger are set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9a)
# Nothing will be output since handler is still set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9b)
# Message should now be output
logger.info(self.next_message())
self.assert_log_lines([
('INFO', '3'),
], stream=output)
def test_config_10_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config10)
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
# Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_config11_ok(self):
self.test_config1_ok(self.config11)
def test_config12_failure(self):
self.assertRaises(Exception, self.apply_config, self.config12)
def test_config13_failure(self):
self.assertRaises(Exception, self.apply_config, self.config13)
def test_config14_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config14)
h = logging._handlers['hand1']
self.assertEqual(h.foo, 'bar')
self.assertEqual(h.terminator, '!\n')
logging.warning('Exclamation')
self.assertTrue(output.getvalue().endswith('Exclamation!\n'))
def test_config15_ok(self):
def cleanup(h1, fn):
h1.close()
os.remove(fn)
with self.check_no_resource_warning():
fd, fn = tempfile.mkstemp(".log", "test_logging-X-")
os.close(fd)
config = {
"version": 1,
"handlers": {
"file": {
"class": "logging.FileHandler",
"filename": fn
}
},
"root": {
"handlers": ["file"]
}
}
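            # Applying the same FileHandler config twice should replace and close
            # the first handler rather than leak its stream; a leaked file object
            # would trip the ResourceWarning check above.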
self.apply_config(config)
self.apply_config(config)
handler = logging.root.handlers[0]
self.addCleanup(cleanup, handler, fn)
def setup_via_listener(self, text, verify=None):
text = text.encode("utf-8")
# Ask for a randomly assigned port (by using port 0)
t = logging.config.listen(0, verify)
t.start()
t.ready.wait()
# Now get the port allocated
port = t.port
t.ready.clear()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2.0)
sock.connect(('localhost', port))
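            # logging.config.listen() expects each config payload to be preceded
            # by its length packed as a 4-byte big-endian integer ('>L').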
slen = struct.pack('>L', len(text))
s = slen + text
sentsofar = 0
left = len(s)
while left > 0:
sent = sock.send(s[sentsofar:])
sentsofar += sent
left -= sent
sock.close()
finally:
t.ready.wait(2.0)
logging.config.stopListening()
threading_helper.join_thread(t)
def test_listen_config_10_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(json.dumps(self.config10))
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
# Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_listen_config_1_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(textwrap.dedent(ConfigFileTest.config1))
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_listen_verify(self):
def verify_fail(stuff):
return None
def verify_reverse(stuff):
return stuff[::-1]
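        # listen() passes each received payload to the verify callable (if given):
        # returning None rejects the configuration, returning bytes (possibly
        # transformed) lets it be processed.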
logger = logging.getLogger("compiler.parser")
to_send = textwrap.dedent(ConfigFileTest.config1)
# First, specify a verification function that will fail.
# We expect to see no output, since our configuration
# never took effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send, verify_fail)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([], stream=output)
# Original logger output has the stuff we logged.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform no verification. Our configuration
# should take effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send) # no verify callable specified
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform verification which transforms the bytes.
with support.captured_stdout() as output:
self.setup_via_listener(to_send[::-1], verify_reverse)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
def test_out_of_order(self):
self.assertRaises(ValueError, self.apply_config, self.out_of_order)
def test_out_of_order_with_dollar_style(self):
config = copy.deepcopy(self.out_of_order)
config['formatters']['mySimpleFormatter']['format'] = "${asctime} (${name}) ${levelname}: ${message}"
self.apply_config(config)
handler = logging.getLogger('mymodule').handlers[0]
self.assertIsInstance(handler.target, logging.Handler)
self.assertIsInstance(handler.formatter._style,
logging.StringTemplateStyle)
def test_custom_formatter_class_with_validate(self):
self.apply_config(self.custom_formatter_class_validate)
handler = logging.getLogger("my_test_logger_custom_formatter").handlers[0]
self.assertIsInstance(handler.formatter, ExceptionFormatter)
def test_custom_formatter_class_with_validate2(self):
self.apply_config(self.custom_formatter_class_validate2)
handler = logging.getLogger("my_test_logger_custom_formatter").handlers[0]
self.assertIsInstance(handler.formatter, ExceptionFormatter)
def test_custom_formatter_class_with_validate2_with_wrong_fmt(self):
config = self.custom_formatter_class_validate.copy()
config['formatters']['form1']['style'] = "$"
        # Exception should not be raised as we have configured 'validate' to False
self.apply_config(config)
handler = logging.getLogger("my_test_logger_custom_formatter").handlers[0]
self.assertIsInstance(handler.formatter, ExceptionFormatter)
def test_custom_formatter_class_with_validate3(self):
self.assertRaises(ValueError, self.apply_config, self.custom_formatter_class_validate3)
def test_custom_formatter_function_with_validate(self):
self.assertRaises(ValueError, self.apply_config, self.custom_formatter_with_function)
def test_baseconfig(self):
d = {
'atuple': (1, 2, 3),
'alist': ['a', 'b', 'c'],
'adict': {'d': 'e', 'f': 3 },
'nest1': ('g', ('h', 'i'), 'j'),
'nest2': ['k', ['l', 'm'], 'n'],
'nest3': ['o', 'cfg://alist', 'p'],
}
bc = logging.config.BaseConfigurator(d)
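        # cfg:// references resolve against this dictionary; dotted access
        # (cfg://adict.d), indexing (cfg://alist[1]) and nesting can be combined.
        # Unresolvable paths raise KeyError, malformed ones ValueError.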
self.assertEqual(bc.convert('cfg://atuple[1]'), 2)
self.assertEqual(bc.convert('cfg://alist[1]'), 'b')
self.assertEqual(bc.convert('cfg://nest1[1][0]'), 'h')
self.assertEqual(bc.convert('cfg://nest2[1][1]'), 'm')
self.assertEqual(bc.convert('cfg://adict.d'), 'e')
self.assertEqual(bc.convert('cfg://adict[f]'), 3)
v = bc.convert('cfg://nest3')
self.assertEqual(v.pop(1), ['a', 'b', 'c'])
self.assertRaises(KeyError, bc.convert, 'cfg://nosuch')
self.assertRaises(ValueError, bc.convert, 'cfg://!')
self.assertRaises(KeyError, bc.convert, 'cfg://adict[2]')
def test_namedtuple(self):
# see bpo-39142
from collections import namedtuple
class MyHandler(logging.StreamHandler):
def __init__(self, resource, *args, **kwargs):
super().__init__(*args, **kwargs)
self.resource: namedtuple = resource
def emit(self, record):
record.msg += f' {self.resource.type}'
return super().emit(record)
Resource = namedtuple('Resource', ['type', 'labels'])
resource = Resource(type='my_type', labels=['a'])
config = {
'version': 1,
'handlers': {
'myhandler': {
'()': MyHandler,
'resource': resource
}
},
'root': {'level': 'INFO', 'handlers': ['myhandler']},
}
with support.captured_stderr() as stderr:
self.apply_config(config)
logging.info('some log')
self.assertEqual(stderr.getvalue(), 'some log my_type\n')
class ManagerTest(BaseTest):
def test_manager_loggerclass(self):
logged = []
class MyLogger(logging.Logger):
def _log(self, level, msg, args, exc_info=None, extra=None):
logged.append(msg)
man = logging.Manager(None)
self.assertRaises(TypeError, man.setLoggerClass, int)
man.setLoggerClass(MyLogger)
logger = man.getLogger('test')
logger.warning('should appear in logged')
logging.warning('should not appear in logged')
self.assertEqual(logged, ['should appear in logged'])
def test_set_log_record_factory(self):
man = logging.Manager(None)
expected = object()
man.setLogRecordFactory(expected)
self.assertEqual(man.logRecordFactory, expected)
class ChildLoggerTest(BaseTest):
def test_child_loggers(self):
r = logging.getLogger()
l1 = logging.getLogger('abc')
l2 = logging.getLogger('def.ghi')
c1 = r.getChild('xyz')
c2 = r.getChild('uvw.xyz')
self.assertIs(c1, logging.getLogger('xyz'))
self.assertIs(c2, logging.getLogger('uvw.xyz'))
c1 = l1.getChild('def')
c2 = c1.getChild('ghi')
c3 = l1.getChild('def.ghi')
self.assertIs(c1, logging.getLogger('abc.def'))
self.assertIs(c2, logging.getLogger('abc.def.ghi'))
self.assertIs(c2, c3)
class DerivedLogRecord(logging.LogRecord):
pass
class LogRecordFactoryTest(BaseTest):
def setUp(self):
class CheckingFilter(logging.Filter):
def __init__(self, cls):
self.cls = cls
def filter(self, record):
t = type(record)
if t is not self.cls:
msg = 'Unexpected LogRecord type %s, expected %s' % (t,
self.cls)
raise TypeError(msg)
return True
BaseTest.setUp(self)
self.filter = CheckingFilter(DerivedLogRecord)
self.root_logger.addFilter(self.filter)
self.orig_factory = logging.getLogRecordFactory()
def tearDown(self):
self.root_logger.removeFilter(self.filter)
BaseTest.tearDown(self)
logging.setLogRecordFactory(self.orig_factory)
def test_logrecord_class(self):
self.assertRaises(TypeError, self.root_logger.warning,
self.next_message())
logging.setLogRecordFactory(DerivedLogRecord)
self.root_logger.error(self.next_message())
self.assert_log_lines([
('root', 'ERROR', '2'),
])
class QueueHandlerTest(BaseTest):
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.queue = queue.Queue(-1)
self.que_hdlr = logging.handlers.QueueHandler(self.queue)
self.name = 'que'
self.que_logger = logging.getLogger('que')
self.que_logger.propagate = False
self.que_logger.setLevel(logging.WARNING)
self.que_logger.addHandler(self.que_hdlr)
def tearDown(self):
self.que_hdlr.close()
BaseTest.tearDown(self)
def test_queue_handler(self):
self.que_logger.debug(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
self.que_logger.info(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
msg = self.next_message()
self.que_logger.warning(msg)
data = self.queue.get_nowait()
self.assertTrue(isinstance(data, logging.LogRecord))
self.assertEqual(data.name, self.que_logger.name)
self.assertEqual((data.msg, data.args), (msg, None))
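        # QueueHandler.prepare() formats the record before enqueueing: msg is
        # replaced by the fully formatted message and args/exc_info are cleared,
        # which is why args is None here.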
def test_formatting(self):
msg = self.next_message()
levelname = logging.getLevelName(logging.WARNING)
log_format_str = '{name} -> {levelname}: {message}'
formatted_msg = log_format_str.format(name=self.name,
levelname=levelname, message=msg)
formatter = logging.Formatter(self.log_format)
self.que_hdlr.setFormatter(formatter)
self.que_logger.warning(msg)
log_record = self.queue.get_nowait()
self.assertEqual(formatted_msg, log_record.msg)
self.assertEqual(formatted_msg, log_record.message)
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener(self):
handler = TestHandler(support.Matcher())
listener = logging.handlers.QueueListener(self.queue, handler)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertTrue(handler.matches(levelno=logging.WARNING, message='1'))
self.assertTrue(handler.matches(levelno=logging.ERROR, message='2'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='3'))
handler.close()
# Now test with respect_handler_level set
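        # With respect_handler_level=True the listener checks each handler's level
        # instead of passing every record through, so only the CRITICAL record
        # should reach this handler.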
handler = TestHandler(support.Matcher())
handler.setLevel(logging.CRITICAL)
listener = logging.handlers.QueueListener(self.queue, handler,
respect_handler_level=True)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertFalse(handler.matches(levelno=logging.WARNING, message='4'))
self.assertFalse(handler.matches(levelno=logging.ERROR, message='5'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='6'))
handler.close()
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener_with_StreamHandler(self):
        # Test that the traceback is only appended once (bpo-34334).
listener = logging.handlers.QueueListener(self.queue, self.root_hdlr)
listener.start()
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.que_logger.exception(self.next_message(), exc_info=exc)
listener.stop()
self.assertEqual(self.stream.getvalue().strip().count('Traceback'), 1)
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener_with_multiple_handlers(self):
# Test that queue handler format doesn't affect other handler formats (bpo-35726).
self.que_hdlr.setFormatter(self.root_formatter)
self.que_logger.addHandler(self.root_hdlr)
listener = logging.handlers.QueueListener(self.queue, self.que_hdlr)
listener.start()
self.que_logger.error("error")
listener.stop()
self.assertEqual(self.stream.getvalue().strip(), "que -> ERROR: error")
if hasattr(logging.handlers, 'QueueListener'):
import multiprocessing
from unittest.mock import patch
class QueueListenerTest(BaseTest):
"""
Tests based on patch submitted for issue #27930. Ensure that
QueueListener handles all log messages.
"""
repeat = 20
@staticmethod
def setup_and_log(log_queue, ident):
"""
Creates a logger with a QueueHandler that logs to a queue read by a
QueueListener. Starts the listener, logs five messages, and stops
the listener.
"""
logger = logging.getLogger('test_logger_with_id_%s' % ident)
logger.setLevel(logging.DEBUG)
handler = logging.handlers.QueueHandler(log_queue)
logger.addHandler(handler)
listener = logging.handlers.QueueListener(log_queue)
listener.start()
logger.info('one')
logger.info('two')
logger.info('three')
logger.info('four')
logger.info('five')
listener.stop()
logger.removeHandler(handler)
handler.close()
@patch.object(logging.handlers.QueueListener, 'handle')
def test_handle_called_with_queue_queue(self, mock_handle):
for i in range(self.repeat):
log_queue = queue.Queue()
self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
self.assertEqual(mock_handle.call_count, 5 * self.repeat,
'correct number of handled log messages')
@patch.object(logging.handlers.QueueListener, 'handle')
def test_handle_called_with_mp_queue(self, mock_handle):
# bpo-28668: The multiprocessing (mp) module is not functional
# when the mp.synchronize module cannot be imported.
support.skip_if_broken_multiprocessing_synchronize()
for i in range(self.repeat):
log_queue = multiprocessing.Queue()
self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
log_queue.close()
log_queue.join_thread()
self.assertEqual(mock_handle.call_count, 5 * self.repeat,
'correct number of handled log messages')
@staticmethod
def get_all_from_queue(log_queue):
try:
while True:
yield log_queue.get_nowait()
except queue.Empty:
return []
def test_no_messages_in_queue_after_stop(self):
"""
Five messages are logged then the QueueListener is stopped. This
test then gets everything off the queue. Failure of this test
indicates that messages were not registered on the queue until
_after_ the QueueListener stopped.
"""
# bpo-28668: The multiprocessing (mp) module is not functional
# when the mp.synchronize module cannot be imported.
support.skip_if_broken_multiprocessing_synchronize()
for i in range(self.repeat):
queue = multiprocessing.Queue()
self.setup_and_log(queue, '%s_%s' %(self.id(), i))
# time.sleep(1)
items = list(self.get_all_from_queue(queue))
queue.close()
queue.join_thread()
expected = [[], [logging.handlers.QueueListener._sentinel]]
self.assertIn(items, expected,
'Found unexpected messages in queue: %s' % (
[m.msg if isinstance(m, logging.LogRecord)
else m for m in items]))
def test_calls_task_done_after_stop(self):
# Issue 36813: Make sure queue.join does not deadlock.
log_queue = queue.Queue()
listener = logging.handlers.QueueListener(log_queue)
listener.start()
listener.stop()
with self.assertRaises(ValueError):
# Make sure all tasks are done and .join won't block.
log_queue.task_done()
ZERO = datetime.timedelta(0)
class UTC(datetime.tzinfo):
def utcoffset(self, dt):
return ZERO
dst = utcoffset
def tzname(self, dt):
return 'UTC'
utc = UTC()
class FormatterTest(unittest.TestCase):
def setUp(self):
self.common = {
'name': 'formatter.test',
'level': logging.DEBUG,
'pathname': os.path.join('path', 'to', 'dummy.ext'),
'lineno': 42,
'exc_info': None,
'func': None,
'msg': 'Message with %d %s',
'args': (2, 'placeholders'),
}
self.variants = {
'custom': {
'custom': 1234
}
}
def get_record(self, name=None):
result = dict(self.common)
if name is not None:
result.update(self.variants[name])
return logging.makeLogRecord(result)
def assert_error_message(self, exception, message, *args, **kwargs):
try:
self.assertRaises(exception, *args, **kwargs)
except exception as e:
self.assertEqual(message, e.message)
def test_percent(self):
# Test %-formatting
r = self.get_record()
f = logging.Formatter('${%(message)s}')
self.assertEqual(f.format(r), '${Message with 2 placeholders}')
f = logging.Formatter('%(random)s')
self.assertRaises(ValueError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('%(asctime)s')
self.assertTrue(f.usesTime())
f = logging.Formatter('%(asctime)-15s')
self.assertTrue(f.usesTime())
f = logging.Formatter('%(asctime)#15s')
self.assertTrue(f.usesTime())
def test_braces(self):
# Test {}-formatting
r = self.get_record()
f = logging.Formatter('$%{message}%$', style='{')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('{random}', style='{')
self.assertRaises(ValueError, f.format, r)
f = logging.Formatter("{message}", style='{')
self.assertFalse(f.usesTime())
f = logging.Formatter('{asctime}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime!s:15}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime:15}', style='{')
self.assertTrue(f.usesTime())
def test_dollars(self):
# Test $-formatting
r = self.get_record()
f = logging.Formatter('${message}', style='$')
self.assertEqual(f.format(r), 'Message with 2 placeholders')
f = logging.Formatter('$message', style='$')
self.assertEqual(f.format(r), 'Message with 2 placeholders')
f = logging.Formatter('$$%${message}%$$', style='$')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('${random}', style='$')
self.assertRaises(ValueError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('${asctime}', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('$asctime', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('${message}', style='$')
self.assertFalse(f.usesTime())
f = logging.Formatter('${asctime}--', style='$')
self.assertTrue(f.usesTime())
def test_format_validate(self):
# Check correct formatting
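        # Formatter validates the format string against the chosen style by
        # default (validate=True, added in Python 3.8); the cases below exercise
        # the validators for all three styles.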
# Percentage style
f = logging.Formatter("%(levelname)-15s - %(message) 5s - %(process)03d - %(module) - %(asctime)*.3s")
self.assertEqual(f._fmt, "%(levelname)-15s - %(message) 5s - %(process)03d - %(module) - %(asctime)*.3s")
f = logging.Formatter("%(asctime)*s - %(asctime)*.3s - %(process)-34.33o")
self.assertEqual(f._fmt, "%(asctime)*s - %(asctime)*.3s - %(process)-34.33o")
f = logging.Formatter("%(process)#+027.23X")
self.assertEqual(f._fmt, "%(process)#+027.23X")
f = logging.Formatter("%(foo)#.*g")
self.assertEqual(f._fmt, "%(foo)#.*g")
# StrFormat Style
f = logging.Formatter("$%{message}%$ - {asctime!a:15} - {customfield['key']}", style="{")
self.assertEqual(f._fmt, "$%{message}%$ - {asctime!a:15} - {customfield['key']}")
f = logging.Formatter("{process:.2f} - {custom.f:.4f}", style="{")
self.assertEqual(f._fmt, "{process:.2f} - {custom.f:.4f}")
f = logging.Formatter("{customfield!s:#<30}", style="{")
self.assertEqual(f._fmt, "{customfield!s:#<30}")
f = logging.Formatter("{message!r}", style="{")
self.assertEqual(f._fmt, "{message!r}")
f = logging.Formatter("{message!s}", style="{")
self.assertEqual(f._fmt, "{message!s}")
f = logging.Formatter("{message!a}", style="{")
self.assertEqual(f._fmt, "{message!a}")
f = logging.Formatter("{process!r:4.2}", style="{")
self.assertEqual(f._fmt, "{process!r:4.2}")
f = logging.Formatter("{process!s:<#30,.12f}- {custom:=+#30,.1d} - {module:^30}", style="{")
self.assertEqual(f._fmt, "{process!s:<#30,.12f}- {custom:=+#30,.1d} - {module:^30}")
f = logging.Formatter("{process!s:{w},.{p}}", style="{")
self.assertEqual(f._fmt, "{process!s:{w},.{p}}")
f = logging.Formatter("{foo:12.{p}}", style="{")
self.assertEqual(f._fmt, "{foo:12.{p}}")
f = logging.Formatter("{foo:{w}.6}", style="{")
self.assertEqual(f._fmt, "{foo:{w}.6}")
f = logging.Formatter("{foo[0].bar[1].baz}", style="{")
self.assertEqual(f._fmt, "{foo[0].bar[1].baz}")
f = logging.Formatter("{foo[k1].bar[k2].baz}", style="{")
self.assertEqual(f._fmt, "{foo[k1].bar[k2].baz}")
f = logging.Formatter("{12[k1].bar[k2].baz}", style="{")
self.assertEqual(f._fmt, "{12[k1].bar[k2].baz}")
# Dollar style
f = logging.Formatter("${asctime} - $message", style="$")
self.assertEqual(f._fmt, "${asctime} - $message")
f = logging.Formatter("$bar $$", style="$")
self.assertEqual(f._fmt, "$bar $$")
f = logging.Formatter("$bar $$$$", style="$")
        self.assertEqual(f._fmt, "$bar $$$$")  # renders as two literal '$' characters ($$)
        # Test that ValueError is raised for incorrect formats
# Percentage Style
self.assertRaises(ValueError, logging.Formatter, "%(asctime)Z")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)b")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)*")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)*3s")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)_")
self.assertRaises(ValueError, logging.Formatter, '{asctime}')
self.assertRaises(ValueError, logging.Formatter, '${message}')
        self.assertRaises(ValueError, logging.Formatter, '%(foo)#12.3*f')  # both '*' and a number given as the precision
self.assertRaises(ValueError, logging.Formatter, '%(foo)0*.8*f')
# StrFormat Style
# Testing failure for '-' in field name
self.assert_error_message(
ValueError,
"invalid field name/expression: 'name-thing'",
logging.Formatter, "{name-thing}", style="{"
)
# Testing failure for style mismatch
self.assert_error_message(
ValueError,
"invalid format: no fields",
logging.Formatter, '%(asctime)s', style='{'
)
# Testing failure for invalid conversion
        self.assert_error_message(
            ValueError,
            "invalid conversion: 'Z'",
            # '{asctime!Z:15}': 'Z' is not a valid conversion ('r', 's' or 'a')
            logging.Formatter, '{asctime!Z:15}', style='{'
        )
self.assertRaises(ValueError, logging.Formatter, '{asctime!s:#30,15f}', style='{')
self.assert_error_message(
ValueError,
"invalid format: expected ':' after conversion specifier",
logging.Formatter, '{asctime!aa:15}', style='{'
)
# Testing failure for invalid spec
self.assert_error_message(
ValueError,
"bad specifier: '.2ff'",
logging.Formatter, '{process:.2ff}', style='{'
)
self.assertRaises(ValueError, logging.Formatter, '{process:.2Z}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{process!s:<##30,12g}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{process!s:<#30#,12g}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{process!s:{{w}},{{p}}}', style='{')
        # Testing failure for mismatched braces
self.assert_error_message(
ValueError,
"invalid format: unmatched '{' in format spec",
logging.Formatter, '{process', style='{'
)
self.assert_error_message(
ValueError,
"invalid format: unmatched '{' in format spec",
logging.Formatter, 'process}', style='{'
)
self.assertRaises(ValueError, logging.Formatter, '{{foo!r:4.2}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{{foo!r:4.2}}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo/bar}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo:{{w}}.{{p}}}}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!X:{{w}}.{{p}}}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!a:random}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!a:ran{dom}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!a:ran{d}om}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo.!a:d}', style='{')
# Dollar style
        # Testing failure for a bare '$'
self.assert_error_message(
ValueError,
"invalid format: bare \'$\' not allowed",
logging.Formatter, '$bar $$$', style='$'
)
self.assert_error_message(
ValueError,
"invalid format: bare \'$\' not allowed",
logging.Formatter, 'bar $', style='$'
)
self.assert_error_message(
ValueError,
"invalid format: bare \'$\' not allowed",
logging.Formatter, 'foo $.', style='$'
)
        # Testing failure for mismatched style
self.assert_error_message(
ValueError,
"invalid format: no fields",
logging.Formatter, '{asctime}', style='$'
)
self.assertRaises(ValueError, logging.Formatter, '%(asctime)s', style='$')
        # Testing failure when the format contains no fields
self.assert_error_message(
ValueError,
"invalid format: no fields",
logging.Formatter, 'foo', style='$'
)
self.assertRaises(ValueError, logging.Formatter, '${asctime', style='$')
def test_defaults_parameter(self):
fmts = ['%(custom)s %(message)s', '{custom} {message}', '$custom $message']
styles = ['%', '{', '$']
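        # The 'defaults' argument (added in Python 3.10) supplies values for
        # fields that are missing from the record, for any of the three styles.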
for fmt, style in zip(fmts, styles):
f = logging.Formatter(fmt, style=style, defaults={'custom': 'Default'})
r = self.get_record()
self.assertEqual(f.format(r), 'Default Message with 2 placeholders')
r = self.get_record("custom")
self.assertEqual(f.format(r), '1234 Message with 2 placeholders')
# Without default
f = logging.Formatter(fmt, style=style)
r = self.get_record()
self.assertRaises(ValueError, f.format, r)
# Non-existing default is ignored
f = logging.Formatter(fmt, style=style, defaults={'Non-existing': 'Default'})
r = self.get_record("custom")
self.assertEqual(f.format(r), '1234 Message with 2 placeholders')
def test_invalid_style(self):
self.assertRaises(ValueError, logging.Formatter, None, None, 'x')
def test_time(self):
r = self.get_record()
dt = datetime.datetime(1993, 4, 21, 8, 3, 0, 0, utc)
# We use None to indicate we want the local timezone
# We're essentially converting a UTC time to local time
r.created = time.mktime(dt.astimezone(None).timetuple())
r.msecs = 123
f = logging.Formatter('%(asctime)s %(message)s')
f.converter = time.gmtime
self.assertEqual(f.formatTime(r), '1993-04-21 08:03:00,123')
self.assertEqual(f.formatTime(r, '%Y:%d'), '1993:21')
f.format(r)
self.assertEqual(r.asctime, '1993-04-21 08:03:00,123')
def test_default_msec_format_none(self):
class NoMsecFormatter(logging.Formatter):
default_msec_format = None
default_time_format = '%d/%m/%Y %H:%M:%S'
r = self.get_record()
dt = datetime.datetime(1993, 4, 21, 8, 3, 0, 123, utc)
r.created = time.mktime(dt.astimezone(None).timetuple())
f = NoMsecFormatter()
f.converter = time.gmtime
self.assertEqual(f.formatTime(r), '21/04/1993 08:03:00')
class TestBufferingFormatter(logging.BufferingFormatter):
def formatHeader(self, records):
return '[(%d)' % len(records)
def formatFooter(self, records):
return '(%d)]' % len(records)
class BufferingFormatterTest(unittest.TestCase):
def setUp(self):
self.records = [
logging.makeLogRecord({'msg': 'one'}),
logging.makeLogRecord({'msg': 'two'}),
]
def test_default(self):
f = logging.BufferingFormatter()
self.assertEqual('', f.format([]))
self.assertEqual('onetwo', f.format(self.records))
def test_custom(self):
f = TestBufferingFormatter()
self.assertEqual('[(2)onetwo(2)]', f.format(self.records))
lf = logging.Formatter('<%(message)s>')
f = TestBufferingFormatter(lf)
self.assertEqual('[(2)<one><two>(2)]', f.format(self.records))
class ExceptionTest(BaseTest):
def test_formatting(self):
r = self.root_logger
h = RecordingHandler()
r.addHandler(h)
try:
raise RuntimeError('deliberate mistake')
except:
logging.exception('failed', stack_info=True)
r.removeHandler(h)
h.close()
r = h.records[0]
self.assertTrue(r.exc_text.startswith('Traceback (most recent '
'call last):\n'))
self.assertTrue(r.exc_text.endswith('\nRuntimeError: '
'deliberate mistake'))
self.assertTrue(r.stack_info.startswith('Stack (most recent '
'call last):\n'))
self.assertTrue(r.stack_info.endswith('logging.exception(\'failed\', '
'stack_info=True)'))
class LastResortTest(BaseTest):
def test_last_resort(self):
# Test the last resort handler
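        # logging.lastResort is a WARNING-level handler writing to sys.stderr; it
        # is used when a record propagates to the top of the hierarchy without
        # finding any handlers.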
root = self.root_logger
root.removeHandler(self.root_hdlr)
old_lastresort = logging.lastResort
old_raise_exceptions = logging.raiseExceptions
try:
with support.captured_stderr() as stderr:
root.debug('This should not appear')
self.assertEqual(stderr.getvalue(), '')
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), 'Final chance!\n')
# No handlers and no last resort, so 'No handlers' message
logging.lastResort = None
with support.captured_stderr() as stderr:
root.warning('Final chance!')
msg = 'No handlers could be found for logger "root"\n'
self.assertEqual(stderr.getvalue(), msg)
# 'No handlers' message only printed once
with support.captured_stderr() as stderr:
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), '')
# If raiseExceptions is False, no message is printed
root.manager.emittedNoHandlerWarning = False
logging.raiseExceptions = False
with support.captured_stderr() as stderr:
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), '')
finally:
root.addHandler(self.root_hdlr)
logging.lastResort = old_lastresort
logging.raiseExceptions = old_raise_exceptions
class FakeHandler:
def __init__(self, identifier, called):
for method in ('acquire', 'flush', 'close', 'release'):
setattr(self, method, self.record_call(identifier, method, called))
def record_call(self, identifier, method_name, called):
def inner():
called.append('{} - {}'.format(identifier, method_name))
return inner
class RecordingHandler(logging.NullHandler):
def __init__(self, *args, **kwargs):
super(RecordingHandler, self).__init__(*args, **kwargs)
self.records = []
def handle(self, record):
"""Keep track of all the emitted records."""
self.records.append(record)
class ShutdownTest(BaseTest):
"""Test suite for the shutdown method."""
def setUp(self):
super(ShutdownTest, self).setUp()
self.called = []
raise_exceptions = logging.raiseExceptions
self.addCleanup(setattr, logging, 'raiseExceptions', raise_exceptions)
def raise_error(self, error):
def inner():
raise error()
return inner
def test_no_failure(self):
# create some fake handlers
handler0 = FakeHandler(0, self.called)
handler1 = FakeHandler(1, self.called)
handler2 = FakeHandler(2, self.called)
        # create live weakrefs to those handlers
handlers = map(logging.weakref.ref, [handler0, handler1, handler2])
logging.shutdown(handlerList=list(handlers))
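        # shutdown() walks the handler list in reverse order of registration,
        # acquiring, flushing, closing and releasing each handler in turn,
        # hence the 2-1-0 ordering expected below.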
expected = ['2 - acquire', '2 - flush', '2 - close', '2 - release',
'1 - acquire', '1 - flush', '1 - close', '1 - release',
'0 - acquire', '0 - flush', '0 - close', '0 - release']
self.assertEqual(expected, self.called)
def _test_with_failure_in_method(self, method, error):
handler = FakeHandler(0, self.called)
setattr(handler, method, self.raise_error(error))
handlers = [logging.weakref.ref(handler)]
logging.shutdown(handlerList=list(handlers))
self.assertEqual('0 - release', self.called[-1])
def test_with_ioerror_in_acquire(self):
self._test_with_failure_in_method('acquire', OSError)
def test_with_ioerror_in_flush(self):
self._test_with_failure_in_method('flush', OSError)
def test_with_ioerror_in_close(self):
self._test_with_failure_in_method('close', OSError)
def test_with_valueerror_in_acquire(self):
self._test_with_failure_in_method('acquire', ValueError)
def test_with_valueerror_in_flush(self):
self._test_with_failure_in_method('flush', ValueError)
def test_with_valueerror_in_close(self):
self._test_with_failure_in_method('close', ValueError)
def test_with_other_error_in_acquire_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('acquire', IndexError)
def test_with_other_error_in_flush_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('flush', IndexError)
def test_with_other_error_in_close_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('close', IndexError)
def test_with_other_error_in_acquire_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'acquire', IndexError)
def test_with_other_error_in_flush_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'flush', IndexError)
def test_with_other_error_in_close_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'close', IndexError)
class ModuleLevelMiscTest(BaseTest):
"""Test suite for some module level methods."""
def test_disable(self):
old_disable = logging.root.manager.disable
# confirm our assumptions are correct
self.assertEqual(old_disable, 0)
self.addCleanup(logging.disable, old_disable)
logging.disable(83)
self.assertEqual(logging.root.manager.disable, 83)
# test the default value introduced in 3.7
# (Issue #28524)
logging.disable()
self.assertEqual(logging.root.manager.disable, logging.CRITICAL)
def _test_log(self, method, level=None):
called = []
support.patch(self, logging, 'basicConfig',
lambda *a, **kw: called.append((a, kw)))
recording = RecordingHandler()
logging.root.addHandler(recording)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me: %r", recording)
else:
log_method("test me: %r", recording)
self.assertEqual(len(recording.records), 1)
record = recording.records[0]
self.assertEqual(record.getMessage(), "test me: %r" % recording)
expected_level = level if level is not None else getattr(logging, method.upper())
self.assertEqual(record.levelno, expected_level)
# basicConfig was not called!
self.assertEqual(called, [])
def test_log(self):
self._test_log('log', logging.ERROR)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
def test_set_logger_class(self):
self.assertRaises(TypeError, logging.setLoggerClass, object)
class MyLogger(logging.Logger):
pass
logging.setLoggerClass(MyLogger)
self.assertEqual(logging.getLoggerClass(), MyLogger)
logging.setLoggerClass(logging.Logger)
self.assertEqual(logging.getLoggerClass(), logging.Logger)
def test_subclass_logger_cache(self):
# bpo-37258
message = []
class MyLogger(logging.getLoggerClass()):
def __init__(self, name='MyLogger', level=logging.NOTSET):
super().__init__(name, level)
message.append('initialized')
logging.setLoggerClass(MyLogger)
logger = logging.getLogger('just_some_logger')
self.assertEqual(message, ['initialized'])
stream = io.StringIO()
h = logging.StreamHandler(stream)
logger.addHandler(h)
try:
logger.setLevel(logging.DEBUG)
logger.debug("hello")
self.assertEqual(stream.getvalue().strip(), "hello")
stream.truncate(0)
stream.seek(0)
logger.setLevel(logging.INFO)
logger.debug("hello")
self.assertEqual(stream.getvalue(), "")
finally:
logger.removeHandler(h)
h.close()
logging.setLoggerClass(logging.Logger)
def test_logging_at_shutdown(self):
# Issue #20037
code = """if 1:
import logging
class A:
def __del__(self):
try:
raise ValueError("some error")
except Exception:
logging.exception("exception in __del__")
a = A()"""
rc, out, err = assert_python_ok("-c", code)
err = err.decode()
self.assertIn("exception in __del__", err)
self.assertIn("ValueError: some error", err)
def test_recursion_error(self):
# Issue 36272
code = """if 1:
import logging
def rec():
logging.error("foo")
rec()
rec()"""
rc, out, err = assert_python_failure("-c", code)
err = err.decode()
self.assertNotIn("Cannot recover from stack overflow.", err)
self.assertEqual(rc, 1)
class LogRecordTest(BaseTest):
def test_str_rep(self):
r = logging.makeLogRecord({})
s = str(r)
self.assertTrue(s.startswith('<LogRecord: '))
self.assertTrue(s.endswith('>'))
def test_dict_arg(self):
h = RecordingHandler()
r = logging.getLogger()
r.addHandler(h)
d = {'less' : 'more' }
logging.warning('less is %(less)s', d)
self.assertIs(h.records[0].args, d)
self.assertEqual(h.records[0].message, 'less is more')
r.removeHandler(h)
h.close()
def test_multiprocessing(self):
r = logging.makeLogRecord({})
self.assertEqual(r.processName, 'MainProcess')
try:
import multiprocessing as mp
r = logging.makeLogRecord({})
self.assertEqual(r.processName, mp.current_process().name)
except ImportError:
pass
def test_optional(self):
r = logging.makeLogRecord({})
NOT_NONE = self.assertIsNotNone
NOT_NONE(r.thread)
NOT_NONE(r.threadName)
NOT_NONE(r.process)
NOT_NONE(r.processName)
log_threads = logging.logThreads
log_processes = logging.logProcesses
log_multiprocessing = logging.logMultiprocessing
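        # The module-level logThreads/logProcesses/logMultiprocessing switches
        # control whether LogRecord populates the corresponding attributes.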
try:
logging.logThreads = False
logging.logProcesses = False
logging.logMultiprocessing = False
r = logging.makeLogRecord({})
NONE = self.assertIsNone
NONE(r.thread)
NONE(r.threadName)
NONE(r.process)
NONE(r.processName)
finally:
logging.logThreads = log_threads
logging.logProcesses = log_processes
logging.logMultiprocessing = log_multiprocessing
class BasicConfigTest(unittest.TestCase):
"""Test suite for logging.basicConfig."""
def setUp(self):
super(BasicConfigTest, self).setUp()
self.handlers = logging.root.handlers
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.original_logging_level = logging.root.level
self.addCleanup(self.cleanup)
logging.root.handlers = []
def tearDown(self):
for h in logging.root.handlers[:]:
logging.root.removeHandler(h)
h.close()
super(BasicConfigTest, self).tearDown()
def cleanup(self):
setattr(logging.root, 'handlers', self.handlers)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
logging.root.setLevel(self.original_logging_level)
def test_no_kwargs(self):
logging.basicConfig()
        # handler defaults to a StreamHandler writing to sys.stderr
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, sys.stderr)
formatter = handler.formatter
# format defaults to logging.BASIC_FORMAT
self.assertEqual(formatter._style._fmt, logging.BASIC_FORMAT)
# datefmt defaults to None
self.assertIsNone(formatter.datefmt)
# style defaults to %
self.assertIsInstance(formatter._style, logging.PercentStyle)
# level is not explicitly set
self.assertEqual(logging.root.level, self.original_logging_level)
def test_strformatstyle(self):
with support.captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="{")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_stringtemplatestyle(self):
with support.captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="$")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_filename(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log')
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
expected = logging.FileHandler('test.log', 'a')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.assertEqual(handler.stream.name, expected.stream.name)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_filemode(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log', filemode='wb')
handler = logging.root.handlers[0]
expected = logging.FileHandler('test.log', 'wb')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_stream(self):
stream = io.StringIO()
self.addCleanup(stream.close)
logging.basicConfig(stream=stream)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, stream)
def test_format(self):
logging.basicConfig(format='%(asctime)s - %(message)s')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter._style._fmt, '%(asctime)s - %(message)s')
def test_datefmt(self):
logging.basicConfig(datefmt='bar')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter.datefmt, 'bar')
def test_style(self):
logging.basicConfig(style='$')
formatter = logging.root.handlers[0].formatter
self.assertIsInstance(formatter._style, logging.StringTemplateStyle)
def test_level(self):
old_level = logging.root.level
self.addCleanup(logging.root.setLevel, old_level)
logging.basicConfig(level=57)
self.assertEqual(logging.root.level, 57)
# Test that second call has no effect
logging.basicConfig(level=58)
self.assertEqual(logging.root.level, 57)
def test_incompatible(self):
assertRaises = self.assertRaises
handlers = [logging.StreamHandler()]
stream = sys.stderr
assertRaises(ValueError, logging.basicConfig, filename='test.log',
stream=stream)
assertRaises(ValueError, logging.basicConfig, filename='test.log',
handlers=handlers)
assertRaises(ValueError, logging.basicConfig, stream=stream,
handlers=handlers)
# Issue 23207: test for invalid kwargs
assertRaises(ValueError, logging.basicConfig, loglevel=logging.INFO)
# Should pop both filename and filemode even if filename is None
logging.basicConfig(filename=None, filemode='a')
def test_handlers(self):
handlers = [
logging.StreamHandler(),
logging.StreamHandler(sys.stdout),
logging.StreamHandler(),
]
f = logging.Formatter()
handlers[2].setFormatter(f)
logging.basicConfig(handlers=handlers)
self.assertIs(handlers[0], logging.root.handlers[0])
self.assertIs(handlers[1], logging.root.handlers[1])
self.assertIs(handlers[2], logging.root.handlers[2])
self.assertIsNotNone(handlers[0].formatter)
self.assertIsNotNone(handlers[1].formatter)
self.assertIs(handlers[2].formatter, f)
self.assertIs(handlers[0].formatter, handlers[1].formatter)
def test_force(self):
old_string_io = io.StringIO()
new_string_io = io.StringIO()
old_handlers = [logging.StreamHandler(old_string_io)]
new_handlers = [logging.StreamHandler(new_string_io)]
logging.basicConfig(level=logging.WARNING, handlers=old_handlers)
logging.warning('warn')
logging.info('info')
logging.debug('debug')
self.assertEqual(len(logging.root.handlers), 1)
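        # force=True (added in Python 3.8) removes and closes any handlers already
        # attached to the root logger before the new configuration is applied.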
logging.basicConfig(level=logging.INFO, handlers=new_handlers,
force=True)
logging.warning('warn')
logging.info('info')
logging.debug('debug')
self.assertEqual(len(logging.root.handlers), 1)
self.assertEqual(old_string_io.getvalue().strip(),
'WARNING:root:warn')
self.assertEqual(new_string_io.getvalue().strip(),
'WARNING:root:warn\nINFO:root:info')
def test_encoding(self):
try:
encoding = 'utf-8'
logging.basicConfig(filename='test.log', encoding=encoding,
errors='strict',
format='%(message)s', level=logging.DEBUG)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
self.assertEqual(handler.encoding, encoding)
logging.debug('The Øresund Bridge joins Copenhagen to Malmö')
finally:
handler.close()
with open('test.log', encoding='utf-8') as f:
data = f.read().strip()
os.remove('test.log')
self.assertEqual(data,
'The Øresund Bridge joins Copenhagen to Malmö')
def test_encoding_errors(self):
try:
encoding = 'ascii'
logging.basicConfig(filename='test.log', encoding=encoding,
errors='ignore',
format='%(message)s', level=logging.DEBUG)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
self.assertEqual(handler.encoding, encoding)
logging.debug('The Øresund Bridge joins Copenhagen to Malmö')
finally:
handler.close()
with open('test.log', encoding='utf-8') as f:
data = f.read().strip()
os.remove('test.log')
self.assertEqual(data, 'The resund Bridge joins Copenhagen to Malm')
def test_encoding_errors_default(self):
try:
encoding = 'ascii'
logging.basicConfig(filename='test.log', encoding=encoding,
format='%(message)s', level=logging.DEBUG)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
self.assertEqual(handler.encoding, encoding)
self.assertEqual(handler.errors, 'backslashreplace')
logging.debug('😂: ☃️: The Øresund Bridge joins Copenhagen to Malmö')
finally:
handler.close()
with open('test.log', encoding='utf-8') as f:
data = f.read().strip()
os.remove('test.log')
self.assertEqual(data, r'\U0001f602: \u2603\ufe0f: The \xd8resund '
r'Bridge joins Copenhagen to Malm\xf6')
def test_encoding_errors_none(self):
# Specifying None should behave as 'strict'
try:
encoding = 'ascii'
logging.basicConfig(filename='test.log', encoding=encoding,
errors=None,
format='%(message)s', level=logging.DEBUG)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
self.assertEqual(handler.encoding, encoding)
self.assertIsNone(handler.errors)
message = []
def dummy_handle_error(record):
_, v, _ = sys.exc_info()
message.append(str(v))
handler.handleError = dummy_handle_error
logging.debug('The Øresund Bridge joins Copenhagen to Malmö')
self.assertTrue(message)
self.assertIn("'ascii' codec can't encode "
"character '\\xd8' in position 4:", message[0])
finally:
handler.close()
with open('test.log', encoding='utf-8') as f:
data = f.read().strip()
os.remove('test.log')
# didn't write anything due to the encoding error
self.assertEqual(data, r'')
def _test_log(self, method, level=None):
# logging.root has no handlers so basicConfig should be called
called = []
old_basic_config = logging.basicConfig
def my_basic_config(*a, **kw):
old_basic_config()
old_level = logging.root.level
logging.root.setLevel(100) # avoid having messages in stderr
self.addCleanup(logging.root.setLevel, old_level)
called.append((a, kw))
support.patch(self, logging, 'basicConfig', my_basic_config)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me")
else:
log_method("test me")
# basicConfig was called with no arguments
self.assertEqual(called, [((), {})])
def test_log(self):
self._test_log('log', logging.WARNING)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
class LoggerAdapterTest(unittest.TestCase):
def setUp(self):
super(LoggerAdapterTest, self).setUp()
old_handler_list = logging._handlerList[:]
self.recording = RecordingHandler()
self.logger = logging.root
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
def cleanup():
logging._handlerList[:] = old_handler_list
self.addCleanup(cleanup)
self.addCleanup(logging.shutdown)
self.adapter = logging.LoggerAdapter(logger=self.logger, extra=None)
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_exception_excinfo(self):
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception('exc_info test', exc_info=exc)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_critical(self):
msg = 'critical test! %r'
self.adapter.critical(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.CRITICAL)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
def test_is_enabled_for(self):
old_disable = self.adapter.logger.manager.disable
self.adapter.logger.manager.disable = 33
self.addCleanup(setattr, self.adapter.logger.manager, 'disable',
old_disable)
self.assertFalse(self.adapter.isEnabledFor(32))
def test_has_handlers(self):
self.assertTrue(self.adapter.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
self.assertFalse(self.adapter.hasHandlers())
def test_nested(self):
class Adapter(logging.LoggerAdapter):
prefix = 'Adapter'
def process(self, msg, kwargs):
return f"{self.prefix} {msg}", kwargs
msg = 'Adapters can be nested, yo.'
adapter = Adapter(logger=self.logger, extra=None)
adapter_adapter = Adapter(logger=adapter, extra=None)
adapter_adapter.prefix = 'AdapterAdapter'
self.assertEqual(repr(adapter), repr(adapter_adapter))
adapter_adapter.log(logging.CRITICAL, msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.CRITICAL)
self.assertEqual(record.msg, f"Adapter AdapterAdapter {msg}")
self.assertEqual(record.args, (self.recording,))
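        # LoggerAdapter exposes the wrapped logger's manager as a read/write
        # property, so setting it on the outermost adapter reaches the underlying
        # logger (and any adapters in between).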
orig_manager = adapter_adapter.manager
self.assertIs(adapter.manager, orig_manager)
self.assertIs(self.logger.manager, orig_manager)
temp_manager = object()
try:
adapter_adapter.manager = temp_manager
self.assertIs(adapter_adapter.manager, temp_manager)
self.assertIs(adapter.manager, temp_manager)
self.assertIs(self.logger.manager, temp_manager)
finally:
adapter_adapter.manager = orig_manager
self.assertIs(adapter_adapter.manager, orig_manager)
self.assertIs(adapter.manager, orig_manager)
self.assertIs(self.logger.manager, orig_manager)
class LoggerTest(BaseTest):
def setUp(self):
super(LoggerTest, self).setUp()
self.recording = RecordingHandler()
self.logger = logging.Logger(name='blah')
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
self.addCleanup(logging.shutdown)
def test_set_invalid_level(self):
self.assertRaises(TypeError, self.logger.setLevel, object())
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.logger.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_log_invalid_level_with_raise(self):
with support.swap_attr(logging, 'raiseExceptions', True):
self.assertRaises(TypeError, self.logger.log, '10', 'test message')
def test_log_invalid_level_no_raise(self):
with support.swap_attr(logging, 'raiseExceptions', False):
self.logger.log('10', 'test message') # no exception happens
def test_find_caller_with_stack_info(self):
called = []
support.patch(self, logging.traceback, 'print_stack',
lambda f, file: called.append(file.getvalue()))
self.logger.findCaller(stack_info=True)
self.assertEqual(len(called), 1)
self.assertEqual('Stack (most recent call last):\n', called[0])
def test_find_caller_with_stacklevel(self):
the_level = 1
def innermost():
self.logger.warning('test', stacklevel=the_level)
def inner():
innermost()
def outer():
inner()
records = self.recording.records
outer()
self.assertEqual(records[-1].funcName, 'innermost')
lineno = records[-1].lineno
the_level += 1
outer()
self.assertEqual(records[-1].funcName, 'inner')
self.assertGreater(records[-1].lineno, lineno)
lineno = records[-1].lineno
the_level += 1
outer()
self.assertEqual(records[-1].funcName, 'outer')
self.assertGreater(records[-1].lineno, lineno)
lineno = records[-1].lineno
the_level += 1
outer()
self.assertEqual(records[-1].funcName, 'test_find_caller_with_stacklevel')
self.assertGreater(records[-1].lineno, lineno)
def test_make_record_with_extra_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
rv = logging._logRecordFactory(name, level, fn, lno, msg, args,
exc_info, func, sinfo)
for key in ('message', 'asctime') + tuple(rv.__dict__.keys()):
extra = {key: 'some value'}
self.assertRaises(KeyError, self.logger.makeRecord, name, level,
fn, lno, msg, args, exc_info,
extra=extra, sinfo=sinfo)
def test_make_record_with_extra_no_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
extra = {'valid_key': 'some value'}
result = self.logger.makeRecord(name, level, fn, lno, msg, args,
exc_info, extra=extra, sinfo=sinfo)
self.assertIn('valid_key', result.__dict__)
def test_has_handlers(self):
self.assertTrue(self.logger.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
def test_has_handlers_no_propagate(self):
child_logger = logging.getLogger('blah.child')
child_logger.propagate = False
self.assertFalse(child_logger.hasHandlers())
def test_is_enabled_for(self):
old_disable = self.logger.manager.disable
self.logger.manager.disable = 23
self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
self.assertFalse(self.logger.isEnabledFor(22))
def test_is_enabled_for_disabled_logger(self):
old_disabled = self.logger.disabled
old_disable = self.logger.manager.disable
self.logger.disabled = True
self.logger.manager.disable = 21
self.addCleanup(setattr, self.logger, 'disabled', old_disabled)
self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
self.assertFalse(self.logger.isEnabledFor(22))
def test_root_logger_aliases(self):
root = logging.getLogger()
self.assertIs(root, logging.root)
self.assertIs(root, logging.getLogger(None))
self.assertIs(root, logging.getLogger(''))
self.assertIs(root, logging.getLogger('root'))
self.assertIs(root, logging.getLogger('foo').root)
self.assertIs(root, logging.getLogger('foo.bar').root)
self.assertIs(root, logging.getLogger('foo').parent)
self.assertIsNot(root, logging.getLogger('\0'))
self.assertIsNot(root, logging.getLogger('foo.bar').parent)
def test_invalid_names(self):
self.assertRaises(TypeError, logging.getLogger, any)
self.assertRaises(TypeError, logging.getLogger, b'foo')
def test_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for name in ('', 'root', 'foo', 'foo.bar', 'baz.bar'):
logger = logging.getLogger(name)
s = pickle.dumps(logger, proto)
unpickled = pickle.loads(s)
self.assertIs(unpickled, logger)
def test_caching(self):
root = self.root_logger
logger1 = logging.getLogger("abc")
logger2 = logging.getLogger("abc.def")
# Set root logger level and ensure cache is empty
root.setLevel(logging.ERROR)
self.assertEqual(logger2.getEffectiveLevel(), logging.ERROR)
self.assertEqual(logger2._cache, {})
# Ensure cache is populated and calls are consistent
self.assertTrue(logger2.isEnabledFor(logging.ERROR))
self.assertFalse(logger2.isEnabledFor(logging.DEBUG))
self.assertEqual(logger2._cache, {logging.ERROR: True, logging.DEBUG: False})
self.assertEqual(root._cache, {})
self.assertTrue(logger2.isEnabledFor(logging.ERROR))
# Ensure root cache gets populated
self.assertEqual(root._cache, {})
self.assertTrue(root.isEnabledFor(logging.ERROR))
self.assertEqual(root._cache, {logging.ERROR: True})
# Set parent logger level and ensure caches are emptied
logger1.setLevel(logging.CRITICAL)
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
# Ensure logger2 uses parent logger's effective level
self.assertFalse(logger2.isEnabledFor(logging.ERROR))
# Set level to NOTSET and ensure caches are empty
logger2.setLevel(logging.NOTSET)
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
self.assertEqual(logger1._cache, {})
self.assertEqual(root._cache, {})
# Verify logger2 follows parent and not root
self.assertFalse(logger2.isEnabledFor(logging.ERROR))
self.assertTrue(logger2.isEnabledFor(logging.CRITICAL))
self.assertFalse(logger1.isEnabledFor(logging.ERROR))
self.assertTrue(logger1.isEnabledFor(logging.CRITICAL))
self.assertTrue(root.isEnabledFor(logging.ERROR))
# Disable logging in manager and ensure caches are clear
logging.disable()
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
self.assertEqual(logger1._cache, {})
self.assertEqual(root._cache, {})
# Ensure no loggers are enabled
self.assertFalse(logger1.isEnabledFor(logging.CRITICAL))
self.assertFalse(logger2.isEnabledFor(logging.CRITICAL))
self.assertFalse(root.isEnabledFor(logging.CRITICAL))
class BaseFileTest(BaseTest):
"Base class for handler tests that write log files"
def setUp(self):
BaseTest.setUp(self)
fd, self.fn = tempfile.mkstemp(".log", "test_logging-2-")
os.close(fd)
self.rmfiles = []
def tearDown(self):
for fn in self.rmfiles:
os.unlink(fn)
if os.path.exists(self.fn):
os.unlink(self.fn)
BaseTest.tearDown(self)
def assertLogFile(self, filename):
"Assert a log file is there and register it for deletion"
self.assertTrue(os.path.exists(filename),
msg="Log file %r does not exist" % filename)
self.rmfiles.append(filename)
class FileHandlerTest(BaseFileTest):
def test_delay(self):
os.unlink(self.fn)
fh = logging.FileHandler(self.fn, delay=True)
self.assertIsNone(fh.stream)
self.assertFalse(os.path.exists(self.fn))
fh.handle(logging.makeLogRecord({}))
self.assertIsNotNone(fh.stream)
self.assertTrue(os.path.exists(self.fn))
fh.close()
class RotatingFileHandlerTest(BaseFileTest):
def next_rec(self):
return logging.LogRecord('n', logging.DEBUG, 'p', 1,
self.next_message(), None, None, None)
def test_should_not_rollover(self):
# If maxBytes is zero, rollover never occurs
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=0)
self.assertFalse(rh.shouldRollover(None))
rh.close()
def test_should_rollover(self):
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=1)
self.assertTrue(rh.shouldRollover(self.next_rec()))
rh.close()
def test_file_created(self):
# check that the file is created and assume it was created
# by us
rh = logging.handlers.RotatingFileHandler(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.close()
def test_rollover_filenames(self):
def namer(name):
return name + ".test"
rh = logging.handlers.RotatingFileHandler(
self.fn, backupCount=2, maxBytes=1)
rh.namer = namer
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".1"))
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".2"))
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
def test_namer_rotator_inheritance(self):
class HandlerWithNamerAndRotator(logging.handlers.RotatingFileHandler):
def namer(self, name):
return name + ".test"
def rotator(self, source, dest):
if os.path.exists(source):
os.rename(source, dest + ".rotated")
rh = HandlerWithNamerAndRotator(
self.fn, backupCount=2, maxBytes=1)
self.assertEqual(rh.namer(self.fn), self.fn + ".test")
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(rh.namer(self.fn + ".1") + ".rotated")
self.assertFalse(os.path.exists(rh.namer(self.fn + ".1")))
rh.close()
@support.requires_zlib()
def test_rotator(self):
def namer(name):
return name + ".gz"
def rotator(source, dest):
with open(source, "rb") as sf:
data = sf.read()
compressed = zlib.compress(data, 9)
with open(dest, "wb") as df:
df.write(compressed)
os.remove(source)
rh = logging.handlers.RotatingFileHandler(
self.fn, backupCount=2, maxBytes=1)
rh.rotator = rotator
rh.namer = namer
m1 = self.next_rec()
rh.emit(m1)
self.assertLogFile(self.fn)
m2 = self.next_rec()
rh.emit(m2)
fn = namer(self.fn + ".1")
self.assertLogFile(fn)
newline = os.linesep
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
self.assertLogFile(fn)
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m2.msg + newline)
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
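# Illustrative sketch, not part of the test suite: the namer/rotator hooks the
# tests above exercise are the same ones an application would attach to a
# RotatingFileHandler to compress rotated files. The function name and defaults
# below are hypothetical; as in test_rotator, the output is raw zlib data, so
# the ".gz" suffix is only a naming convention here.
def make_compressing_handler(filename, max_bytes=1 << 20, backups=3):
    import os
    import zlib
    import logging.handlers

    def namer(name):
        return name + ".gz"

    def rotator(source, dest):
        # compress the rolled-over file and remove the uncompressed original
        with open(source, "rb") as sf:
            data = sf.read()
        with open(dest, "wb") as df:
            df.write(zlib.compress(data, 9))
        os.remove(source)

    handler = logging.handlers.RotatingFileHandler(
        filename, maxBytes=max_bytes, backupCount=backups)
    handler.rotator = rotator
    handler.namer = namer
    return handler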
class TimedRotatingFileHandlerTest(BaseFileTest):
# other test methods added below
def test_rollover(self):
fh = logging.handlers.TimedRotatingFileHandler(self.fn, 'S',
backupCount=1)
fmt = logging.Formatter('%(asctime)s %(message)s')
fh.setFormatter(fmt)
r1 = logging.makeLogRecord({'msg': 'testing - initial'})
fh.emit(r1)
self.assertLogFile(self.fn)
time.sleep(1.1) # a little over a second ...
r2 = logging.makeLogRecord({'msg': 'testing - after delay'})
fh.emit(r2)
fh.close()
# At this point, we should have a recent rotated file which we
# can test for the existence of. However, in practice, on some
# machines which run really slowly, we don't know how far back
# in time to go to look for the log file. So, we go back a fair
# bit, and stop as soon as we see a rotated file. In theory this
# could of course still fail, but the chances are lower.
found = False
now = datetime.datetime.now()
GO_BACK = 5 * 60 # seconds
for secs in range(GO_BACK):
prev = now - datetime.timedelta(seconds=secs)
fn = self.fn + prev.strftime(".%Y-%m-%d_%H-%M-%S")
found = os.path.exists(fn)
if found:
self.rmfiles.append(fn)
break
msg = 'No rotated files found, went back %d seconds' % GO_BACK
if not found:
# print additional diagnostics
dn, fn = os.path.split(self.fn)
files = [f for f in os.listdir(dn) if f.startswith(fn)]
print('Test time: %s' % now.strftime("%Y-%m-%d %H-%M-%S"), file=sys.stderr)
print('The only matching files are: %s' % files, file=sys.stderr)
for f in files:
print('Contents of %s:' % f)
path = os.path.join(dn, f)
with open(path, 'r') as tf:
print(tf.read())
self.assertTrue(found, msg=msg)
def test_invalid(self):
assertRaises = self.assertRaises
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'X', delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W', delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W7', delay=True)
def test_compute_rollover_daily_attime(self):
currentTime = 0
atTime = datetime.time(12, 0, 0)
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when='MIDNIGHT', interval=1, backupCount=0, utc=True,
atTime=atTime)
try:
actual = rh.computeRollover(currentTime)
self.assertEqual(actual, currentTime + 12 * 60 * 60)
actual = rh.computeRollover(currentTime + 13 * 60 * 60)
self.assertEqual(actual, currentTime + 36 * 60 * 60)
finally:
rh.close()
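# Worked numbers for the assertions above: with currentTime == 0 (midnight UTC)
# and atTime == 12:00, the first rollover falls 12 * 60 * 60 == 43200 seconds
# later; asking again one hour after that rollover (currentTime + 13h) pushes
# the answer to the 12:00 slot of the next day, 36 * 60 * 60 == 129600 seconds.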
#@unittest.skipIf(True, 'Temporarily skipped while failures investigated.')
def test_compute_rollover_weekly_attime(self):
currentTime = int(time.time())
today = currentTime - currentTime % 86400
atTime = datetime.time(12, 0, 0)
wday = time.gmtime(today).tm_wday
for day in range(7):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when='W%d' % day, interval=1, backupCount=0, utc=True,
atTime=atTime)
try:
if wday > day:
# The rollover day has already passed this week, so we
# go over into next week
expected = (7 - wday + day)
else:
expected = (day - wday)
# At this point expected is in days from now, convert to seconds
expected *= 24 * 60 * 60
# Add in the rollover time
expected += 12 * 60 * 60
# Add in adjustment for today
expected += today
actual = rh.computeRollover(today)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
if day == wday:
# goes into following week
expected += 7 * 24 * 60 * 60
actual = rh.computeRollover(today + 13 * 60 * 60)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
finally:
rh.close()
def secs(**kw):
return datetime.timedelta(**kw) // datetime.timedelta(seconds=1)
for when, exp in (('S', 1),
('M', 60),
('H', 60 * 60),
('D', 60 * 60 * 24),
('MIDNIGHT', 60 * 60 * 24),
# current time (epoch start) is a Thursday, W0 means Monday
('W0', secs(days=4, hours=24)),
):
def test_compute_rollover(self, when=when, exp=exp):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when=when, interval=1, backupCount=0, utc=True)
currentTime = 0.0
actual = rh.computeRollover(currentTime)
if exp != actual:
# Failures occur on some systems for MIDNIGHT and W0.
# Print detailed calculation for MIDNIGHT so we can try to see
# what's going on
if when == 'MIDNIGHT':
try:
if rh.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
# r is the number of seconds left between now and midnight
r = logging.handlers._MIDNIGHT - ((currentHour * 60 +
currentMinute) * 60 +
currentSecond)
result = currentTime + r
print('t: %s (%s)' % (t, rh.utc), file=sys.stderr)
print('currentHour: %s' % currentHour, file=sys.stderr)
print('currentMinute: %s' % currentMinute, file=sys.stderr)
print('currentSecond: %s' % currentSecond, file=sys.stderr)
print('r: %s' % r, file=sys.stderr)
print('result: %s' % result, file=sys.stderr)
except Exception:
print('exception in diagnostic code: %s' % sys.exc_info()[1], file=sys.stderr)
self.assertEqual(exp, actual)
rh.close()
setattr(TimedRotatingFileHandlerTest, "test_compute_rollover_%s" % when, test_compute_rollover)
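# The loop above stamps one generated test method per (when, exp) pair onto
# TimedRotatingFileHandlerTest, e.g. test_compute_rollover_S and
# test_compute_rollover_MIDNIGHT, so each interval code gets its own entry in
# the test run.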
@unittest.skipUnless(win32evtlog, 'win32evtlog/win32evtlogutil/pywintypes required for this test.')
class NTEventLogHandlerTest(BaseTest):
def test_basic(self):
logtype = 'Application'
elh = win32evtlog.OpenEventLog(None, logtype)
num_recs = win32evtlog.GetNumberOfEventLogRecords(elh)
try:
h = logging.handlers.NTEventLogHandler('test_logging')
except pywintypes.error as e:
if e.winerror == 5: # access denied
raise unittest.SkipTest('Insufficient privileges to run test')
raise
r = logging.makeLogRecord({'msg': 'Test Log Message'})
h.handle(r)
h.close()
# Now see if the event is recorded
self.assertLess(num_recs, win32evtlog.GetNumberOfEventLogRecords(elh))
flags = win32evtlog.EVENTLOG_BACKWARDS_READ | \
win32evtlog.EVENTLOG_SEQUENTIAL_READ
found = False
GO_BACK = 100
events = win32evtlog.ReadEventLog(elh, flags, GO_BACK)
for e in events:
if e.SourceName != 'test_logging':
continue
msg = win32evtlogutil.SafeFormatMessage(e, logtype)
if msg != 'Test Log Message\r\n':
continue
found = True
break
msg = 'Record not found in event log, went back %d records' % GO_BACK
self.assertTrue(found, msg=msg)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
not_exported = {
'logThreads', 'logMultiprocessing', 'logProcesses', 'currentframe',
'PercentStyle', 'StrFormatStyle', 'StringTemplateStyle',
'Filterer', 'PlaceHolder', 'Manager', 'RootLogger', 'root',
'threading'}
support.check__all__(self, logging, not_exported=not_exported)
# Set the locale to the platform-dependent default. I have no idea
# why the test does this, but in any case we save the current locale
# first and restore it at the end.
@support.run_with_locale('LC_ALL', '')
def test_main():
tests = [
BuiltinLevelsTest, BasicFilterTest, CustomLevelsAndFiltersTest,
HandlerTest, MemoryHandlerTest, ConfigFileTest, SocketHandlerTest,
DatagramHandlerTest, MemoryTest, EncodingTest, WarningsTest,
ConfigDictTest, ManagerTest, FormatterTest, BufferingFormatterTest,
StreamHandlerTest, LogRecordFactoryTest, ChildLoggerTest,
QueueHandlerTest, ShutdownTest, ModuleLevelMiscTest, BasicConfigTest,
LoggerAdapterTest, LoggerTest, SMTPHandlerTest, FileHandlerTest,
RotatingFileHandlerTest, LastResortTest, LogRecordTest,
ExceptionTest, SysLogHandlerTest, IPv6SysLogHandlerTest, HTTPHandlerTest,
NTEventLogHandlerTest, TimedRotatingFileHandlerTest,
UnixSocketHandlerTest, UnixDatagramHandlerTest, UnixSysLogHandlerTest,
MiscTestCase
]
if hasattr(logging.handlers, 'QueueListener'):
tests.append(QueueListenerTest)
support.run_unittest(*tests)
if __name__ == "__main__":
test_main()
test_multiprocessing.py
#!/usr/bin/env python3
#
# Unit tests for the multiprocessing package
#
import unittest
import queue as pyqueue
import time
import io
import sys
import os
import gc
import signal
import array
import socket
import random
import logging
import test.support
# Skip tests if _multiprocessing wasn't built.
_multiprocessing = test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocessing.synchronize')
# import threading after _multiprocessing to raise a more relevant error
# message: "No module named _multiprocessing". _multiprocessing is not compiled
# without thread support.
import threading
import multiprocessing.dummy
import multiprocessing.connection
import multiprocessing.managers
import multiprocessing.heap
import multiprocessing.pool
from multiprocessing import util
try:
from multiprocessing.sharedctypes import Value, copy
HAS_SHAREDCTYPES = True
except ImportError:
HAS_SHAREDCTYPES = False
#
#
#
def latin(s):
return s.encode('latin')
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.DEBUG
DELTA = 0.1
CHECK_TIMINGS = False # setting this to True makes the tests take a lot longer
# and can sometimes cause non-serious failures,
# because some calls block a bit longer than
# expected
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
HAVE_GETVALUE = not getattr(_multiprocessing,
'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
#
# Some tests require ctypes
#
try:
from ctypes import Structure, c_int, c_double
except ImportError:
Structure = object
c_int = c_double = None
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
def __init__(self, func):
self.func = func
self.elapsed = None
def __call__(self, *args, **kwds):
t = time.time()
try:
return self.func(*args, **kwds)
finally:
self.elapsed = time.time() - t
#
# Base class for test cases
#
class BaseTestCase(object):
ALLOWED_TYPES = ('processes', 'manager', 'threads')
def assertTimingAlmostEqual(self, a, b):
if CHECK_TIMINGS:
self.assertAlmostEqual(a, b, 1)
def assertReturnsIfImplemented(self, value, func, *args):
try:
res = func(*args)
except NotImplementedError:
pass
else:
return self.assertEqual(value, res)
# For the sanity of Windows users: refuse to pickle test cases outright,
# rather than crashing or freezing in multiple ways.
def __reduce__(self, *args):
raise NotImplementedError("shouldn't try to pickle a test case")
__reduce_ex__ = __reduce__
#
# Return the value of a semaphore
#
def get_value(self):
try:
return self.get_value()
except AttributeError:
try:
return self._Semaphore__value
except AttributeError:
try:
return self._value
except AttributeError:
raise NotImplementedError
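# get_value() probes the places a semaphore's counter may live: the public
# get_value() of multiprocessing semaphores, then the private attributes used
# by threading's implementations. Callers go through
# assertReturnsIfImplemented(), which simply skips the check when none of
# these are available.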
#
# Testcases
#
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_current(self):
if self.TYPE == 'threads':
return
current = self.current_process()
authkey = current.authkey
self.assertTrue(current.is_alive())
self.assertTrue(not current.daemon)
self.assertIsInstance(authkey, bytes)
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.ident, os.getpid())
self.assertEqual(current.exitcode, None)
@classmethod
def _test(cls, q, *args, **kwds):
current = cls.current_process()
q.put(args)
q.put(kwds)
q.put(current.name)
if cls.TYPE != 'threads':
q.put(bytes(current.authkey))
q.put(current.pid)
def test_process(self):
q = self.Queue(1)
e = self.Event()
args = (q, 1, 2)
kwargs = {'hello':23, 'bye':2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.daemon = True
current = self.current_process()
if self.TYPE != 'threads':
self.assertEqual(p.authkey, current.authkey)
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.daemon, True)
self.assertNotIn(p, self.active_children())
self.assertTrue(type(self.active_children()) is list)
self.assertEqual(p.exitcode, None)
p.start()
self.assertEqual(p.exitcode, None)
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(q.get(), args[1:])
self.assertEqual(q.get(), kwargs)
self.assertEqual(q.get(), p.name)
if self.TYPE != 'threads':
self.assertEqual(q.get(), current.authkey)
self.assertEqual(q.get(), p.pid)
p.join()
self.assertEqual(p.exitcode, 0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
@classmethod
def _test_terminate(cls):
time.sleep(1000)
def test_terminate(self):
if self.TYPE == 'threads':
return
p = self.Process(target=self._test_terminate)
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(p.exitcode, None)
p.terminate()
join = TimingWrapper(p.join)
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
p.join()
# XXX sometimes get p.exitcode == 0 on Windows ...
#self.assertEqual(p.exitcode, -signal.SIGTERM)
def test_cpu_count(self):
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1)
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertNotIn(p, self.active_children())
p.start()
self.assertIn(p, self.active_children())
p.join()
self.assertNotIn(p, self.active_children())
@classmethod
def _test_recursion(cls, wconn, id):
from multiprocessing import forking
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = cls.Process(
target=cls._test_recursion, args=(wconn, id+[i])
)
p.start()
p.join()
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
#
#
#
class _UpperCaser(multiprocessing.Process):
def __init__(self):
multiprocessing.Process.__init__(self)
self.child_conn, self.parent_conn = multiprocessing.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_subclassing(self):
uppercaser = _UpperCaser()
uppercaser.start()
self.assertEqual(uppercaser.submit('hello'), 'HELLO')
self.assertEqual(uppercaser.submit('world'), 'WORLD')
uppercaser.stop()
uppercaser.join()
#
#
#
def queue_empty(q):
if hasattr(q, 'empty'):
return q.empty()
else:
return q.qsize() == 0
def queue_full(q, maxsize):
if hasattr(q, 'full'):
return q.full()
else:
return q.qsize() == maxsize
class _TestQueue(BaseTestCase):
@classmethod
def _test_put(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
for i in range(6):
queue.get()
parent_can_continue.set()
def test_put(self):
MAXSIZE = 6
queue = self.Queue(maxsize=MAXSIZE)
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_put,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
queue.put(1)
queue.put(2, True)
queue.put(3, True, None)
queue.put(4, False)
queue.put(5, False, None)
queue.put_nowait(6)
# the values may be in the buffer but not yet in the pipe, so sleep a bit
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
self.assertEqual(queue_full(queue, MAXSIZE), True)
put = TimingWrapper(queue.put)
put_nowait = TimingWrapper(queue.put_nowait)
self.assertRaises(pyqueue.Full, put, 7, False)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, False, None)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put_nowait, 7)
self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
child_can_start.set()
parent_can_continue.wait()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
proc.join()
@classmethod
def _test_get(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
#queue.put(1)
queue.put(2)
queue.put(3)
queue.put(4)
queue.put(5)
parent_can_continue.set()
def test_get(self):
queue = self.Queue()
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_get,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
child_can_start.set()
parent_can_continue.wait()
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
# Hangs unexpectedly, remove for now
#self.assertEqual(queue.get(), 1)
self.assertEqual(queue.get(True, None), 2)
self.assertEqual(queue.get(True), 3)
self.assertEqual(queue.get(timeout=1), 4)
self.assertEqual(queue.get_nowait(), 5)
self.assertEqual(queue_empty(queue), True)
get = TimingWrapper(queue.get)
get_nowait = TimingWrapper(queue.get_nowait)
self.assertRaises(pyqueue.Empty, get, False)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, False, None)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get_nowait)
self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
proc.join()
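# The timing assertions above pin down the get(block, timeout) semantics:
# block=False returns (or raises Empty) immediately and ignores any timeout,
# while block=True with a timeout waits roughly that long before raising.
# test_put() checks the mirror-image behaviour for put().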
@classmethod
def _test_fork(cls, queue):
for i in range(10, 20):
queue.put(i)
# note that at this point the items may only be buffered, so the
# process cannot shut down until the feeder thread has finished
# pushing items onto the pipe.
def test_fork(self):
# Old versions of Queue would fail to create a new feeder
# thread for a forked process if the original process had its
# own feeder thread. This test checks that this no longer
# happens.
queue = self.Queue()
# put items on queue so that main process starts a feeder thread
for i in range(10):
queue.put(i)
# wait to make sure thread starts before we fork a new process
time.sleep(DELTA)
# fork process
p = self.Process(target=self._test_fork, args=(queue,))
p.start()
# check that all expected items are in the queue
for i in range(20):
self.assertEqual(queue.get(), i)
self.assertRaises(pyqueue.Empty, queue.get, False)
p.join()
def test_qsize(self):
q = self.Queue()
try:
self.assertEqual(q.qsize(), 0)
except NotImplementedError:
return
q.put(1)
self.assertEqual(q.qsize(), 1)
q.put(5)
self.assertEqual(q.qsize(), 2)
q.get()
self.assertEqual(q.qsize(), 1)
q.get()
self.assertEqual(q.qsize(), 0)
@classmethod
def _test_task_done(cls, q):
for obj in iter(q.get, None):
time.sleep(DELTA)
q.task_done()
def test_task_done(self):
queue = self.JoinableQueue()
if sys.version_info < (2, 5) and not hasattr(queue, 'task_done'):
self.skipTest("requires 'queue.task_done()' method")
workers = [self.Process(target=self._test_task_done, args=(queue,))
for i in range(4)]
for p in workers:
p.start()
for i in range(10):
queue.put(i)
queue.join()
for p in workers:
queue.put(None)
for p in workers:
p.join()
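# Illustrative sketch, not part of the tests: the worker loop used by
# _test_task_done above is the usual JoinableQueue pattern -- consume with
# iter(q.get, None), acknowledge each item with task_done(), and let the
# producer block on join() until everything has been acknowledged. The names
# below are hypothetical.
def _example_joinable_worker(q):
    for item in iter(q.get, None):
        q.task_done()        # acknowledge the real item
    q.task_done()            # acknowledge the None sentinel as well

def run_joinable_queue_example(n_items=10, n_workers=4):
    q = multiprocessing.JoinableQueue()
    workers = [multiprocessing.Process(target=_example_joinable_worker,
                                       args=(q,))
               for _ in range(n_workers)]
    for p in workers:
        p.start()
    for i in range(n_items):
        q.put(i)
    q.join()                 # returns once every item above is task_done()'d
    for p in workers:
        q.put(None)          # one sentinel per worker to shut it down
    for p in workers:
        p.join()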
#
#
#
class _TestLock(BaseTestCase):
def test_lock(self):
lock = self.Lock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(False), False)
self.assertEqual(lock.release(), None)
self.assertRaises((ValueError, threading.ThreadError), lock.release)
def test_rlock(self):
lock = self.RLock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertRaises((AssertionError, RuntimeError), lock.release)
def test_lock_context(self):
with self.Lock():
pass
class _TestSemaphore(BaseTestCase):
def _test_semaphore(self, sem):
self.assertReturnsIfImplemented(2, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.acquire(False), False)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(2, get_value, sem)
def test_semaphore(self):
sem = self.Semaphore(2)
self._test_semaphore(sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(3, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(4, get_value, sem)
def test_bounded_semaphore(self):
sem = self.BoundedSemaphore(2)
self._test_semaphore(sem)
# Currently fails on OS X
#if HAVE_GETVALUE:
# self.assertRaises(ValueError, sem.release)
# self.assertReturnsIfImplemented(2, get_value, sem)
def test_timeout(self):
if self.TYPE != 'processes':
return
sem = self.Semaphore(0)
acquire = TimingWrapper(sem.acquire)
self.assertEqual(acquire(False), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, None), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, TIMEOUT1), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0)
self.assertEqual(acquire(True, TIMEOUT2), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
self.assertEqual(acquire(timeout=TIMEOUT3), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
@classmethod
def f(cls, cond, sleeping, woken, timeout=None):
cond.acquire()
sleeping.release()
cond.wait(timeout)
woken.release()
cond.release()
def check_invariant(self, cond):
# this is only supposed to succeed when there are no sleepers
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.get_value() -
cond._woken_count.get_value())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.get_value(), 0)
except NotImplementedError:
pass
def test_notify(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
# wait for both children to start sleeping
sleeping.acquire()
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake up one process/thread
cond.acquire()
cond.notify()
cond.release()
# check one process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(1, get_value, woken)
# wake up another
cond.acquire()
cond.notify()
cond.release()
# check other has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(2, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
p.join()
def test_notify_all(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes which will timeout
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.daemon = True
p.start()
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.daemon = True
t.start()
# wait for them all to sleep
for i in range(6):
sleeping.acquire()
# check they have all timed out
for i in range(6):
woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
# start some more threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake them all up
cond.acquire()
cond.notify_all()
cond.release()
# check they have all woken
time.sleep(DELTA)
self.assertReturnsIfImplemented(6, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
def test_timeout(self):
cond = self.Condition()
wait = TimingWrapper(cond.wait)
cond.acquire()
res = wait(TIMEOUT1)
cond.release()
self.assertEqual(res, False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
class _TestEvent(BaseTestCase):
@classmethod
def _test_event(cls, event):
time.sleep(TIMEOUT2)
event.set()
def test_event(self):
event = self.Event()
wait = TimingWrapper(event.wait)
# Removed temporarily due to API shear: this does not
# work with threading._Event objects (is_set == isSet).
self.assertEqual(event.is_set(), False)
# Removed: threading.Event.wait() returns the value of the __flag
# instead of None -- API shear with the semaphore-backed mp.Event.
self.assertEqual(wait(0.0), False)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
event.set()
# See note above on the API differences
self.assertEqual(event.is_set(), True)
self.assertEqual(wait(), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
# self.assertEqual(event.is_set(), True)
event.clear()
#self.assertEqual(event.is_set(), False)
self.Process(target=self._test_event, args=(event,)).start()
self.assertEqual(wait(), True)
#
#
#
class _TestValue(BaseTestCase):
ALLOWED_TYPES = ('processes',)
codes_values = [
('i', 4343, 24234),
('d', 3.625, -4.25),
('h', -232, 234),
('c', latin('x'), latin('y'))
]
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _test(cls, values):
for sv, cv in zip(values, cls.codes_values):
sv.value = cv[2]
def test_value(self, raw=False):
if raw:
values = [self.RawValue(code, value)
for code, value, _ in self.codes_values]
else:
values = [self.Value(code, value)
for code, value, _ in self.codes_values]
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[1])
proc = self.Process(target=self._test, args=(values,))
proc.start()
proc.join()
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[2])
def test_rawvalue(self):
self.test_value(raw=True)
def test_getobj_getlock(self):
val1 = self.Value('i', 5)
lock1 = val1.get_lock()
obj1 = val1.get_obj()
val2 = self.Value('i', 5, lock=None)
lock2 = val2.get_lock()
obj2 = val2.get_obj()
lock = self.Lock()
val3 = self.Value('i', 5, lock=lock)
lock3 = val3.get_lock()
obj3 = val3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Value('i', 5, lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')
arr5 = self.RawValue('i', 5)
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestArray(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def f(cls, seq):
for i in range(1, len(seq)):
seq[i] += seq[i-1]
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array(self, raw=False):
seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
if raw:
arr = self.RawArray('i', seq)
else:
arr = self.Array('i', seq)
self.assertEqual(len(arr), len(seq))
self.assertEqual(arr[3], seq[3])
self.assertEqual(list(arr[2:7]), list(seq[2:7]))
arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
self.assertEqual(list(arr[:]), seq)
self.f(seq)
p = self.Process(target=self.f, args=(arr,))
p.start()
p.join()
self.assertEqual(list(arr[:]), seq)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_rawarray(self):
self.test_array(raw=True)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_getobj_getlock_obj(self):
arr1 = self.Array('i', list(range(10)))
lock1 = arr1.get_lock()
obj1 = arr1.get_obj()
arr2 = self.Array('i', list(range(10)), lock=None)
lock2 = arr2.get_lock()
obj2 = arr2.get_obj()
lock = self.Lock()
arr3 = self.Array('i', list(range(10)), lock=lock)
lock3 = arr3.get_lock()
obj3 = arr3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Array('i', range(10), lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError,
self.Array, 'i', range(10), lock='notalock')
arr5 = self.RawArray('i', range(10))
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
#
#
#
class _TestContainers(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_list(self):
a = self.list(list(range(10)))
self.assertEqual(a[:], list(range(10)))
b = self.list()
self.assertEqual(b[:], [])
b.extend(list(range(5)))
self.assertEqual(b[:], list(range(5)))
self.assertEqual(b[2], 2)
self.assertEqual(b[2:10], [2,3,4])
b *= 2
self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(a[:], list(range(10)))
d = [a, b]
e = self.list(d)
self.assertEqual(
e[:],
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
)
f = self.list([a])
a.append('hello')
self.assertEqual(f[:], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']])
def test_dict(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
self.assertEqual(sorted(d.keys()), indices)
self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
def test_namespace(self):
n = self.Namespace()
n.name = 'Bob'
n.job = 'Builder'
n._hidden = 'hidden'
self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
del n.job
self.assertEqual(str(n), "Namespace(name='Bob')")
self.assertTrue(hasattr(n, 'name'))
self.assertTrue(not hasattr(n, 'job'))
#
#
#
def sqr(x, wait=0.0):
time.sleep(wait)
return x*x
class _TestPool(BaseTestCase):
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), sqr(5))
self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10)))))
self.assertEqual(pmap(sqr, list(range(100)), chunksize=20),
list(map(sqr, list(range(100)))))
def test_map_chunksize(self):
try:
self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
except multiprocessing.TimeoutError:
self.fail("pool.map_async with chunksize stalled on null list")
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 0.2))
get = TimingWrapper(res.get)
self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
def test_imap(self):
it = self.pool.imap(sqr, list(range(10)))
self.assertEqual(list(it), list(map(sqr, list(range(10)))))
it = self.pool.imap(sqr, list(range(10)))
for i in range(10):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
it = self.pool.imap(sqr, list(range(1000)), chunksize=100)
for i in range(1000):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, list(range(1000)))
self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=53)
self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
def test_make_pool(self):
p = multiprocessing.Pool(3)
self.assertEqual(3, len(p._pool))
p.close()
p.join()
def test_terminate(self):
if self.TYPE == 'manager':
# On Unix a forked process increfs each shared object to
# which its parent process held a reference. If the
# forked process gets terminated then there is likely to
# be a reference leak. So to prevent
# _TestZZZNumberOfObjects from failing we skip this test
# when using a manager.
return
result = self.pool.map_async(
time.sleep, [0.1 for i in range(10000)], chunksize=1
)
self.pool.terminate()
join = TimingWrapper(self.pool.join)
join()
self.assertTrue(join.elapsed < 0.2)
def raising():
raise KeyError("key")
def unpickleable_result():
return lambda: 42
class _TestPoolWorkerErrors(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_async_error_callback(self):
p = multiprocessing.Pool(2)
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(raising, error_callback=errback)
self.assertRaises(KeyError, res.get)
self.assertTrue(scratchpad[0])
self.assertIsInstance(scratchpad[0], KeyError)
p.close()
p.join()
def test_unpickleable_result(self):
from multiprocessing.pool import MaybeEncodingError
p = multiprocessing.Pool(2)
# Make sure we don't lose pool processes because of encoding errors.
for iteration in range(20):
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(unpickleable_result, error_callback=errback)
self.assertRaises(MaybeEncodingError, res.get)
wrapped = scratchpad[0]
self.assertTrue(wrapped)
self.assertIsInstance(scratchpad[0], MaybeEncodingError)
self.assertIsNotNone(wrapped.exc)
self.assertIsNotNone(wrapped.value)
p.close()
p.join()
class _TestPoolWorkerLifetime(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_pool_worker_lifetime(self):
p = multiprocessing.Pool(3, maxtasksperchild=10)
self.assertEqual(3, len(p._pool))
origworkerpids = [w.pid for w in p._pool]
# Run many tasks so each worker gets replaced (hopefully)
results = []
for i in range(100):
results.append(p.apply_async(sqr, (i, )))
# Fetch the results and verify we got the right answers,
# also ensuring all the tasks have completed.
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
# Refill the pool
p._repopulate_pool()
# Wait until all workers are alive
countdown = 5
while countdown and not all(w.is_alive() for w in p._pool):
countdown -= 1
time.sleep(DELTA)
finalworkerpids = [w.pid for w in p._pool]
# All pids should be assigned. See issue #7805.
self.assertNotIn(None, origworkerpids)
self.assertNotIn(None, finalworkerpids)
# Finally, check that the worker pids have changed
self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
p.close()
p.join()
#
# Test that manager has expected number of shared objects left
#
class _TestZZZNumberOfObjects(BaseTestCase):
# Because test cases are sorted alphabetically, this one will get
# run after all the other tests for the manager. It tests that
# there have been no "reference leaks" for the manager's shared
# objects. Note the comment in _TestPool.test_terminate().
ALLOWED_TYPES = ('manager',)
def test_number_of_objects(self):
EXPECTED_NUMBER = 1 # the pool object is still alive
multiprocessing.active_children() # discard dead process objs
gc.collect() # do garbage collection
refs = self.manager._number_of_objects()
debug_info = self.manager._debug_info()
if refs != EXPECTED_NUMBER:
print(self.manager._debug_info())
print(debug_info)
self.assertEqual(refs, EXPECTED_NUMBER)
#
# Test of creating a customized manager class
#
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
def f(self):
return 'f()'
def g(self):
raise ValueError
def _h(self):
return '_h()'
def baz():
for i in range(10):
yield i*i
class IteratorProxy(BaseProxy):
_exposed_ = ('__next__',)
def __iter__(self):
return self
def __next__(self):
return self._callmethod('__next__')
class MyManager(BaseManager):
pass
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
class _TestMyManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_mymanager(self):
manager = MyManager()
manager.start()
foo = manager.Foo()
bar = manager.Bar()
baz = manager.baz()
foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
self.assertEqual(foo_methods, ['f', 'g'])
self.assertEqual(bar_methods, ['f', '_h'])
self.assertEqual(foo.f(), 'f()')
self.assertRaises(ValueError, foo.g)
self.assertEqual(foo._callmethod('f'), 'f()')
self.assertRaises(RemoteError, foo._callmethod, '_h')
self.assertEqual(bar.f(), 'f()')
self.assertEqual(bar._h(), '_h()')
self.assertEqual(bar._callmethod('f'), 'f()')
self.assertEqual(bar._callmethod('_h'), '_h()')
self.assertEqual(list(baz), [i*i for i in range(10)])
manager.shutdown()
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
_queue = pyqueue.Queue()
def get_queue():
return _queue
class QueueManager(BaseManager):
'''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
'''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')
SERIALIZER = 'xmlrpclib'
class _TestRemoteManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager2(
address=address, authkey=authkey, serializer=SERIALIZER
)
manager.connect()
queue = manager.get_queue()
queue.put(('hello world', None, True, 2.25))
def test_remote(self):
authkey = os.urandom(32)
manager = QueueManager(
address=('localhost', 0), authkey=authkey, serializer=SERIALIZER
)
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.start()
manager2 = QueueManager2(
address=manager.address, authkey=authkey, serializer=SERIALIZER
)
manager2.connect()
queue = manager2.get_queue()
# Note that xmlrpclib will deserialize the tuple as a list, not a tuple
self.assertEqual(queue.get(), ['hello world', None, True, 2.25])
# Because we are using xmlrpclib for serialization instead of
# pickle this will cause a serialization error.
self.assertRaises(Exception, queue.put, time.sleep)
# Make queue finalizer run before the server is stopped
del queue
manager.shutdown()
class _TestManagerRestart(BaseTestCase):
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager(
address=address, authkey=authkey, serializer=SERIALIZER)
manager.connect()
queue = manager.get_queue()
queue.put('hello world')
def test_rapid_restart(self):
authkey = os.urandom(32)
manager = QueueManager(
address=('localhost', 0), authkey=authkey, serializer=SERIALIZER)
srvr = manager.get_server()
addr = srvr.address
# Close the connection.Listener socket which gets opened as a part
# of manager.get_server(). It's not needed for the test.
srvr.listener.close()
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.start()
queue = manager.get_queue()
self.assertEqual(queue.get(), 'hello world')
del queue
manager.shutdown()
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
manager.start()
manager.shutdown()
#
#
#
SENTINEL = latin('')
class _TestConnection(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _echo(cls, conn):
for msg in iter(conn.recv_bytes, SENTINEL):
conn.send_bytes(msg)
conn.close()
def test_connection(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
seq = [1, 2.25, None]
msg = latin('hello world')
longmsg = msg * 10
arr = array.array('i', list(range(4)))
if self.TYPE == 'processes':
self.assertEqual(type(conn.fileno()), int)
self.assertEqual(conn.send(seq), None)
self.assertEqual(conn.recv(), seq)
self.assertEqual(conn.send_bytes(msg), None)
self.assertEqual(conn.recv_bytes(), msg)
if self.TYPE == 'processes':
buffer = array.array('i', [0]*10)
expected = list(arr) + [0] * (10 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = array.array('i', [0]*10)
expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = bytearray(latin(' ' * 40))
self.assertEqual(conn.send_bytes(longmsg), None)
try:
res = conn.recv_bytes_into(buffer)
except multiprocessing.BufferTooShort as e:
self.assertEqual(e.args, (longmsg,))
else:
self.fail('expected BufferTooShort, got %s' % res)
poll = TimingWrapper(conn.poll)
self.assertEqual(poll(), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(TIMEOUT1), False)
self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
conn.send(None)
self.assertEqual(poll(TIMEOUT1), True)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(conn.recv(), None)
really_big_msg = latin('X') * (1024 * 1024 * 16) # 16 MB
conn.send_bytes(really_big_msg)
self.assertEqual(conn.recv_bytes(), really_big_msg)
conn.send_bytes(SENTINEL) # tell child to quit
child_conn.close()
if self.TYPE == 'processes':
self.assertEqual(conn.readable, True)
self.assertEqual(conn.writable, True)
self.assertRaises(EOFError, conn.recv)
self.assertRaises(EOFError, conn.recv_bytes)
p.join()
def test_duplex_false(self):
reader, writer = self.Pipe(duplex=False)
self.assertEqual(writer.send(1), None)
self.assertEqual(reader.recv(), 1)
if self.TYPE == 'processes':
self.assertEqual(reader.readable, True)
self.assertEqual(reader.writable, False)
self.assertEqual(writer.readable, False)
self.assertEqual(writer.writable, True)
self.assertRaises(IOError, reader.send, 2)
self.assertRaises(IOError, writer.recv)
self.assertRaises(IOError, writer.poll)
def test_spawn_close(self):
# We test that a pipe connection can be closed by parent
# process immediately after child is spawned. On Windows this
# would have sometimes failed on old versions because
# child_conn would be closed before the child got a chance to
# duplicate it.
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.start()
child_conn.close() # this might complete before child initializes
msg = latin('hello')
conn.send_bytes(msg)
self.assertEqual(conn.recv_bytes(), msg)
conn.send_bytes(SENTINEL)
conn.close()
p.join()
def test_sendbytes(self):
if self.TYPE != 'processes':
return
msg = latin('abcdefghijklmnopqrstuvwxyz')
a, b = self.Pipe()
a.send_bytes(msg)
self.assertEqual(b.recv_bytes(), msg)
a.send_bytes(msg, 5)
self.assertEqual(b.recv_bytes(), msg[5:])
a.send_bytes(msg, 7, 8)
self.assertEqual(b.recv_bytes(), msg[7:7+8])
a.send_bytes(msg, 26)
self.assertEqual(b.recv_bytes(), latin(''))
a.send_bytes(msg, 26, 0)
self.assertEqual(b.recv_bytes(), latin(''))
self.assertRaises(ValueError, a.send_bytes, msg, 27)
self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
self.assertRaises(ValueError, a.send_bytes, msg, -1)
self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
class _TestListenerClient(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _test(cls, address):
conn = cls.connection.Client(address)
conn.send('hello')
conn.close()
def test_listener_client(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
p.join()
l.close()
#
# Test of sending connection and socket objects between processes
#
"""
class _TestPicklingConnections(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def _listener(self, conn, families):
for fam in families:
l = self.connection.Listener(family=fam)
conn.send(l.address)
new_conn = l.accept()
conn.send(new_conn)
if self.TYPE == 'processes':
l = socket.socket()
l.bind(('localhost', 0))
conn.send(l.getsockname())
l.listen(1)
new_conn, addr = l.accept()
conn.send(new_conn)
conn.recv()
def _remote(self, conn):
for (address, msg) in iter(conn.recv, None):
client = self.connection.Client(address)
client.send(msg.upper())
client.close()
if self.TYPE == 'processes':
address, msg = conn.recv()
client = socket.socket()
client.connect(address)
client.sendall(msg.upper())
client.close()
conn.close()
def test_pickling(self):
try:
multiprocessing.allow_connection_pickling()
except ImportError:
return
families = self.connection.families
lconn, lconn0 = self.Pipe()
lp = self.Process(target=self._listener, args=(lconn0, families))
lp.start()
lconn0.close()
rconn, rconn0 = self.Pipe()
rp = self.Process(target=self._remote, args=(rconn0,))
rp.start()
rconn0.close()
for fam in families:
msg = ('This connection uses family %s' % fam).encode('ascii')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(), msg.upper())
rconn.send(None)
if self.TYPE == 'processes':
msg = latin('This connection uses a normal socket')
address = lconn.recv()
rconn.send((address, msg))
if hasattr(socket, 'fromfd'):
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(100), msg.upper())
else:
# XXX On Windows with Py2.6 need to backport fromfd()
discard = lconn.recv_bytes()
lconn.send(None)
rconn.close()
lconn.close()
lp.join()
rp.join()
"""
#
#
#
class _TestHeap(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_heap(self):
iterations = 5000
maxblocks = 50
blocks = []
# create and destroy lots of blocks of different sizes
for i in range(iterations):
size = int(random.lognormvariate(0, 1) * 1000)
b = multiprocessing.heap.BufferWrapper(size)
blocks.append(b)
if len(blocks) > maxblocks:
i = random.randrange(maxblocks)
del blocks[i]
# get the heap object
heap = multiprocessing.heap.BufferWrapper._heap
# verify the state of the heap
all = []
occupied = 0
for L in list(heap._len_to_seq.values()):
for arena, start, stop in L:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'free'))
for arena, start, stop in heap._allocated_blocks:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'occupied'))
occupied += (stop-start)
all.sort()
for i in range(len(all)-1):
(arena, start, stop) = all[i][:3]
(narena, nstart, nstop) = all[i+1][:3]
self.assertTrue((arena != narena and nstart == 0) or
(stop == nstart))
#
#
#
class _Foo(Structure):
_fields_ = [
('x', c_int),
('y', c_double)
]
class _TestSharedCTypes(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _double(cls, x, y, foo, arr, string):
x.value *= 2
y.value *= 2
foo.x *= 2
foo.y *= 2
string.value *= 2
for i in range(len(arr)):
arr[i] *= 2
def test_sharedctypes(self, lock=False):
x = Value('i', 7, lock=lock)
y = Value(c_double, 1.0/3.0, lock=lock)
foo = Value(_Foo, 3, 2, lock=lock)
arr = self.Array('d', list(range(10)), lock=lock)
string = self.Array('c', 20, lock=lock)
string.value = latin('hello')
p = self.Process(target=self._double, args=(x, y, foo, arr, string))
p.start()
p.join()
self.assertEqual(x.value, 14)
self.assertAlmostEqual(y.value, 2.0/3.0)
self.assertEqual(foo.x, 6)
self.assertAlmostEqual(foo.y, 4.0)
for i in range(10):
self.assertAlmostEqual(arr[i], i*2)
self.assertEqual(string.value, latin('hellohello'))
def test_synchronize(self):
self.test_sharedctypes(lock=True)
def test_copy(self):
foo = _Foo(2, 5.0)
bar = copy(foo)
foo.x = 0
foo.y = 0
self.assertEqual(bar.x, 2)
self.assertAlmostEqual(bar.y, 5.0)
#
#
#
class _TestFinalize(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def _test_finalize(cls, conn):
class Foo(object):
pass
a = Foo()
util.Finalize(a, conn.send, args=('a',))
del a # triggers callback for a
b = Foo()
close_b = util.Finalize(b, conn.send, args=('b',))
close_b() # triggers callback for b
close_b() # does nothing because callback has already been called
del b # does nothing because callback has already been called
c = Foo()
util.Finalize(c, conn.send, args=('c',))
d10 = Foo()
util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
d01 = Foo()
util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
d02 = Foo()
util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
d03 = Foo()
util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
        # call multiprocessing's cleanup function, then exit the process without
# garbage collecting locals
util._exit_function()
conn.close()
os._exit(0)
def test_finalize(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._test_finalize, args=(child_conn,))
p.start()
p.join()
result = [obj for obj in iter(conn.recv, 'STOP')]
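        # 'a' and 'b' are sent while the child is running; at exit, util._exit_function
        # runs the remaining finalizers in decreasing exitpriority order, with ties
        # broken in reverse registration order (d10, then d03/d02/d01, then 'e').
        # 'c' has no exitpriority and its referent is still alive (os._exit skips
        # garbage collection), so its callback never fires; 'STOP' is the sentinel
        # that ends the iter() above.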
self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
#
# Test that from ... import * works for each module
#
class _TestImportStar(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_import(self):
modules = [
'multiprocessing', 'multiprocessing.connection',
'multiprocessing.heap', 'multiprocessing.managers',
'multiprocessing.pool', 'multiprocessing.process',
'multiprocessing.reduction',
'multiprocessing.synchronize', 'multiprocessing.util'
]
if c_int is not None:
# This module requires _ctypes
modules.append('multiprocessing.sharedctypes')
for name in modules:
__import__(name)
mod = sys.modules[name]
for attr in getattr(mod, '__all__', ()):
self.assertTrue(
hasattr(mod, attr),
'%r does not have attribute %r' % (mod, attr)
)
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_enable_logging(self):
logger = multiprocessing.get_logger()
logger.setLevel(util.SUBWARNING)
self.assertTrue(logger is not None)
logger.debug('this will not be printed')
logger.info('nor will this')
logger.setLevel(LOG_LEVEL)
@classmethod
def _test_level(cls, conn):
logger = multiprocessing.get_logger()
conn.send(logger.getEffectiveLevel())
def test_level(self):
LEVEL1 = 32
LEVEL2 = 37
logger = multiprocessing.get_logger()
root_logger = logging.getLogger()
root_level = root_logger.level
reader, writer = multiprocessing.Pipe(duplex=False)
logger.setLevel(LEVEL1)
self.Process(target=self._test_level, args=(writer,)).start()
self.assertEqual(LEVEL1, reader.recv())
logger.setLevel(logging.NOTSET)
root_logger.setLevel(LEVEL2)
self.Process(target=self._test_level, args=(writer,)).start()
self.assertEqual(LEVEL2, reader.recv())
root_logger.setLevel(root_level)
logger.setLevel(level=LOG_LEVEL)
# class _TestLoggingProcessName(BaseTestCase):
#
# def handle(self, record):
# assert record.processName == multiprocessing.current_process().name
# self.__handled = True
#
# def test_logging(self):
# handler = logging.Handler()
# handler.handle = self.handle
# self.__handled = False
# # Bypass getLogger() and side-effects
# logger = logging.getLoggerClass()(
# 'multiprocessing.test.TestLoggingProcessName')
# logger.addHandler(handler)
# logger.propagate = False
#
# logger.warn('foo')
# assert self.__handled
#
# Test to verify handle verification, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_handles(self):
conn = _multiprocessing.Connection(44977608)
self.assertRaises(IOError, conn.poll)
self.assertRaises(IOError, _multiprocessing.Connection, -1)
#
# Functions used to create test cases from the base ones in this module
#
def get_attributes(Source, names):
d = {}
for name in names:
obj = getattr(Source, name)
if type(obj) == type(get_attributes):
obj = staticmethod(obj)
d[name] = obj
return d
def create_test_cases(Mixin, type):
result = {}
glob = globals()
Type = type.capitalize()
for name in list(glob.keys()):
if name.startswith('_Test'):
base = glob[name]
if type in base.ALLOWED_TYPES:
newname = 'With' + Type + name[1:]
class Temp(base, unittest.TestCase, Mixin):
pass
result[newname] = Temp
Temp.__name__ = newname
Temp.__module__ = Mixin.__module__
return result
#
# Create test cases
#
class ProcessesMixin(object):
TYPE = 'processes'
Process = multiprocessing.Process
locals().update(get_attributes(multiprocessing, (
'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
'Condition', 'Event', 'Value', 'Array', 'RawValue',
'RawArray', 'current_process', 'active_children', 'Pipe',
'connection', 'JoinableQueue'
)))
testcases_processes = create_test_cases(ProcessesMixin, type='processes')
globals().update(testcases_processes)
class ManagerMixin(object):
TYPE = 'manager'
Process = multiprocessing.Process
manager = object.__new__(multiprocessing.managers.SyncManager)
locals().update(get_attributes(manager, (
'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
'Condition', 'Event', 'Value', 'Array', 'list', 'dict',
'Namespace', 'JoinableQueue'
)))
testcases_manager = create_test_cases(ManagerMixin, type='manager')
globals().update(testcases_manager)
class ThreadsMixin(object):
TYPE = 'threads'
Process = multiprocessing.dummy.Process
locals().update(get_attributes(multiprocessing.dummy, (
'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
'Condition', 'Event', 'Value', 'Array', 'current_process',
'active_children', 'Pipe', 'connection', 'dict', 'list',
'Namespace', 'JoinableQueue'
)))
testcases_threads = create_test_cases(ThreadsMixin, type='threads')
globals().update(testcases_threads)
class OtherTest(unittest.TestCase):
# TODO: add more tests for deliver/answer challenge.
def test_deliver_challenge_auth_failure(self):
class _FakeConnection(object):
def recv_bytes(self, size):
return b'something bogus'
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.deliver_challenge,
_FakeConnection(), b'abc')
def test_answer_challenge_auth_failure(self):
class _FakeConnection(object):
def __init__(self):
self.count = 0
def recv_bytes(self, size):
self.count += 1
if self.count == 1:
return multiprocessing.connection.CHALLENGE
elif self.count == 2:
return b'something bogus'
return b''
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.answer_challenge,
_FakeConnection(), b'abc')
#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#
def initializer(ns):
ns.test += 1
class TestInitializers(unittest.TestCase):
def setUp(self):
self.mgr = multiprocessing.Manager()
self.ns = self.mgr.Namespace()
self.ns.test = 0
def tearDown(self):
self.mgr.shutdown()
def test_manager_initializer(self):
m = multiprocessing.managers.SyncManager()
self.assertRaises(TypeError, m.start, 1)
m.start(initializer, (self.ns,))
self.assertEqual(self.ns.test, 1)
m.shutdown()
def test_pool_initializer(self):
self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
p = multiprocessing.Pool(1, initializer, (self.ns,))
p.close()
p.join()
self.assertEqual(self.ns.test, 1)
#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior
#
def _ThisSubProcess(q):
try:
item = q.get(block=False)
except pyqueue.Empty:
pass
def _TestProcess(q):
queue = multiprocessing.Queue()
subProc = multiprocessing.Process(target=_ThisSubProcess, args=(queue,))
subProc.start()
subProc.join()
def _afunc(x):
return x*x
def pool_in_process():
pool = multiprocessing.Pool(processes=4)
x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
class _file_like(object):
def __init__(self, delegate):
self._delegate = delegate
self._pid = None
@property
def cache(self):
pid = os.getpid()
# There are no race conditions since fork keeps only the running thread
if pid != self._pid:
self._pid = pid
self._cache = []
return self._cache
def write(self, data):
self.cache.append(data)
def flush(self):
self._delegate.write(''.join(self.cache))
self._cache = []
class TestStdinBadfiledescriptor(unittest.TestCase):
def test_queue_in_process(self):
queue = multiprocessing.Queue()
proc = multiprocessing.Process(target=_TestProcess, args=(queue,))
proc.start()
proc.join()
def test_pool_in_process(self):
p = multiprocessing.Process(target=pool_in_process)
p.start()
p.join()
def test_flushing(self):
sio = io.StringIO()
flike = _file_like(sio)
flike.write('foo')
proc = multiprocessing.Process(target=lambda: flike.flush())
flike.flush()
assert sio.getvalue() == 'foo'
testcases_other = [OtherTest, TestInvalidHandle, TestInitializers,
TestStdinBadfiledescriptor]
#
#
#
def test_main(run=None):
if sys.platform.startswith("linux"):
try:
lock = multiprocessing.RLock()
except OSError:
            raise unittest.SkipTest("OSError is raised on RLock creation, see issue 3111!")
if run is None:
from test.support import run_unittest as run
util.get_temp_dir() # creates temp directory for use by all processes
multiprocessing.get_logger().setLevel(LOG_LEVEL)
ProcessesMixin.pool = multiprocessing.Pool(4)
ThreadsMixin.pool = multiprocessing.dummy.Pool(4)
ManagerMixin.manager.__init__()
ManagerMixin.manager.start()
ManagerMixin.pool = ManagerMixin.manager.Pool(4)
testcases = (
sorted(testcases_processes.values(), key=lambda tc:tc.__name__) +
sorted(testcases_threads.values(), key=lambda tc:tc.__name__) +
sorted(testcases_manager.values(), key=lambda tc:tc.__name__) +
testcases_other
)
loadTestsFromTestCase = unittest.defaultTestLoader.loadTestsFromTestCase
suite = unittest.TestSuite(loadTestsFromTestCase(tc) for tc in testcases)
run(suite)
ThreadsMixin.pool.terminate()
ProcessesMixin.pool.terminate()
ManagerMixin.pool.terminate()
ManagerMixin.manager.shutdown()
del ProcessesMixin.pool, ThreadsMixin.pool, ManagerMixin.pool
def main():
test_main(unittest.TextTestRunner(verbosity=2).run)
if __name__ == '__main__':
main()
|
async.py
|
#!/usr/bin/env python
#-*-coding:utf-8-*-
from multiprocessing import Process
import threading
__author__ = 'Allen Woo'
'''
decorators
'''
def async_thread(f):
    '''
    Run the decorated function asynchronously in a new threading.Thread.
    :param f: the function to run in the background
    :return: wrapper that starts the thread and returns immediately
    '''
    def wrapper(*args, **kwargs):
        t = threading.Thread(target=f, args=args, kwargs=kwargs)
        t.start()
    return wrapper
def async_process(f):
    '''
    Run the decorated function asynchronously in a new multiprocessing.Process.
    :param f: the function to run in the background
    :return: wrapper that starts the process and returns immediately
    '''
    def wrapper(*args, **kwargs):
        thr = Process(target=f, args=args, kwargs=kwargs)
        thr.start()
    return wrapper
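
# Minimal usage sketch (not part of the original module; `slow_task` is a
# hypothetical example). Calling a decorated function returns immediately while
# its body runs in the background.
if __name__ == '__main__':
    import time

    @async_thread
    def slow_task(n):
        time.sleep(0.1)
        print('background task finished:', n)

    slow_task(1)  # returns at once; the non-daemon worker thread finishes on its own
    # async_process behaves the same way but runs the body in a child Process;
    # under the 'spawn' start method the target must be importable by the child,
    # so prefer decorating module-level functions defined outside __main__.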
|
coref_model.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import operator
import random
import math
import json
import threading
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
import h5py
import util
import coref_ops
import conll
import metrics
class CorefModel(object):
def __init__(self, config):
self.config = config
self.context_embeddings = util.EmbeddingDictionary(config["context_embeddings"])
self.head_embeddings = util.EmbeddingDictionary(config["head_embeddings"], maybe_cache=self.context_embeddings)
self.char_embedding_size = config["char_embedding_size"]
self.char_dict = util.load_char_dict(config["char_vocab_path"])
self.max_span_width = config["max_span_width"]
self.genres = { g:i for i,g in enumerate(config["genres"]) }
if config["lm_path"]:
self.lm_file = h5py.File(self.config["lm_path"], "r")
else:
self.lm_file = None
self.lm_layers = self.config["lm_layers"]
self.lm_size = self.config["lm_size"]
self.eval_data = None # Load eval data lazily.
input_props = []
input_props.append((tf.string, [None, None])) # Tokens.
input_props.append((tf.float32, [None, None, self.context_embeddings.size])) # Context embeddings.
input_props.append((tf.float32, [None, None, self.head_embeddings.size])) # Head embeddings.
input_props.append((tf.float32, [None, None, self.lm_size, self.lm_layers])) # LM embeddings.
input_props.append((tf.int32, [None, None, None])) # Character indices.
input_props.append((tf.int32, [None])) # Text lengths.
input_props.append((tf.int32, [None])) # Speaker IDs.
input_props.append((tf.int32, [])) # Genre.
input_props.append((tf.bool, [])) # Is training.
input_props.append((tf.int32, [None])) # Gold starts.
input_props.append((tf.int32, [None])) # Gold ends.
input_props.append((tf.int32, [None])) # Cluster ids.
self.queue_input_tensors = [tf.placeholder(dtype, shape) for dtype, shape in input_props]
dtypes, shapes = zip(*input_props)
queue = tf.PaddingFIFOQueue(capacity=10, dtypes=dtypes, shapes=shapes)
self.enqueue_op = queue.enqueue(self.queue_input_tensors)
self.input_tensors = queue.dequeue()
self.predictions, self.loss = self.get_predictions_and_loss(*self.input_tensors)
self.global_step = tf.Variable(0, name="global_step", trainable=False)
self.reset_global_step = tf.assign(self.global_step, 0)
learning_rate = tf.train.exponential_decay(self.config["learning_rate"], self.global_step,
self.config["decay_frequency"], self.config["decay_rate"], staircase=True)
trainable_params = tf.trainable_variables()
gradients = tf.gradients(self.loss, trainable_params)
gradients, _ = tf.clip_by_global_norm(gradients, self.config["max_gradient_norm"])
optimizers = {
"adam" : tf.train.AdamOptimizer,
"sgd" : tf.train.GradientDescentOptimizer
}
optimizer = optimizers[self.config["optimizer"]](learning_rate)
self.train_op = optimizer.apply_gradients(zip(gradients, trainable_params), global_step=self.global_step)
def start_enqueue_thread(self, session):
with open(self.config["train_path"]) as f:
train_examples = [json.loads(jsonline) for jsonline in f.readlines()]
def _enqueue_loop():
while True:
random.shuffle(train_examples)
for example in train_examples:
tensorized_example = self.tensorize_example(example, is_training=True)
feed_dict = dict(zip(self.queue_input_tensors, tensorized_example))
session.run(self.enqueue_op, feed_dict=feed_dict)
enqueue_thread = threading.Thread(target=_enqueue_loop)
enqueue_thread.daemon = True
enqueue_thread.start()
def restore(self, session):
# Don't try to restore unused variables from the TF-Hub ELMo module.
vars_to_restore = [v for v in tf.global_variables() if "module/" not in v.name]
saver = tf.train.Saver(vars_to_restore)
checkpoint_path = os.path.join(self.config["log_dir"], "model.max.ckpt")
print("Restoring from {}".format(checkpoint_path))
session.run(tf.global_variables_initializer())
saver.restore(session, checkpoint_path)
def load_lm_embeddings(self, doc_key):
if self.lm_file is None:
return np.zeros([0, 0, self.lm_size, self.lm_layers])
file_key = doc_key.replace("/", ":")
group = self.lm_file[file_key]
num_sentences = len(list(group.keys()))
sentences = [group[str(i)][...] for i in range(num_sentences)]
lm_emb = np.zeros([num_sentences, max(s.shape[0] for s in sentences), self.lm_size, self.lm_layers])
for i, s in enumerate(sentences):
lm_emb[i, :s.shape[0], :, :] = s
return lm_emb
def tensorize_mentions(self, mentions):
if len(mentions) > 0:
starts, ends = zip(*mentions)
else:
starts, ends = [], []
return np.array(starts), np.array(ends)
def tensorize_span_labels(self, tuples, label_dict):
if len(tuples) > 0:
starts, ends, labels = zip(*tuples)
else:
starts, ends, labels = [], [], []
return np.array(starts), np.array(ends), np.array([label_dict[c] for c in labels])
def tensorize_example(self, example, is_training):
clusters = example["clusters"]
gold_mentions = sorted(tuple(m) for m in util.flatten(clusters))
gold_mention_map = {m:i for i,m in enumerate(gold_mentions)}
cluster_ids = np.zeros(len(gold_mentions))
for cluster_id, cluster in enumerate(clusters):
for mention in cluster:
cluster_ids[gold_mention_map[tuple(mention)]] = cluster_id + 1
sentences = example["sentences"]
num_words = sum(len(s) for s in sentences)
speakers = util.flatten(example["speakers"])
assert num_words == len(speakers)
max_sentence_length = max(len(s) for s in sentences)
max_word_length = max(max(max(len(w) for w in s) for s in sentences), max(self.config["filter_widths"]))
text_len = np.array([len(s) for s in sentences])
tokens = [[""] * max_sentence_length for _ in sentences]
context_word_emb = np.zeros([len(sentences), max_sentence_length, self.context_embeddings.size])
head_word_emb = np.zeros([len(sentences), max_sentence_length, self.head_embeddings.size])
char_index = np.zeros([len(sentences), max_sentence_length, max_word_length])
for i, sentence in enumerate(sentences):
for j, word in enumerate(sentence):
tokens[i][j] = word
context_word_emb[i, j] = self.context_embeddings[word]
head_word_emb[i, j] = self.head_embeddings[word]
char_index[i, j, :len(word)] = [self.char_dict[c] for c in word]
tokens = np.array(tokens)
speaker_dict = { s:i for i,s in enumerate(set(speakers)) }
speaker_ids = np.array([speaker_dict[s] for s in speakers])
doc_key = example["doc_key"]
genre = self.genres[doc_key[:2]]
gold_starts, gold_ends = self.tensorize_mentions(gold_mentions)
lm_emb = self.load_lm_embeddings(doc_key)
example_tensors = (tokens, context_word_emb, head_word_emb, lm_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids)
if is_training and len(sentences) > self.config["max_training_sentences"]:
return self.truncate_example(*example_tensors)
else:
return example_tensors
def truncate_example(self, tokens, context_word_emb, head_word_emb, lm_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids):
max_training_sentences = self.config["max_training_sentences"]
num_sentences = context_word_emb.shape[0]
assert num_sentences > max_training_sentences
sentence_offset = random.randint(0, num_sentences - max_training_sentences)
word_offset = text_len[:sentence_offset].sum()
num_words = text_len[sentence_offset:sentence_offset + max_training_sentences].sum()
tokens = tokens[sentence_offset:sentence_offset + max_training_sentences, :]
context_word_emb = context_word_emb[sentence_offset:sentence_offset + max_training_sentences, :, :]
head_word_emb = head_word_emb[sentence_offset:sentence_offset + max_training_sentences, :, :]
lm_emb = lm_emb[sentence_offset:sentence_offset + max_training_sentences, :, :, :]
char_index = char_index[sentence_offset:sentence_offset + max_training_sentences, :, :]
text_len = text_len[sentence_offset:sentence_offset + max_training_sentences]
speaker_ids = speaker_ids[word_offset: word_offset + num_words]
gold_spans = np.logical_and(gold_ends >= word_offset, gold_starts < word_offset + num_words)
gold_starts = gold_starts[gold_spans] - word_offset
gold_ends = gold_ends[gold_spans] - word_offset
cluster_ids = cluster_ids[gold_spans]
return tokens, context_word_emb, head_word_emb, lm_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids
def get_candidate_labels(self, candidate_starts, candidate_ends, labeled_starts, labeled_ends, labels):
same_start = tf.equal(tf.expand_dims(labeled_starts, 1), tf.expand_dims(candidate_starts, 0)) # [num_labeled, num_candidates]
same_end = tf.equal(tf.expand_dims(labeled_ends, 1), tf.expand_dims(candidate_ends, 0)) # [num_labeled, num_candidates]
same_span = tf.logical_and(same_start, same_end) # [num_labeled, num_candidates]
candidate_labels = tf.matmul(tf.expand_dims(labels, 0), tf.to_int32(same_span)) # [1, num_candidates]
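    # Multiplying the gold cluster ids by the {0,1} span-match matrix assigns each
    # candidate the cluster id of the gold span it coincides with (0 if none);
    # every candidate matches at most one gold span, so the sum is exact.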
candidate_labels = tf.squeeze(candidate_labels, 0) # [num_candidates]
return candidate_labels
def get_dropout(self, dropout_rate, is_training):
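    # Returns a keep probability for tf.nn.dropout: 1.0 at eval time
    # (is_training == False) and 1 - dropout_rate during training.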
return 1 - (tf.to_float(is_training) * dropout_rate)
def coarse_to_fine_pruning(self, top_span_emb, top_span_mention_scores, c):
k = util.shape(top_span_emb, 0)
top_span_range = tf.range(k) # [k]
antecedent_offsets = tf.expand_dims(top_span_range, 1) - tf.expand_dims(top_span_range, 0) # [k, k]
antecedents_mask = antecedent_offsets >= 1 # [k, k]
fast_antecedent_scores = tf.expand_dims(top_span_mention_scores, 1) + tf.expand_dims(top_span_mention_scores, 0) # [k, k]
fast_antecedent_scores += tf.log(tf.to_float(antecedents_mask)) # [k, k]
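    # Adding log(mask) pushes the scores of invalid antecedents (spans that do not
    # strictly precede the current span) to -inf, so valid antecedents always rank
    # ahead of invalid ones in the top_k selection below.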
fast_antecedent_scores += self.get_fast_antecedent_scores(top_span_emb) # [k, k]
_, top_antecedents = tf.nn.top_k(fast_antecedent_scores, c, sorted=False) # [k, c]
top_antecedents_mask = util.batch_gather(antecedents_mask, top_antecedents) # [k, c]
top_fast_antecedent_scores = util.batch_gather(fast_antecedent_scores, top_antecedents) # [k, c]
top_antecedent_offsets = util.batch_gather(antecedent_offsets, top_antecedents) # [k, c]
return top_antecedents, top_antecedents_mask, top_fast_antecedent_scores, top_antecedent_offsets
def distance_pruning(self, top_span_emb, top_span_mention_scores, c):
k = util.shape(top_span_emb, 0)
top_antecedent_offsets = tf.tile(tf.expand_dims(tf.range(c) + 1, 0), [k, 1]) # [k, c]
raw_top_antecedents = tf.expand_dims(tf.range(k), 1) - top_antecedent_offsets # [k, c]
top_antecedents_mask = raw_top_antecedents >= 0 # [k, c]
top_antecedents = tf.maximum(raw_top_antecedents, 0) # [k, c]
top_fast_antecedent_scores = tf.expand_dims(top_span_mention_scores, 1) + tf.gather(top_span_mention_scores, top_antecedents) # [k, c]
top_fast_antecedent_scores += tf.log(tf.to_float(top_antecedents_mask)) # [k, c]
return top_antecedents, top_antecedents_mask, top_fast_antecedent_scores, top_antecedent_offsets
def get_predictions_and_loss(self, tokens, context_word_emb, head_word_emb, lm_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids):
self.dropout = self.get_dropout(self.config["dropout_rate"], is_training)
self.lexical_dropout = self.get_dropout(self.config["lexical_dropout_rate"], is_training)
self.lstm_dropout = self.get_dropout(self.config["lstm_dropout_rate"], is_training)
num_sentences = tf.shape(context_word_emb)[0]
print('num_sentences: {}'.format(num_sentences))
max_sentence_length = tf.shape(context_word_emb)[1]
print('max_sentence_length: {}'.format(max_sentence_length))
context_emb_list = [context_word_emb]
head_emb_list = [head_word_emb]
if self.config["char_embedding_size"] > 0:
char_emb = tf.gather(tf.get_variable("char_embeddings", [len(self.char_dict), self.config["char_embedding_size"]]), char_index) # [num_sentences, max_sentence_length, max_word_length, emb]
flattened_char_emb = tf.reshape(char_emb, [num_sentences * max_sentence_length, util.shape(char_emb, 2), util.shape(char_emb, 3)]) # [num_sentences * max_sentence_length, max_word_length, emb]
flattened_aggregated_char_emb = util.cnn(flattened_char_emb, self.config["filter_widths"], self.config["filter_size"]) # [num_sentences * max_sentence_length, emb]
aggregated_char_emb = tf.reshape(flattened_aggregated_char_emb, [num_sentences, max_sentence_length, util.shape(flattened_aggregated_char_emb, 1)]) # [num_sentences, max_sentence_length, emb]
context_emb_list.append(aggregated_char_emb)
head_emb_list.append(aggregated_char_emb)
if not self.lm_file:
elmo_module = hub.Module("https://tfhub.dev/google/elmo/2")
lm_embeddings = elmo_module(
inputs={"tokens": tokens, "sequence_len": text_len},
signature="tokens", as_dict=True)
word_emb = lm_embeddings["word_emb"] # [num_sentences, max_sentence_length, 512]
lm_emb = tf.stack([tf.concat([word_emb, word_emb], -1),
lm_embeddings["lstm_outputs1"],
lm_embeddings["lstm_outputs2"]], -1) # [num_sentences, max_sentence_length, 1024, 3]
lm_emb_size = util.shape(lm_emb, 2)
lm_num_layers = util.shape(lm_emb, 3)
with tf.variable_scope("lm_aggregation"):
self.lm_weights = tf.nn.softmax(tf.get_variable("lm_scores", [lm_num_layers], initializer=tf.constant_initializer(0.0)))
self.lm_scaling = tf.get_variable("lm_scaling", [], initializer=tf.constant_initializer(1.0))
flattened_lm_emb = tf.reshape(lm_emb, [num_sentences * max_sentence_length * lm_emb_size, lm_num_layers])
flattened_aggregated_lm_emb = tf.matmul(flattened_lm_emb, tf.expand_dims(self.lm_weights, 1)) # [num_sentences * max_sentence_length * emb, 1]
aggregated_lm_emb = tf.reshape(flattened_aggregated_lm_emb, [num_sentences, max_sentence_length, lm_emb_size])
aggregated_lm_emb *= self.lm_scaling
context_emb_list.append(aggregated_lm_emb)
context_emb = tf.concat(context_emb_list, 2) # [num_sentences, max_sentence_length, emb]
head_emb = tf.concat(head_emb_list, 2) # [num_sentences, max_sentence_length, emb]
context_emb = tf.nn.dropout(context_emb, self.lexical_dropout) # [num_sentences, max_sentence_length, emb]
head_emb = tf.nn.dropout(head_emb, self.lexical_dropout) # [num_sentences, max_sentence_length, emb]
text_len_mask = tf.sequence_mask(text_len, maxlen=max_sentence_length) # [num_sentence, max_sentence_length]
context_outputs = self.lstm_contextualize(context_emb, text_len, text_len_mask) # [num_words, emb]
num_words = util.shape(context_outputs, 0)
genre_emb = tf.gather(tf.get_variable("genre_embeddings", [len(self.genres), self.config["feature_size"]]), genre) # [emb]
sentence_indices = tf.tile(tf.expand_dims(tf.range(num_sentences), 1), [1, max_sentence_length]) # [num_sentences, max_sentence_length]
flattened_sentence_indices = self.flatten_emb_by_sentence(sentence_indices, text_len_mask) # [num_words]
flattened_head_emb = self.flatten_emb_by_sentence(head_emb, text_len_mask) # [num_words]
candidate_starts = tf.tile(tf.expand_dims(tf.range(num_words), 1), [1, self.max_span_width]) # [num_words, max_span_width]
candidate_ends = candidate_starts + tf.expand_dims(tf.range(self.max_span_width), 0) # [num_words, max_span_width]
candidate_start_sentence_indices = tf.gather(flattened_sentence_indices, candidate_starts) # [num_words, max_span_width]
candidate_end_sentence_indices = tf.gather(flattened_sentence_indices, tf.minimum(candidate_ends, num_words - 1)) # [num_words, max_span_width]
candidate_mask = tf.logical_and(candidate_ends < num_words, tf.equal(candidate_start_sentence_indices, candidate_end_sentence_indices)) # [num_words, max_span_width]
flattened_candidate_mask = tf.reshape(candidate_mask, [-1]) # [num_words * max_span_width]
candidate_starts = tf.boolean_mask(tf.reshape(candidate_starts, [-1]), flattened_candidate_mask) # [num_candidates]
candidate_ends = tf.boolean_mask(tf.reshape(candidate_ends, [-1]), flattened_candidate_mask) # [num_candidates]
candidate_sentence_indices = tf.boolean_mask(tf.reshape(candidate_start_sentence_indices, [-1]), flattened_candidate_mask) # [num_candidates]
candidate_cluster_ids = self.get_candidate_labels(candidate_starts, candidate_ends, gold_starts, gold_ends, cluster_ids) # [num_candidates]
candidate_span_emb = self.get_span_emb(flattened_head_emb, context_outputs, candidate_starts, candidate_ends) # [num_candidates, emb]
candidate_mention_scores = self.get_mention_scores(candidate_span_emb) # [k, 1]
candidate_mention_scores = tf.squeeze(candidate_mention_scores, 1) # [k]
k = tf.to_int32(tf.floor(tf.to_float(tf.shape(context_outputs)[0]) * self.config["top_span_ratio"]))
top_span_indices = coref_ops.extract_spans(tf.expand_dims(candidate_mention_scores, 0),
tf.expand_dims(candidate_starts, 0),
tf.expand_dims(candidate_ends, 0),
tf.expand_dims(k, 0),
util.shape(context_outputs, 0),
True) # [1, k]
top_span_indices.set_shape([1, None])
top_span_indices = tf.squeeze(top_span_indices, 0) # [k]
top_span_starts = tf.gather(candidate_starts, top_span_indices) # [k]
top_span_ends = tf.gather(candidate_ends, top_span_indices) # [k]
top_span_emb = tf.gather(candidate_span_emb, top_span_indices) # [k, emb]
top_span_cluster_ids = tf.gather(candidate_cluster_ids, top_span_indices) # [k]
top_span_mention_scores = tf.gather(candidate_mention_scores, top_span_indices) # [k]
top_span_sentence_indices = tf.gather(candidate_sentence_indices, top_span_indices) # [k]
top_span_speaker_ids = tf.gather(speaker_ids, top_span_starts) # [k]
c = tf.minimum(self.config["max_top_antecedents"], k)
if self.config["coarse_to_fine"]:
top_antecedents, top_antecedents_mask, top_fast_antecedent_scores, top_antecedent_offsets = self.coarse_to_fine_pruning(top_span_emb, top_span_mention_scores, c)
else:
top_antecedents, top_antecedents_mask, top_fast_antecedent_scores, top_antecedent_offsets = self.distance_pruning(top_span_emb, top_span_mention_scores, c)
dummy_scores = tf.zeros([k, 1]) # [k, 1]
for i in range(self.config["coref_depth"]):
with tf.variable_scope("coref_layer", reuse=(i > 0)):
top_antecedent_emb = tf.gather(top_span_emb, top_antecedents) # [k, c, emb]
top_antecedent_scores = top_fast_antecedent_scores + self.get_slow_antecedent_scores(top_span_emb, top_antecedents, top_antecedent_emb, top_antecedent_offsets, top_span_speaker_ids, genre_emb) # [k, c]
top_antecedent_weights = tf.nn.softmax(tf.concat([dummy_scores, top_antecedent_scores], 1)) # [k, c + 1]
top_antecedent_emb = tf.concat([tf.expand_dims(top_span_emb, 1), top_antecedent_emb], 1) # [k, c + 1, emb]
attended_span_emb = tf.reduce_sum(tf.expand_dims(top_antecedent_weights, 2) * top_antecedent_emb, 1) # [k, emb]
with tf.variable_scope("f"):
f = tf.sigmoid(util.projection(tf.concat([top_span_emb, attended_span_emb], 1), util.shape(top_span_emb, -1))) # [k, emb]
top_span_emb = f * attended_span_emb + (1 - f) * top_span_emb # [k, emb]
top_antecedent_scores = tf.concat([dummy_scores, top_antecedent_scores], 1) # [k, c + 1]
top_antecedent_cluster_ids = tf.gather(top_span_cluster_ids, top_antecedents) # [k, c]
top_antecedent_cluster_ids += tf.to_int32(tf.log(tf.to_float(top_antecedents_mask))) # [k, c]
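    # log(False) is -inf, so masked antecedents end up with a very large negative
    # cluster id after the int cast and can never equal a real (positive) cluster
    # id in the comparison below.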
same_cluster_indicator = tf.equal(top_antecedent_cluster_ids, tf.expand_dims(top_span_cluster_ids, 1)) # [k, c]
non_dummy_indicator = tf.expand_dims(top_span_cluster_ids > 0, 1) # [k, 1]
pairwise_labels = tf.logical_and(same_cluster_indicator, non_dummy_indicator) # [k, c]
dummy_labels = tf.logical_not(tf.reduce_any(pairwise_labels, 1, keepdims=True)) # [k, 1]
top_antecedent_labels = tf.concat([dummy_labels, pairwise_labels], 1) # [k, c + 1]
loss = self.softmax_loss(top_antecedent_scores, top_antecedent_labels) # [k]
loss = tf.reduce_sum(loss) # []
return [candidate_starts, candidate_ends, candidate_mention_scores, top_span_starts, top_span_ends, top_antecedents, top_antecedent_scores], loss
def get_span_emb(self, head_emb, context_outputs, span_starts, span_ends):
span_emb_list = []
span_start_emb = tf.gather(context_outputs, span_starts) # [k, emb]
span_emb_list.append(span_start_emb)
span_end_emb = tf.gather(context_outputs, span_ends) # [k, emb]
span_emb_list.append(span_end_emb)
span_width = 1 + span_ends - span_starts # [k]
if self.config["use_features"]:
span_width_index = span_width - 1 # [k]
span_width_emb = tf.gather(tf.get_variable("span_width_embeddings", [self.config["max_span_width"], self.config["feature_size"]]), span_width_index) # [k, emb]
span_width_emb = tf.nn.dropout(span_width_emb, self.dropout)
span_emb_list.append(span_width_emb)
if self.config["model_heads"]:
span_indices = tf.expand_dims(tf.range(self.config["max_span_width"]), 0) + tf.expand_dims(span_starts, 1) # [k, max_span_width]
span_indices = tf.minimum(util.shape(context_outputs, 0) - 1, span_indices) # [k, max_span_width]
span_text_emb = tf.gather(head_emb, span_indices) # [k, max_span_width, emb]
with tf.variable_scope("head_scores"):
self.head_scores = util.projection(context_outputs, 1) # [num_words, 1]
span_head_scores = tf.gather(self.head_scores, span_indices) # [k, max_span_width, 1]
span_mask = tf.expand_dims(tf.sequence_mask(span_width, self.config["max_span_width"], dtype=tf.float32), 2) # [k, max_span_width, 1]
span_head_scores += tf.log(span_mask) # [k, max_span_width, 1]
span_attention = tf.nn.softmax(span_head_scores, 1) # [k, max_span_width, 1]
span_head_emb = tf.reduce_sum(span_attention * span_text_emb, 1) # [k, emb]
span_emb_list.append(span_head_emb)
span_emb = tf.concat(span_emb_list, 1) # [k, emb]
return span_emb # [k, emb]
def get_mention_scores(self, span_emb):
with tf.variable_scope("mention_scores"):
return util.ffnn(span_emb, self.config["ffnn_depth"], self.config["ffnn_size"], 1, self.dropout) # [k, 1]
def softmax_loss(self, antecedent_scores, antecedent_labels):
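    # Marginalized cross-entropy: tf.log of the boolean labels maps non-gold
    # antecedents to -inf, so the per-span loss is
    #   -log( total softmax probability assigned to the gold antecedents )
    # = logsumexp(all scores) - logsumexp(gold scores).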
gold_scores = antecedent_scores + tf.log(tf.to_float(antecedent_labels)) # [k, max_ant + 1]
marginalized_gold_scores = tf.reduce_logsumexp(gold_scores, [1]) # [k]
log_norm = tf.reduce_logsumexp(antecedent_scores, [1]) # [k]
return log_norm - marginalized_gold_scores # [k]
def bucket_distance(self, distances):
"""
Places the given values (designed for distances) into 10 semi-logscale buckets:
[0, 1, 2, 3, 4, 5-7, 8-15, 16-31, 32-63, 64+].
"""
logspace_idx = tf.to_int32(tf.floor(tf.log(tf.to_float(distances))/math.log(2))) + 3
use_identity = tf.to_int32(distances <= 4)
combined_idx = use_identity * distances + (1 - use_identity) * logspace_idx
return tf.clip_by_value(combined_idx, 0, 9)
def get_slow_antecedent_scores(self, top_span_emb, top_antecedents, top_antecedent_emb, top_antecedent_offsets, top_span_speaker_ids, genre_emb):
k = util.shape(top_span_emb, 0)
c = util.shape(top_antecedents, 1)
feature_emb_list = []
if self.config["use_metadata"]:
top_antecedent_speaker_ids = tf.gather(top_span_speaker_ids, top_antecedents) # [k, c]
same_speaker = tf.equal(tf.expand_dims(top_span_speaker_ids, 1), top_antecedent_speaker_ids) # [k, c]
speaker_pair_emb = tf.gather(tf.get_variable("same_speaker_emb", [2, self.config["feature_size"]]), tf.to_int32(same_speaker)) # [k, c, emb]
feature_emb_list.append(speaker_pair_emb)
tiled_genre_emb = tf.tile(tf.expand_dims(tf.expand_dims(genre_emb, 0), 0), [k, c, 1]) # [k, c, emb]
feature_emb_list.append(tiled_genre_emb)
if self.config["use_features"]:
antecedent_distance_buckets = self.bucket_distance(top_antecedent_offsets) # [k, c]
antecedent_distance_emb = tf.gather(tf.get_variable("antecedent_distance_emb", [10, self.config["feature_size"]]), antecedent_distance_buckets) # [k, c]
feature_emb_list.append(antecedent_distance_emb)
feature_emb = tf.concat(feature_emb_list, 2) # [k, c, emb]
feature_emb = tf.nn.dropout(feature_emb, self.dropout) # [k, c, emb]
target_emb = tf.expand_dims(top_span_emb, 1) # [k, 1, emb]
similarity_emb = top_antecedent_emb * target_emb # [k, c, emb]
target_emb = tf.tile(target_emb, [1, c, 1]) # [k, c, emb]
pair_emb = tf.concat([target_emb, top_antecedent_emb, similarity_emb, feature_emb], 2) # [k, c, emb]
with tf.variable_scope("slow_antecedent_scores"):
slow_antecedent_scores = util.ffnn(pair_emb, self.config["ffnn_depth"], self.config["ffnn_size"], 1, self.dropout) # [k, c, 1]
slow_antecedent_scores = tf.squeeze(slow_antecedent_scores, 2) # [k, c]
return slow_antecedent_scores # [k, c]
def get_fast_antecedent_scores(self, top_span_emb):
with tf.variable_scope("src_projection"):
source_top_span_emb = tf.nn.dropout(util.projection(top_span_emb, util.shape(top_span_emb, -1)), self.dropout) # [k, emb]
target_top_span_emb = tf.nn.dropout(top_span_emb, self.dropout) # [k, emb]
return tf.matmul(source_top_span_emb, target_top_span_emb, transpose_b=True) # [k, k]
def flatten_emb_by_sentence(self, emb, text_len_mask):
num_sentences = tf.shape(emb)[0]
max_sentence_length = tf.shape(emb)[1]
emb_rank = len(emb.get_shape())
if emb_rank == 2:
flattened_emb = tf.reshape(emb, [num_sentences * max_sentence_length])
elif emb_rank == 3:
flattened_emb = tf.reshape(emb, [num_sentences * max_sentence_length, util.shape(emb, 2)])
else:
raise ValueError("Unsupported rank: {}".format(emb_rank))
return tf.boolean_mask(flattened_emb, tf.reshape(text_len_mask, [num_sentences * max_sentence_length]))
def lstm_contextualize(self, text_emb, text_len, text_len_mask):
num_sentences = tf.shape(text_emb)[0]
current_inputs = text_emb # [num_sentences, max_sentence_length, emb]
for layer in range(self.config["contextualization_layers"]):
with tf.variable_scope("layer_{}".format(layer)):
with tf.variable_scope("fw_cell"):
cell_fw = util.CustomLSTMCell(self.config["contextualization_size"], num_sentences, self.lstm_dropout)
with tf.variable_scope("bw_cell"):
cell_bw = util.CustomLSTMCell(self.config["contextualization_size"], num_sentences, self.lstm_dropout)
state_fw = tf.contrib.rnn.LSTMStateTuple(tf.tile(cell_fw.initial_state.c, [num_sentences, 1]), tf.tile(cell_fw.initial_state.h, [num_sentences, 1]))
state_bw = tf.contrib.rnn.LSTMStateTuple(tf.tile(cell_bw.initial_state.c, [num_sentences, 1]), tf.tile(cell_bw.initial_state.h, [num_sentences, 1]))
(fw_outputs, bw_outputs), _ = tf.nn.bidirectional_dynamic_rnn(
cell_fw=cell_fw,
cell_bw=cell_bw,
inputs=current_inputs,
sequence_length=text_len,
initial_state_fw=state_fw,
initial_state_bw=state_bw)
text_outputs = tf.concat([fw_outputs, bw_outputs], 2) # [num_sentences, max_sentence_length, emb]
text_outputs = tf.nn.dropout(text_outputs, self.lstm_dropout)
if layer > 0:
highway_gates = tf.sigmoid(util.projection(text_outputs, util.shape(text_outputs, 2))) # [num_sentences, max_sentence_length, emb]
text_outputs = highway_gates * text_outputs + (1 - highway_gates) * current_inputs
current_inputs = text_outputs
return self.flatten_emb_by_sentence(text_outputs, text_len_mask)
def get_predicted_antecedents(self, antecedents, antecedent_scores):
predicted_antecedents = []
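    # Column 0 of antecedent_scores is the dummy "no antecedent" score, hence the
    # argmax - 1 below: a negative index means the span starts its own cluster.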
for i, index in enumerate(np.argmax(antecedent_scores, axis=1) - 1):
if index < 0:
predicted_antecedents.append(-1)
else:
predicted_antecedents.append(antecedents[i, index])
return predicted_antecedents
def get_predicted_clusters(self, top_span_starts, top_span_ends, predicted_antecedents):
mention_to_predicted = {}
predicted_clusters = []
for i, predicted_index in enumerate(predicted_antecedents):
if predicted_index < 0:
continue
assert i > predicted_index
predicted_antecedent = (int(top_span_starts[predicted_index]), int(top_span_ends[predicted_index]))
if predicted_antecedent in mention_to_predicted:
predicted_cluster = mention_to_predicted[predicted_antecedent]
else:
predicted_cluster = len(predicted_clusters)
predicted_clusters.append([predicted_antecedent])
mention_to_predicted[predicted_antecedent] = predicted_cluster
mention = (int(top_span_starts[i]), int(top_span_ends[i]))
predicted_clusters[predicted_cluster].append(mention)
mention_to_predicted[mention] = predicted_cluster
predicted_clusters = [tuple(pc) for pc in predicted_clusters]
mention_to_predicted = { m:predicted_clusters[i] for m,i in mention_to_predicted.items() }
return predicted_clusters, mention_to_predicted
def evaluate_coref(self, top_span_starts, top_span_ends, predicted_antecedents, gold_clusters, evaluator):
gold_clusters = [tuple(tuple(m) for m in gc) for gc in gold_clusters]
mention_to_gold = {}
for gc in gold_clusters:
for mention in gc:
mention_to_gold[mention] = gc
predicted_clusters, mention_to_predicted = self.get_predicted_clusters(top_span_starts, top_span_ends, predicted_antecedents)
evaluator.update(predicted_clusters, gold_clusters, mention_to_predicted, mention_to_gold)
return predicted_clusters
def load_eval_data(self):
if self.eval_data is None:
def load_line(line):
example = json.loads(line)
return self.tensorize_example(example, is_training=False), example
with open(self.config["eval_path"]) as f:
self.eval_data = [load_line(l) for l in f.readlines()]
num_words = sum(tensorized_example[2].sum() for tensorized_example, _ in self.eval_data)
print("Loaded {} eval examples.".format(len(self.eval_data)))
def evaluate(self, session, official_stdout=False):
self.load_eval_data()
coref_predictions = {}
coref_evaluator = metrics.CorefEvaluator()
for example_num, (tensorized_example, example) in enumerate(self.eval_data):
_, _, _, _, _, _, _, _, _, gold_starts, gold_ends, _ = tensorized_example
feed_dict = {i:t for i,t in zip(self.input_tensors, tensorized_example)}
candidate_starts, candidate_ends, candidate_mention_scores, top_span_starts, top_span_ends, top_antecedents, top_antecedent_scores = session.run(self.predictions, feed_dict=feed_dict)
predicted_antecedents = self.get_predicted_antecedents(top_antecedents, top_antecedent_scores)
coref_predictions[example["doc_key"]] = self.evaluate_coref(top_span_starts, top_span_ends, predicted_antecedents, example["clusters"], coref_evaluator)
if example_num % 10 == 0:
print("Evaluated {}/{} examples.".format(example_num + 1, len(self.eval_data)))
summary_dict = {}
conll_results = conll.evaluate_conll(self.config["conll_eval_path"], coref_predictions, official_stdout)
average_f1 = sum(results["f"] for results in conll_results.values()) / len(conll_results)
summary_dict["Average F1 (conll)"] = average_f1
print("Average F1 (conll): {:.2f}%".format(average_f1))
p,r,f = coref_evaluator.get_prf()
summary_dict["Average F1 (py)"] = f
print("Average F1 (py): {:.2f}%".format(f * 100))
summary_dict["Average precision (py)"] = p
print("Average precision (py): {:.2f}%".format(p * 100))
summary_dict["Average recall (py)"] = r
print("Average recall (py): {:.2f}%".format(r * 100))
return util.make_summary(summary_dict), average_f1
|
main.py
|
#!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import math
import multiprocessing as mp
import re
import shutil
import time
import traceback
from importlib import import_module
from multiprocessing import Process
import os
import sys
sys.path.insert(0, os.getcwd())
import code.common.arguments as common_args
from code import get_benchmark, get_harness
from code.common import BENCHMARKS, SCENARIOS
from code.common import args_to_string, find_config_files, load_configs, run_command
from code.common import auditing
from code.common import logging, get_system, is_xavier
from code.common.config_parser import get_system_benchmark_config
from code.common.arguments import apply_overrides
from code.common.scopedMPS import ScopedMPS, turn_off_mps
def launch_handle_generate_engine(*args, **kwargs):
retries = 1
timeout = 7200
success = False
for i in range(retries):
# Build engines in another process to make sure we exit with clean cuda
# context so that MPS can be turned off.
from code.main import handle_generate_engine
p = Process(target=handle_generate_engine, args=args, kwargs=kwargs)
p.start()
try:
p.join(timeout)
except KeyboardInterrupt:
p.terminate()
p.join(timeout)
raise KeyboardInterrupt
if p.exitcode == 0:
success = True
break
if not success:
raise RuntimeError("Building engines failed!")
def copy_default_engine(benchmark):
"""Copy engine file from default path to new path."""
new_path = benchmark._get_engine_fpath(None, None) # Use default values
benchmark.config_ver = "default"
default_path = benchmark._get_engine_fpath(None, None)
logging.info("Copying {:} to {:}".format(default_path, new_path))
shutil.copyfile(default_path, new_path)
def handle_generate_engine(config, gpu=True, dla=True, copy_from_default=False):
benchmark_name = config["benchmark"]
logging.info(
"Building engines for {:} benchmark in {:} scenario...".format(
benchmark_name,
config["scenario"]))
start_time = time.time()
arglist = common_args.GENERATE_ENGINE_ARGS
config = apply_overrides(config, arglist)
if dla and "dla_batch_size" in config:
config["batch_size"] = config["dla_batch_size"]
logging.info("Building DLA engine for {:}_{:}_{:}".format(config["system_id"], benchmark_name, config["scenario"]))
b = get_benchmark(config)
if copy_from_default:
copy_default_engine(b)
else:
b.build_engines()
if gpu and "gpu_batch_size" in config:
config["batch_size"] = config["gpu_batch_size"]
config["dla_core"] = None
logging.info("Building GPU engine for {:}_{:}_{:}".format(config["system_id"], benchmark_name, config["scenario"]))
b = get_benchmark(config)
if copy_from_default:
copy_default_engine(b)
else:
b.build_engines()
end_time = time.time()
logging.info("Finished building engines for {:} benchmark in {:} scenario.".format(benchmark_name, config["scenario"]))
print("Time taken to generate engines: {:} seconds".format(end_time - start_time))
def handle_audit_verification(audit_test_name, config):
# Decouples the verification step from any auditing runs for better maintenance and testing
logging.info('AUDIT HARNESS: Running verification script...')
# Prepare log_dir
config['log_dir'] = os.path.join('build/compliance_logs', audit_test_name)
# Get a harness object
harness, config = get_harness(config=config, profile=None)
result = None
if audit_test_name == 'TEST01':
result = auditing.verify_test01(harness)
if result == 'TEST01 FALLBACK':
# Signals a fallback for failed test
# Process description:
# 1. Generate baseline_accuracy file
# 2. Calculate the accuracy of baseline, using the benchmark's accuracy script
# 3. Use same script to calculate accuracy of compliance run
# 4. Depending on accuracy level, declare success if two values are within defined tolerance.
logging.info('main.py notified for fallback handling on TEST01')
# Run compliance script to generate baseline file
full_log_dir = harness.get_full_log_dir()
results_path = os.path.join('results', harness.get_system_name(), harness._get_submission_benchmark_name(), harness.scenario)
harness_accuracy_log = os.path.join(results_path, 'accuracy/mlperf_log_accuracy.json')
compliance_accuracy_log = os.path.join(full_log_dir, 'mlperf_log_accuracy.json')
fallback_command = 'bash build/inference/compliance/nvidia/TEST01/create_accuracy_baseline.sh {} {}'.format(
harness_accuracy_log,
compliance_accuracy_log
)
# generates new file called mlperf_log_accuracy_baseline.json
run_command(fallback_command, get_output=True)
def move_file(src, dst):
logging.info('Moving file: {} --> {}'.format(src, dst))
shutil.move(src, dst)
def copy_file(src, dst):
logging.info('Copying file: {} --> {}'.format(src, dst))
shutil.copy(src, dst)
# Create accuracy and performance directories
accuracy_dir = os.path.join(full_log_dir, 'TEST01', 'accuracy')
performance_dir = os.path.join(full_log_dir, 'TEST01', 'performance', 'run_1')
os.makedirs(accuracy_dir, exist_ok=True)
os.makedirs(performance_dir, exist_ok=True)
# Get the accuracy of baseline file
fallback_result_baseline = check_accuracy('mlperf_log_accuracy_baseline.json', config, is_compliance=True)
# Move it to the submission dir
dest_path = os.path.join(accuracy_dir, 'baseline_accuracy.txt')
move_file('accuracy.txt', dest_path)
# Get the accuracy of compliance file
fallback_result_compliance = check_accuracy('{}/mlperf_log_accuracy.json'.format(full_log_dir), config, is_compliance=True)
# Move it to the submission dir - check_accuracy stores accuracy.txt in the directory
# name provided in its first argument. So this file will already be located inside get_full_log_dir()
src_path = os.path.join(full_log_dir, 'accuracy.txt')
dest_path = os.path.join(accuracy_dir, 'compliance_accuracy.txt')
move_file(src_path, dest_path)
# Move the required logs to their correct locations since run_verification.py has failed.
move_file('verify_accuracy.txt', os.path.join(full_log_dir, 'TEST01', 'verify_accuracy.txt'))
copy_file(os.path.join(full_log_dir, 'mlperf_log_accuracy.json'), os.path.join(accuracy_dir, 'mlperf_log_accuracy.json'))
copy_file(os.path.join(full_log_dir, 'mlperf_log_detail.txt'), os.path.join(performance_dir, 'mlperf_log_detail.txt'))
copy_file(os.path.join(full_log_dir, 'mlperf_log_summary.txt'), os.path.join(performance_dir, 'mlperf_log_summary.txt'))
# Need to run verify_performance.py script to get verify_performance.txt file.
verify_performance_command = ("python3 build/inference/compliance/nvidia/TEST01/verify_performance.py -r "
+ results_path + "/performance/run_1/mlperf_log_summary.txt" + " -t "
+ performance_dir + "/mlperf_log_summary.txt | tee " + full_log_dir + "/TEST01/verify_performance.txt")
run_command(verify_performance_command, get_output=True)
# Check level of accuracy - this test's tolerance depends on it
accuracy_level = config["accuracy_level"][:-1]
if accuracy_level == '99.9':
logging.info('High Accuracy benchmark detected. Tolerance set to 0.1%')
if not math.isclose(fallback_result_baseline, fallback_result_compliance, rel_tol=0.001):
raise ValueError('TEST01 + Fallback failure: BASELINE ACCURACY: {}, COMPLIANCE_ACCURACY: {}'.format(fallback_result_baseline, fallback_result_compliance))
else:
logging.info('AUDIT HARNESS: Success: TEST01 failure redeemed via fallback approach.')
print('TEST PASS')
elif accuracy_level == '99':
logging.info('Low Accuracy benchmark detected. Tolerance set to 1%')
if not math.isclose(fallback_result_baseline, fallback_result_compliance, rel_tol=0.01):
raise ValueError('TEST01 + Fallback failure: BASELINE ACCURACY: {}, COMPLIANCE_ACCURACY: {}'.format(fallback_result_baseline, fallback_result_compliance))
else:
logging.info('AUDIT HARNESS: Success: TEST01 failure redeemed via fallback approach.')
print('TEST PASS')
else:
raise ValueError('Accuracy level not supported: {}'.format(accuracy_level))
elif audit_test_name == 'TEST04-A' or audit_test_name == 'TEST04-B':
exclude_list = [BENCHMARKS.BERT, BENCHMARKS.DLRM, BENCHMARKS.RNNT]
if BENCHMARKS.alias(config['benchmark']) in exclude_list:
logging.info('TEST04 is not supported for benchmark {}. Ignoring request...'.format(config['benchmark']))
return None
result = auditing.verify_test04(harness)
elif audit_test_name == 'TEST05':
result = auditing.verify_test05(harness)
return result
def handle_run_harness(config, gpu=True, dla=True, profile=None,
power=False, generate_conf_files_only=False, compliance=False):
"""Run harness for given benchmark and scenario."""
benchmark_name = config["benchmark"]
logging.info("Running harness for {:} benchmark in {:} scenario...".format(benchmark_name, config["scenario"]))
arglist = common_args.getScenarioBasedHarnessArgs(config["scenario"])
config = apply_overrides(config, arglist)
# Validate arguments
if not dla:
config["dla_batch_size"] = None
if not gpu:
config["gpu_batch_size"] = None
# If we only want to generate conf_files, then set flag to true
if generate_conf_files_only:
config["generate_conf_files_only"] = True
profile = None
power = False
# MLPINF-829: Disable CUDA graphs when there is a profiler
if profile is not None:
logging.warn("Due to MLPINF-829, CUDA graphs results in a CUDA illegal memory access when run with a profiler \
on r460 driver. Force-disabling CUDA graphs.")
config["use_graphs"] = False
harness, config = get_harness(config, profile)
if power:
try:
from code.internal.power_measurements import PowerMeasurements
power_logfile_name = "{}_{}_{}_{}".format(
config.get("config_name"), config.get("accuracy_level"),
config.get("optimization_level"), config.get("inference_server")
)
power_measurements = PowerMeasurements("{}/{}/{}".format(
os.getcwd(),
"power_measurements",
power_logfile_name)
)
power_measurements.start()
except BaseException:
power_measurements = None
for key, value in config.items():
print("{} : {}".format(key, value))
result = ""
if compliance:
# AP: We need to keep the compliance logs separated from accuracy and perf
# otherwise it messes up the update_results process
config['log_dir'] = os.path.join('build/compliance_logs', config['audit_test_name'])
logging.info('AUDIT HARNESS: Overriding log_dir for compliance run. Set to ' + config['log_dir'])
# Launch the harness
passed = True
try:
result = harness.run_harness()
logging.info("Result: {:}".format(result))
except Exception as _:
traceback.print_exc(file=sys.stdout)
passed = False
finally:
if power and power_measurements is not None:
power_measurements.stop()
if not passed:
raise RuntimeError("Run harness failed!")
if generate_conf_files_only and result == "Generated conf files":
return
# Append result to perf result summary log.
log_dir = config["log_dir"]
summary_file = os.path.join(log_dir, "perf_harness_summary.json")
results = {}
if os.path.exists(summary_file):
with open(summary_file) as f:
results = json.load(f)
config_name = "{:}-{:}-{:}".format(harness.get_system_name(),
config["config_ver"],
config["scenario"])
if config_name not in results:
results[config_name] = {}
results[config_name][benchmark_name] = result
with open(summary_file, "w") as f:
json.dump(results, f)
# Check accuracy from loadgen logs.
if not compliance:
# TEST01 fails the accuracy test because it produces fewer predictions than expected
accuracy = check_accuracy(os.path.join(harness.get_full_log_dir(), "mlperf_log_accuracy.json"), config)
summary_file = os.path.join(log_dir, "accuracy_summary.json")
results = {}
if os.path.exists(summary_file):
with open(summary_file) as f:
results = json.load(f)
if config_name not in results:
results[config_name] = {}
results[config_name][benchmark_name] = accuracy
with open(summary_file, "w") as f:
json.dump(results, f)
def check_accuracy(log_file, config, is_compliance=False):
"""Check accuracy of given benchmark."""
benchmark_name = config["benchmark"]
accuracy_targets = {
BENCHMARKS.BERT: 90.874,
BENCHMARKS.DLRM: 80.25,
BENCHMARKS.RNNT: 100.0 - 7.45225,
BENCHMARKS.ResNet50: 76.46,
BENCHMARKS.SSDMobileNet: 22.0,
BENCHMARKS.SSDResNet34: 20.0,
BENCHMARKS.UNET: 0.853,
}
threshold_ratio = float(config["accuracy_level"][:-1]) / 100
if not os.path.exists(log_file):
return "Cannot find accuracy JSON file."
    # Check whether log_file is empty by reading just its first few bytes.
    # The first 4-6 bytes ('', '[]', '[]\r', '[\n]\n', '[\r\n]\r\n', ...) are
    # usually enough, but read 8 bytes to be safe.
with open(log_file, 'r') as lf:
first_8B = lf.read(8)
if not first_8B or ('[' in first_8B and ']' in first_8B):
return "No accuracy results in PerformanceOnly mode."
dtype_expand_map = {"fp16": "float16", "fp32": "float32", "int8": "float16"} # Use FP16 output for INT8 mode
# Since submission-checker uses a relative import, but we are running from main.py, we need to surface its directory
# into sys.path so it can successfully import it.
# Insert into index 1 so that current working directory still takes precedence.
sys.path.insert(1, os.path.join(os.getcwd(), "build", "inference", "tools", "submission"))
accuracy_regex_map = import_module("submission-checker").ACC_PATTERN
threshold = accuracy_targets[benchmark_name] * threshold_ratio
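    # For example, ResNet50 at accuracy_level "99%" gives
    # threshold = 76.46 * 0.99 ~= 75.70 top-1 accuracy.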
# Every benchmark has its own accuracy script. Prepare commandline with args to the script.
skip_run_command = False
if benchmark_name in [BENCHMARKS.ResNet50]:
cmd = "python3 build/inference/vision/classification_and_detection/tools/accuracy-imagenet.py --mlperf-accuracy-file {:} \
--imagenet-val-file data_maps/imagenet/val_map.txt --dtype int32 ".format(log_file)
regex = accuracy_regex_map["acc"]
elif benchmark_name == BENCHMARKS.SSDResNet34:
cmd = "python3 build/inference/vision/classification_and_detection/tools/accuracy-coco.py --mlperf-accuracy-file {:} \
--coco-dir {:} --output-file build/ssd-resnet34-results.json --use-inv-map".format(
log_file, os.path.join(os.environ.get("PREPROCESSED_DATA_DIR", "build/preprocessed_data"), "coco"))
regex = accuracy_regex_map["mAP"]
elif benchmark_name == BENCHMARKS.SSDMobileNet:
cmd = "python3 build/inference/vision/classification_and_detection/tools/accuracy-coco.py --mlperf-accuracy-file {:} \
--coco-dir {:} --output-file build/ssd-mobilenet-results.json".format(
log_file, os.path.join(os.environ.get("PREPROCESSED_DATA_DIR", "build/preprocessed_data"), "coco"))
regex = accuracy_regex_map["mAP"]
elif benchmark_name == BENCHMARKS.BERT:
# Having issue installing tokenizers on Xavier...
if is_xavier():
cmd = "python3 code/bert/tensorrt/accuracy-bert.py --mlperf-accuracy-file {:} --squad-val-file {:}".format(
log_file, os.path.join(os.environ.get("DATA_DIR", "build/data"), "squad", "dev-v1.1.json"))
else:
dtype = config["precision"].lower()
if dtype in dtype_expand_map:
dtype = dtype_expand_map[dtype]
val_data_path = os.path.join(
os.environ.get("DATA_DIR", "build/data"),
"squad", "dev-v1.1.json")
vocab_file_path = "build/models/bert/vocab.txt"
if 'CPU' in config['config_name']:
vocab_file_path = "build/data/squad/vocab.txt"
output_prediction_path = os.path.join(os.path.dirname(log_file), "predictions.json")
cmd = "python3 build/inference/language/bert/accuracy-squad.py " \
"--log_file {:} --vocab_file {:} --val_data {:} --out_file {:} " \
"--output_dtype {:}".format(log_file, vocab_file_path, val_data_path, output_prediction_path, dtype)
regex = accuracy_regex_map["F1"]
elif benchmark_name == BENCHMARKS.DLRM:
cmd = "python3 build/inference/recommendation/dlrm/pytorch/tools/accuracy-dlrm.py --mlperf-accuracy-file {:} " \
"--day-23-file build/data/criteo/day_23 --aggregation-trace-file " \
"build/preprocessed_data/criteo/full_recalib/sample_partition_trace.txt".format(log_file)
regex = accuracy_regex_map["AUC"]
elif benchmark_name == BENCHMARKS.RNNT:
# Having issue installing librosa on Xavier...
if is_xavier():
cmd = "python3 code/rnnt/tensorrt/accuracy.py --loadgen_log {:}".format(log_file)
else:
# RNNT output indices are in INT8
cmd = "python3 build/inference/speech_recognition/rnnt/accuracy_eval.py " \
"--log_dir {:} --dataset_dir build/preprocessed_data/LibriSpeech/dev-clean-wav " \
"--manifest build/preprocessed_data/LibriSpeech/dev-clean-wav.json " \
"--output_dtype int8".format(os.path.dirname(log_file))
regex = accuracy_regex_map["WER"]
elif benchmark_name == BENCHMARKS.UNET:
postprocess_dir = "build/brats_postprocessed_data"
if not os.path.exists(postprocess_dir):
os.makedirs(postprocess_dir)
dtype = config["precision"].lower()
if dtype in dtype_expand_map:
dtype = dtype_expand_map[dtype]
cmd = "python3 build/inference/vision/medical_imaging/3d-unet-brats19/accuracy-brats.py --log_file {:} " \
"--output_dtype {:} --preprocessed_data_dir build/preprocessed_data/brats/brats_reference_preprocessed " \
"--postprocessed_data_dir {:} " \
"--label_data_dir build/preprocessed_data/brats/brats_reference_raw/Task043_BraTS2019/labelsTr".format(log_file, dtype, postprocess_dir)
regex = accuracy_regex_map["DICE"]
# Having issue installing nnUnet on Xavier...
if is_xavier():
# Internally, run on another node to process the accuracy.
try:
cmd = cmd.replace(os.getcwd(), ".", 1)
temp_cmd = "ssh -oBatchMode=yes computelab-frontend-02 \"timeout 1200 srun --gres=gpu:ga100:1 -t 20:00 " \
"bash -c 'cd {:} && make prebuild DOCKER_COMMAND=\\\"{:}\\\"'\"".format(os.getcwd(), cmd)
full_output = run_command(temp_cmd, get_output=True)
start_line_idx = -1
end_line_idx = -1
for (line_idx, line) in enumerate(full_output):
if "Please cite the following paper when using nnUNet:" in line:
start_line_idx = line_idx
if "Done!" in line:
end_line_idx = line_idx
assert start_line_idx != -1 and end_line_idx != -1, "Failed in accuracy checking"
output = full_output[start_line_idx:end_line_idx + 1]
skip_run_command = True
except Exception as e:
logging.warning(
"Accuracy checking for 3DUnet is not supported on Xavier. Please run the following command on desktop:\n{:}".format(cmd))
output = ["Accuracy: mean = 1.0000, whole tumor = 1.0000, tumor core = 1.0000, enhancing tumor = 1.0000"]
skip_run_command = True
else:
raise ValueError("Unknown benchmark: {:}".format(benchmark_name))
# Run benchmark's accuracy script and parse output for result.
if not skip_run_command:
output = run_command(cmd, get_output=True)
result_regex = re.compile(regex)
accuracy = None
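    # Dump the full accuracy-script output to accuracy.txt next to the log, then scan it for the metric value.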
with open(os.path.join(os.path.dirname(log_file), "accuracy.txt"), "w") as f:
for line in output:
print(line, file=f)
for line in output:
result_match = result_regex.match(line)
        if result_match is not None:
accuracy = float(result_match.group(1))
break
    accuracy_result = "PASSED" if accuracy is not None and accuracy >= threshold else "FAILED"
    if accuracy is None:
        # The accuracy script output did not match the expected pattern; fail loudly instead of
        # formatting None in the messages below.
        raise RuntimeError("Could not parse an accuracy value from the accuracy script output.")
    if accuracy_result == "FAILED" and not is_compliance:
        raise RuntimeError(
            "Accuracy = {:.3f}, Threshold = {:.3f}. Accuracy test {:}!".format(
                accuracy, threshold, accuracy_result))
if is_compliance:
return accuracy # Needed for numerical comparison
return "Accuracy = {:.3f}, Threshold = {:.3f}. Accuracy test {:}.".format(
accuracy, threshold, accuracy_result)
def handle_calibrate(config):
benchmark_name = config["benchmark"]
logging.info("Generating calibration cache for Benchmark \"{:}\"".format(benchmark_name))
config = apply_overrides(config, common_args.CALIBRATION_ARGS)
config["dla_core"] = None
config["force_calibration"] = True
b = get_benchmark(config)
b.calibrate()
def main(main_args, system):
"""
Args:
main_args: Args parsed from user input.
system: System to use
"""
system_id = system.get_id()
# Turn off MPS in case it's turned on.
turn_off_mps()
# Get user's benchmarks, else run all.
benchmarks = BENCHMARKS.ALL
if main_args["benchmarks"] is not None:
benchmarks = main_args["benchmarks"].split(",")
benchmarks = [BENCHMARKS.alias(b) for b in benchmarks]
# Get user's scenarios, else use all.
scenarios = SCENARIOS.ALL
if main_args["scenarios"] is not None:
scenarios = main_args["scenarios"].split(",")
scenarios = [SCENARIOS.alias(s) for s in scenarios]
profile = main_args.get("profile", None)
power = main_args.get("power", False)
# Automatically find config file paths
config_files = main_args["configs"]
if config_files == "" or config_files is None:
config_files = find_config_files(benchmarks, scenarios)
if config_files == "":
            logging.warning("Cannot find any valid configs for the specified benchmark-scenario pairs.")
return
logging.info("Using config files: {:}".format(str(config_files)))
configs = load_configs(config_files)
for config in configs:
base_benchmark_conf = get_system_benchmark_config(config, system_id)
if base_benchmark_conf is None:
continue
base_benchmark_conf["config_name"] = "{:}_{:}_{:}".format(
system_id,
base_benchmark_conf["benchmark"],
base_benchmark_conf["scenario"]
)
logging.info("Processing config \"{:}\"".format(base_benchmark_conf["config_name"]))
# Load config_ver / apply overrides
conf_vers = main_args.get("config_ver", "default").split(",")
# Build default first. This is because some config_vers only modify harness args, and the engine is the same as
# default. In this case, we build default first, and copy it instead of rebuilding it.
if "default" in conf_vers:
conf_vers = ["default"] + list(set(conf_vers) - {"default"})
elif "all" in conf_vers:
tmp = ["default"] + list(base_benchmark_conf.get("config_ver", {}).keys())
# As per request, 'all' should skip 'maxQ' config_vers for now. MaxQ should only be run when specified
# directly.
conf_vers = []
for s in tmp:
if "maxq" not in s.lower() and "hetero" not in s.lower():
conf_vers.append(s)
for conf_ver in conf_vers:
benchmark_conf = dict(base_benchmark_conf) # Copy the config so we don't modify it
# These fields are canonical names that refer to certain config versions
benchmark_conf["accuracy_level"] = "99%"
benchmark_conf["optimization_level"] = "plugin-enabled"
benchmark_conf["inference_server"] = "lwis"
equiv_to_default = False
if conf_ver != "default":
if "config_ver" not in benchmark_conf or conf_ver not in benchmark_conf["config_ver"]:
                    logging.warning(
"--config_ver={:} does not exist in config file '{:}'".format(conf_ver, benchmark_conf["config_name"]))
continue
else:
if "high_accuracy" in conf_ver:
benchmark_conf["accuracy_level"] = "99.9%"
if "ootb" in conf_ver:
benchmark_conf["optimization_level"] = "ootb"
# "inference_server" is set when we run the harness
overrides = benchmark_conf["config_ver"][conf_ver]
# Enforce Triton check
if "triton" in conf_ver.lower() and not overrides.get("use_triton", False):
raise RuntimeError("conf_ver={} references Triton harness, but 'use_triton' is false".format(conf_ver))
# Check if this config_ver is equivalent to the default engine
# RNNT has multiple engines, so disable the equiv_to_default.
if benchmark_conf["benchmark"] != BENCHMARKS.RNNT:
gen_eng_argset = set(common_args.GENERATE_ENGINE_ARGS)
override_argset = set(overrides.keys())
equiv_to_default = (len(gen_eng_argset & override_argset) == 0)
benchmark_conf.update(overrides)
# Update the config_ver key to be the actual string name, not the overrides
benchmark_conf["config_ver"] = conf_ver
need_gpu = not main_args["no_gpu"]
need_dla = not main_args["gpu_only"]
# Override the system_name if it exists
if "system_name" in main_args:
benchmark_conf["system_name"] = main_args["system_name"]
# Check for use_cpu
if system_id.startswith("Triton_CPU"):
benchmark_conf["use_cpu"] = True
# Generate engines.
if main_args["action"] == "generate_engines":
# Turn on MPS if server scenario and if active_sms is specified.
benchmark_conf = apply_overrides(benchmark_conf, ["active_sms"])
active_sms = benchmark_conf.get("active_sms", None)
copy_from_default = ("default" in conf_vers) and equiv_to_default
if copy_from_default:
logging.info(
"config_ver={:} only modifies harness args. Re-using default engine.".format(conf_ver))
_gen_args = [benchmark_conf]
_gen_kwargs = {
"gpu": need_gpu,
"dla": need_dla,
"copy_from_default": copy_from_default
}
if not main_args["no_child_process"]:
if config["scenario"] == SCENARIOS.Server and active_sms is not None and active_sms < 100:
with ScopedMPS(active_sms):
launch_handle_generate_engine(*_gen_args, **_gen_kwargs)
else:
launch_handle_generate_engine(*_gen_args, **_gen_kwargs)
else:
handle_generate_engine(*_gen_args, **_gen_kwargs)
# Run CPU harness:
elif main_args["action"] == "run_cpu_harness":
auditing.cleanup()
benchmark_conf["use_cpu"] = True
handle_run_harness(benchmark_conf, False, False, None, power)
# Run harness.
elif main_args["action"] == "run_harness":
# In case there's a leftover audit.config file from a prior compliance run or other reason
# we need to delete it or we risk silent failure.
auditing.cleanup()
handle_run_harness(benchmark_conf, need_gpu, need_dla, profile, power)
elif main_args["action"] == "run_audit_harness" or main_args["action"] == "run_cpu_audit_harness":
logging.info('\n\n\nRunning compliance harness for test ' + main_args['audit_test'] + '\n\n\n')
# Find the correct audit.config file and move it in current directory
dest_config = auditing.load(main_args['audit_test'], benchmark_conf['benchmark'])
# Make sure the log_file override is valid
os.makedirs("build/compliance_logs", exist_ok=True)
# Pass audit test name to handle_run_harness via benchmark_conf
benchmark_conf['audit_test_name'] = main_args['audit_test']
if main_args["action"] == "run_cpu_audit_harness":
need_gpu = False
need_dla = False
profile = None
benchmark_conf["use_cpu"] = True
# Run harness
handle_run_harness(benchmark_conf, need_gpu, need_dla, profile, power, compliance=True)
# Cleanup audit.config
logging.info("AUDIT HARNESS: Cleaning Up audit.config...")
auditing.cleanup()
elif main_args["action"] == "run_audit_verification":
logging.info("Running compliance verification for test " + main_args['audit_test'])
handle_audit_verification(audit_test_name=main_args['audit_test'], config=benchmark_conf)
auditing.cleanup()
elif main_args["action"] == "run_cpu_audit_verification":
logging.info("Running compliance verification for test " + main_args['audit_test'])
benchmark_conf["use_cpu"] = True
handle_audit_verification(audit_test_name=main_args['audit_test'], config=benchmark_conf)
auditing.cleanup()
elif main_args["action"] == "calibrate":
# To generate calibration cache, we only need to run each benchmark once.
# Use offline config.
if benchmark_conf["scenario"] == SCENARIOS.Offline:
handle_calibrate(benchmark_conf)
elif main_args["action"] == "generate_conf_files":
handle_run_harness(benchmark_conf, need_gpu, need_dla, generate_conf_files_only=True)
if __name__ == "__main__":
mp.set_start_method("spawn")
# Check any invalid/misspelling flags.
common_args.check_args()
main_args = common_args.parse_args(common_args.MAIN_ARGS)
# Load System ID
system = get_system()
logging.info("Detected System ID: " + system.get_id())
print("==============main.py================", end='\n')
print("system.get_id", str(system.get_id()), end='\n')
print("main_args= ",str(main_args), "system= ", str(system), end='\n')
print("=====================================", end='\n')
main(main_args, system)
|
manager.py
|
#!/usr/bin/env python3
import os
import time
import sys
import fcntl
import errno
import signal
import shutil
import subprocess
import datetime
import textwrap
from selfdrive.swaglog import cloudlog, add_logentries_handler
from common.basedir import BASEDIR, PARAMS
from common.android import ANDROID
WEBCAM = os.getenv("WEBCAM") is not None
sys.path.append(os.path.join(BASEDIR, "pyextra"))
os.environ['BASEDIR'] = BASEDIR
TOTAL_SCONS_NODES = 1140
prebuilt = os.path.exists(os.path.join(BASEDIR, 'prebuilt'))
# Create folders needed for msgq
try:
os.mkdir("/dev/shm")
except FileExistsError:
pass
except PermissionError:
print("WARNING: failed to make /dev/shm")
if ANDROID:
os.chmod("/dev/shm", 0o777)
def unblock_stdout():
# get a non-blocking stdout
child_pid, child_pty = os.forkpty()
if child_pid != 0: # parent
# child is in its own process group, manually pass kill signals
signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT))
signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM))
fcntl.fcntl(sys.stdout, fcntl.F_SETFL,
fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
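    # relay the child's pty output to our non-blocking stdout until the child exits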
while True:
try:
dat = os.read(child_pty, 4096)
except OSError as e:
if e.errno == errno.EIO:
break
continue
if not dat:
break
try:
sys.stdout.write(dat.decode('utf8'))
except (OSError, IOError, UnicodeDecodeError):
pass
# os.wait() returns a tuple with the pid and a 16 bit value
    # whose low byte is the signal number and whose high byte is the exit status
exit_status = os.wait()[1] >> 8
os._exit(exit_status)
if __name__ == "__main__":
unblock_stdout()
if __name__ == "__main__" and ANDROID:
from common.spinner import Spinner
from common.text_window import TextWindow
else:
from common.spinner import FakeSpinner as Spinner
from common.text_window import FakeTextWindow as TextWindow
import importlib
import traceback
from multiprocessing import Process
# Run scons
spinner = Spinner()
spinner.update("0")
if not prebuilt:
for retry in [True, False]:
# run scons
env = os.environ.copy()
env['SCONS_PROGRESS'] = "1"
env['SCONS_CACHE'] = "1"
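    # build with one scons job fewer than the CPU count (omit -j if the count is unknown)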
nproc = os.cpu_count()
j_flag = "" if nproc is None else "-j%d" % (nproc - 1)
scons = subprocess.Popen(["scons", j_flag], cwd=BASEDIR, env=env, stderr=subprocess.PIPE)
compile_output = []
# Read progress from stderr and update spinner
while scons.poll() is None:
try:
line = scons.stderr.readline()
if line is None:
continue
line = line.rstrip()
prefix = b'progress: '
if line.startswith(prefix):
i = int(line[len(prefix):])
if spinner is not None:
spinner.update("%d" % (70.0 * (i / TOTAL_SCONS_NODES)))
elif len(line):
compile_output.append(line)
print(line.decode('utf8', 'replace'))
except Exception:
pass
if scons.returncode != 0:
# Read remaining output
r = scons.stderr.read().split(b'\n')
compile_output += r
if retry:
print("scons build failed, cleaning in")
for i in range(3,-1,-1):
print("....%d" % i)
time.sleep(1)
subprocess.check_call(["scons", "-c"], cwd=BASEDIR, env=env)
shutil.rmtree("/tmp/scons_cache")
else:
        # Build failed, log the errors
errors = [line.decode('utf8', 'replace') for line in compile_output
if any([err in line for err in [b'error: ', b'not found, needed by target']])]
error_s = "\n".join(errors)
add_logentries_handler(cloudlog)
cloudlog.error("scons build failed\n" + error_s)
# Show TextWindow
error_s = "\n \n".join(["\n".join(textwrap.wrap(e, 65)) for e in errors])
with TextWindow("Openpilot failed to build\n \n" + error_s) as t:
t.wait_for_exit()
exit(1)
else:
break
import cereal
import cereal.messaging as messaging
from common.params import Params
import selfdrive.crash as crash
from selfdrive.registration import register
from selfdrive.version import version, dirty
from selfdrive.loggerd.config import ROOT
from selfdrive.launcher import launcher
from common import android
from common.apk import update_apks, pm_apply_packages, start_offroad
from common.manager_helpers import print_cpu_usage
ThermalStatus = cereal.log.ThermalData.ThermalStatus
# comment out anything you don't want to run
managed_processes = {
"thermald": "selfdrive.thermald.thermald",
#"uploader": "selfdrive.loggerd.uploader",
#"deleter": "selfdrive.loggerd.deleter",
"controlsd": "selfdrive.controls.controlsd",
"plannerd": "selfdrive.controls.plannerd",
"radard": "selfdrive.controls.radard",
"dmonitoringd": "selfdrive.controls.dmonitoringd",
"ubloxd": ("selfdrive/locationd", ["./ubloxd"]),
#"loggerd": ("selfdrive/loggerd", ["./loggerd"]),
#"logmessaged": "selfdrive.logmessaged",
"locationd": "selfdrive.locationd.locationd",
#"tombstoned": "selfdrive.tombstoned",
#"logcatd": ("selfdrive/logcatd", ["./logcatd"]),
"proclogd": ("selfdrive/proclogd", ["./proclogd"]),
"boardd": ("selfdrive/boardd", ["./boardd"]), # not used directly
"pandad": "selfdrive.pandad",
"ui": ("selfdrive/ui", ["./ui"]),
"calibrationd": "selfdrive.locationd.calibrationd",
"paramsd": ("selfdrive/locationd", ["./paramsd"]),
"camerad": ("selfdrive/camerad", ["./camerad"]),
"sensord": ("selfdrive/sensord", ["./sensord"]),
"clocksd": ("selfdrive/clocksd", ["./clocksd"]),
"gpsd": ("selfdrive/sensord", ["./gpsd"]),
#"updated": "selfdrive.updated",
"dmonitoringmodeld": ("selfdrive/modeld", ["./dmonitoringmodeld"]),
"modeld": ("selfdrive/modeld", ["./modeld"]),
"driverview": "selfdrive.controls.lib.driverview",
}
daemon_processes = {
"manage_athenad": ("selfdrive.athena.manage_athenad", "AthenadPid"),
}
running = {}
def get_running():
return running
# due to qualcomm kernel bugs SIGKILLing camerad sometimes causes page table corruption
unkillable_processes = ['camerad']
# processes to end with SIGINT instead of SIGTERM
interrupt_processes = []
# processes to end with SIGKILL instead of SIGTERM
kill_processes = ['sensord', 'paramsd']
# processes to end if thermal conditions exceed Green parameters
green_temp_processes = ['uploader']
persistent_processes = [
'thermald',
'logmessaged',
'ui',
'uploader',
]
if ANDROID:
persistent_processes += [
'logcatd',
'tombstoned',
'updated',
]
car_started_processes = [
'controlsd',
'plannerd',
'loggerd',
'radard',
'dmonitoringd',
'calibrationd',
'paramsd',
'camerad',
'modeld',
'proclogd',
'ubloxd',
#'locationd',
]
if WEBCAM:
car_started_processes += [
'dmonitoringmodeld',
]
if ANDROID:
car_started_processes += [
'sensord',
'clocksd',
'gpsd',
'dmonitoringmodeld',
'deleter',
]
def register_managed_process(name, desc, car_started=False):
global managed_processes, car_started_processes, persistent_processes
print("registering %s" % name)
managed_processes[name] = desc
if car_started:
car_started_processes.append(name)
else:
persistent_processes.append(name)
# ****************** process management functions ******************
def nativelauncher(pargs, cwd):
# exec the process
os.chdir(cwd)
# because when extracted from pex zips permissions get lost -_-
os.chmod(pargs[0], 0o700)
os.execvp(pargs[0], pargs)
def start_managed_process(name):
if name in running or name not in managed_processes:
return
proc = managed_processes[name]
if isinstance(proc, str):
cloudlog.info("starting python %s" % proc)
running[name] = Process(name=name, target=launcher, args=(proc,))
else:
pdir, pargs = proc
cwd = os.path.join(BASEDIR, pdir)
cloudlog.info("starting process %s" % name)
running[name] = Process(name=name, target=nativelauncher, args=(pargs, cwd))
running[name].start()
def start_daemon_process(name):
params = Params()
proc, pid_param = daemon_processes[name]
pid = params.get(pid_param, encoding='utf-8')
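  # if a PID was recorded for this daemon, only skip starting it when that process is still alive and running the expected module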
if pid is not None:
try:
os.kill(int(pid), 0)
with open(f'/proc/{pid}/cmdline') as f:
if proc in f.read():
# daemon is running
return
except (OSError, FileNotFoundError):
# process is dead
pass
cloudlog.info("starting daemon %s" % name)
proc = subprocess.Popen(['python', '-m', proc],
stdin=open('/dev/null', 'r'),
stdout=open('/dev/null', 'w'),
stderr=open('/dev/null', 'w'),
preexec_fn=os.setpgrp)
params.put(pid_param, str(proc.pid))
def prepare_managed_process(p):
proc = managed_processes[p]
if isinstance(proc, str):
# import this python
cloudlog.info("preimporting %s" % proc)
importlib.import_module(proc)
elif os.path.isfile(os.path.join(BASEDIR, proc[0], "Makefile")):
# build this process
cloudlog.info("building %s" % (proc,))
try:
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
except subprocess.CalledProcessError:
# make clean if the build failed
cloudlog.warning("building %s failed, make clean" % (proc, ))
subprocess.check_call(["make", "clean"], cwd=os.path.join(BASEDIR, proc[0]))
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
def join_process(process, timeout):
# Process().join(timeout) will hang due to a python 3 bug: https://bugs.python.org/issue28382
# We have to poll the exitcode instead
t = time.time()
while time.time() - t < timeout and process.exitcode is None:
time.sleep(0.001)
def kill_managed_process(name):
if name not in running or name not in managed_processes:
return
cloudlog.info("killing %s" % name)
if running[name].exitcode is None:
if name in interrupt_processes:
os.kill(running[name].pid, signal.SIGINT)
elif name in kill_processes:
os.kill(running[name].pid, signal.SIGKILL)
else:
running[name].terminate()
join_process(running[name], 5)
if running[name].exitcode is None:
if name in unkillable_processes:
        cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 seconds if it doesn't die" % name)
join_process(running[name], 15)
if running[name].exitcode is None:
cloudlog.critical("FORCE REBOOTING PHONE!")
os.system("date >> /sdcard/unkillable_reboot")
os.system("reboot")
raise RuntimeError
else:
cloudlog.info("killing %s with SIGKILL" % name)
os.kill(running[name].pid, signal.SIGKILL)
running[name].join()
cloudlog.info("%s is dead with %d" % (name, running[name].exitcode))
del running[name]
def cleanup_all_processes(signal, frame):
cloudlog.info("caught ctrl-c %s %s" % (signal, frame))
if ANDROID:
pm_apply_packages('disable')
for name in list(running.keys()):
kill_managed_process(name)
cloudlog.info("everything is dead")
# ****************** run loop ******************
def manager_init(should_register=True):
if should_register:
reg_res = register()
if reg_res:
dongle_id = reg_res
else:
raise Exception("server registration failed")
else:
dongle_id = "c"*16
# set dongle id
cloudlog.info("dongle id is " + dongle_id)
os.environ['DONGLE_ID'] = dongle_id
cloudlog.info("dirty is %d" % dirty)
if not dirty:
os.environ['CLEAN'] = '1'
cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty, is_eon=True)
crash.bind_user(id=dongle_id)
crash.bind_extra(version=version, dirty=dirty, is_eon=True)
os.umask(0)
try:
os.mkdir(ROOT, 0o777)
except OSError:
pass
# ensure shared libraries are readable by apks
if ANDROID:
os.chmod(BASEDIR, 0o755)
os.chmod(os.path.join(BASEDIR, "cereal"), 0o755)
os.chmod(os.path.join(BASEDIR, "cereal", "libmessaging_shared.so"), 0o755)
def manager_thread():
shutdownd = Process(name="shutdownd", target=launcher, args=("selfdrive.shutdownd",))
shutdownd.start()
# now loop
thermal_sock = messaging.sub_sock('thermal')
if os.getenv("GET_CPU_USAGE"):
proc_sock = messaging.sub_sock('procLog', conflate=True)
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
# save boot log
subprocess.call(["./loggerd", "--bootlog"], cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
params = Params()
# start daemon processes
for p in daemon_processes:
start_daemon_process(p)
# start persistent processes
for p in persistent_processes:
start_managed_process(p)
# start offroad
if ANDROID:
pm_apply_packages('enable')
start_offroad()
if os.getenv("NOBOARD") is None:
start_managed_process("pandad")
if os.getenv("BLOCK") is not None:
for k in os.getenv("BLOCK").split(","):
del managed_processes[k]
logger_dead = False
start_t = time.time()
first_proc = None
while 1:
msg = messaging.recv_sock(thermal_sock, wait=True)
# heavyweight batch processes are gated on favorable thermal conditions
if msg.thermal.thermalStatus >= ThermalStatus.yellow:
for p in green_temp_processes:
if p in persistent_processes:
kill_managed_process(p)
else:
for p in green_temp_processes:
if p in persistent_processes:
start_managed_process(p)
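    # mark loggerd for shutdown when less than 5% of storage is free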
if msg.thermal.freeSpace < 0.05:
logger_dead = True
if msg.thermal.started and "driverview" not in running:
for p in car_started_processes:
if p == "loggerd" and logger_dead:
kill_managed_process(p)
else:
start_managed_process(p)
else:
logger_dead = False
for p in reversed(car_started_processes):
kill_managed_process(p)
# this is ugly
if "driverview" not in running and params.get("IsDriverViewEnabled") == b"1":
start_managed_process("driverview")
elif "driverview" in running and params.get("IsDriverViewEnabled") == b"0":
kill_managed_process("driverview")
# check the status of all processes, did any of them die?
running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if running[p].is_alive() else "\u001b[31m", p) for p in running]
cloudlog.debug(' '.join(running_list))
# Exit main loop when uninstall is needed
if params.get("DoUninstall", encoding='utf8') == "1":
break
if os.getenv("GET_CPU_USAGE"):
dt = time.time() - start_t
# Get first sample
if dt > 30 and first_proc is None:
first_proc = messaging.recv_sock(proc_sock)
# Get last sample and exit
if dt > 90:
last_proc = messaging.recv_sock(proc_sock, wait=True)
cleanup_all_processes(None, None)
sys.exit(print_cpu_usage(first_proc, last_proc))
def manager_prepare(spinner=None):
# build all processes
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# Spinner has to start from 70 here
total = 100.0 if prebuilt else 30.0
for i, p in enumerate(managed_processes):
if spinner is not None:
spinner.update("%d" % ((100.0 - total) + total * (i + 1) / len(managed_processes),))
prepare_managed_process(p)
def uninstall():
cloudlog.warning("uninstalling")
with open('/cache/recovery/command', 'w') as f:
f.write('--wipe_data\n')
# IPowerManager.reboot(confirm=false, reason="recovery", wait=true)
android.reboot(reason="recovery")
def main():
os.environ['PARAMS_PATH'] = PARAMS
# the flippening!
os.system('LD_LIBRARY_PATH="" content insert --uri content://settings/system --bind name:s:user_rotation --bind value:i:1')
# disable bluetooth
os.system('service call bluetooth_manager 8')
params = Params()
params.manager_start()
default_params = [
("CommunityFeaturesToggle", "0"),
("CompletedTrainingVersion", "0"),
("IsRHD", "0"),
("IsMetric", "0"),
("RecordFront", "0"),
("HasAcceptedTerms", "0"),
("HasCompletedSetup", "0"),
("IsUploadRawEnabled", "1"),
("IsLdwEnabled", "1"),
("IsGeofenceEnabled", "-1"),
("SpeedLimitOffset", "0"),
("LongitudinalControl", "0"),
("LimitSetSpeed", "0"),
("LimitSetSpeedNeural", "0"),
("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')),
("OpenpilotEnabledToggle", "1"),
("LaneChangeEnabled", "1"),
("IsDriverViewEnabled", "0"),
]
# set unset params
for k, v in default_params:
if params.get(k) is None:
params.put(k, v)
# is this chffrplus?
if os.getenv("PASSIVE") is not None:
params.put("Passive", str(int(os.getenv("PASSIVE"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
if ANDROID:
update_apks()
manager_init()
manager_prepare(spinner)
spinner.close()
if os.getenv("PREPAREONLY") is not None:
return
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except SystemExit:
raise
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
cleanup_all_processes(None, None)
if params.get("DoUninstall", encoding='utf8') == "1":
uninstall()
if __name__ == "__main__":
try:
main()
except Exception:
add_logentries_handler(cloudlog)
cloudlog.exception("Manager failed to start")
# Show last 3 lines of traceback
error = traceback.format_exc(3)
error = "Manager failed to start\n \n" + error
with TextWindow(error) as t:
t.wait_for_exit()
raise
# manual exit because we are forked
sys.exit(0)
|
utils.py
|
import jwtoken
import threading
import os
m3ustr = '#EXTM3U x-tvg-url="http://botallen.live/epg.xml.gz" \n\n'
kodiPropLicenseType = "#KODIPROP:inputstream.adaptive.license_type=com.widevine.alpha"
def processTokenChunks(channelList):
global m3ustr
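    # each worker thread appends its chunk's playlist entries to the shared module-level m3ustr string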
kodiPropLicenseUrl = ""
if not channelList:
        print("Channel list is empty... Exiting")
exit(1)
for channel in channelList:
ls_session_key = jwtoken.generateJWT(channel['channel_id'], iterative=False)
if ls_session_key != "":
licenseUrl = channel['channel_license_url'] + "&ls_session=" + ls_session_key
kodiPropLicenseUrl = "#KODIPROP:inputstream.adaptive.license_key=" + licenseUrl
else:
print("Didn't get license for channel: Id: {0} Name:{1}".format(channel['channel_id'],
channel['channel_name']))
            print('Continuing... Please get the license manually for channel:', channel['channel_name'])
m3ustr += "#EXTINF:-1 " + "tvg-id=ts" + channel['channel_id'] + " tvg-logo=" + channel['channel_logo'] + " group-title=" + channel['channel_genre'] + ", "
m3ustr += channel['channel_name'] + "\n" + kodiPropLicenseType + "\n" + kodiPropLicenseUrl + "\n" + channel['channel_url'] + "\n\n"
def m3ugen():
ts = []
global m3ustr
channelList = jwtoken.getUserChannelSubscribedList()
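    # process the subscribed channels in chunks of 5, one thread per chunk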
for i in range(0, len(channelList), 5):
        t = threading.Thread(target=processTokenChunks, args=(channelList[i:i + 5],))
ts.append(t)
t.start()
for t in ts:
t.join()
print("================================================================")
print("Found total {0} channels subscribed by user \nSaving them to m3u file".format(len(channelList)))
print("================================================================")
saveM3ustringtofile(m3ustr)
def saveM3ustringtofile(m3ustr):
with open("output/tataSkyChannels.m3u", "w") as allChannelPlaylistFile:
print("-----Writing playlist to file------")
allChannelPlaylistFile.write(m3ustr)
# file_exists = os.path.exists('output/allChannelPlaylist.m3u')
# print("M3U file physical exists? :: ", file_exists)
# print("M3U location is :: ", os.path.abspath("allChannelPlaylist.m3u"))
# print("Path :: ", str(os.getcwd()) + "/output/allChannelPlaylist.m3u")
def getPrintNote():
    s = "**********************************************************\n" + "Welcome To TataSky Channel Generation Script\n" + \
        "**********************************************************\n" + \
        "- Using this script you can generate playable links based on the channels you have subscribed to \n" + \
        "- You can always read the README.md file if you don't know how to use the generated file \n" + \
        "- You can log in using your password or by generating an OTP; in both cases you need to enter the Registered Mobile Number \n" + \
        "\n Caution: This doesn't promote any kind of hacking or compromising anyone's details"
return s
if __name__ == '__main__':
m3ugen()
|
auth.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2021 Snowflake Computing Inc. All rights reserved.
#
import codecs
import copy
import json
import logging
import tempfile
import time
import uuid
from datetime import datetime
from os import getenv, makedirs, mkdir, path, remove, removedirs, rmdir
from os.path import expanduser
from threading import Lock, Thread
from typing import Dict, Optional, Union
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.serialization import (
Encoding,
NoEncryption,
PrivateFormat,
load_der_private_key,
load_pem_private_key,
)
from .auth_keypair import AuthByKeyPair
from .auth_usrpwdmfa import AuthByUsrPwdMfa
from .compat import IS_LINUX, IS_MACOS, IS_WINDOWS, urlencode
from .constants import (
HTTP_HEADER_ACCEPT,
HTTP_HEADER_CONTENT_TYPE,
HTTP_HEADER_SERVICE_NAME,
HTTP_HEADER_USER_AGENT,
PARAMETER_CLIENT_REQUEST_MFA_TOKEN,
PARAMETER_CLIENT_STORE_TEMPORARY_CREDENTIAL,
)
from .description import (
COMPILER,
IMPLEMENTATION,
OPERATING_SYSTEM,
PLATFORM,
PYTHON_VERSION,
)
from .errorcode import ER_FAILED_TO_CONNECT_TO_DB
from .errors import (
BadGatewayError,
DatabaseError,
Error,
ForbiddenError,
ProgrammingError,
ServiceUnavailableError,
)
from .network import (
ACCEPT_TYPE_APPLICATION_SNOWFLAKE,
CONTENT_TYPE_APPLICATION_JSON,
ID_TOKEN_INVALID_LOGIN_REQUEST_GS_CODE,
KEY_PAIR_AUTHENTICATOR,
PYTHON_CONNECTOR_USER_AGENT,
ReauthenticationRequest,
)
from .options import installed_keyring, keyring
from .sqlstate import SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED
from .version import VERSION
logger = logging.getLogger(__name__)
# Cache directory
CACHE_ROOT_DIR = (
getenv("SF_TEMPORARY_CREDENTIAL_CACHE_DIR")
or expanduser("~")
or tempfile.gettempdir()
)
if IS_WINDOWS:
CACHE_DIR = path.join(CACHE_ROOT_DIR, "AppData", "Local", "Snowflake", "Caches")
elif IS_MACOS:
CACHE_DIR = path.join(CACHE_ROOT_DIR, "Library", "Caches", "Snowflake")
else:
CACHE_DIR = path.join(CACHE_ROOT_DIR, ".cache", "snowflake")
if not path.exists(CACHE_DIR):
try:
makedirs(CACHE_DIR, mode=0o700)
except Exception as ex:
logger.debug("cannot create a cache directory: [%s], err=[%s]", CACHE_DIR, ex)
CACHE_DIR = None
logger.debug("cache directory: %s", CACHE_DIR)
# temporary credential cache
TEMPORARY_CREDENTIAL = {}
TEMPORARY_CREDENTIAL_LOCK = Lock()
# temporary credential cache file name
TEMPORARY_CREDENTIAL_FILE = "temporary_credential.json"
TEMPORARY_CREDENTIAL_FILE = (
path.join(CACHE_DIR, TEMPORARY_CREDENTIAL_FILE) if CACHE_DIR else ""
)
# temporary credential cache lock directory name
TEMPORARY_CREDENTIAL_FILE_LOCK = TEMPORARY_CREDENTIAL_FILE + ".lck"
# keyring
KEYRING_SERVICE_NAME = "net.snowflake.temporary_token"
KEYRING_USER = "temp_token"
KEYRING_DRIVER_NAME = "SNOWFLAKE-PYTHON-DRIVER"
ID_TOKEN = "ID_TOKEN"
MFA_TOKEN = "MFATOKEN"
class Auth(object):
"""Snowflake Authenticator."""
def __init__(self, rest):
self._rest = rest
@staticmethod
def base_auth_data(
user,
account,
application,
internal_application_name,
internal_application_version,
ocsp_mode,
login_timeout,
network_timeout=None,
):
return {
"data": {
"CLIENT_APP_ID": internal_application_name,
"CLIENT_APP_VERSION": internal_application_version,
"SVN_REVISION": VERSION[3],
"ACCOUNT_NAME": account,
"LOGIN_NAME": user,
"CLIENT_ENVIRONMENT": {
"APPLICATION": application,
"OS": OPERATING_SYSTEM,
"OS_VERSION": PLATFORM,
"PYTHON_VERSION": PYTHON_VERSION,
"PYTHON_RUNTIME": IMPLEMENTATION,
"PYTHON_COMPILER": COMPILER,
"OCSP_MODE": ocsp_mode.name,
"TRACING": logger.getEffectiveLevel(),
"LOGIN_TIMEOUT": login_timeout,
"NETWORK_TIMEOUT": network_timeout,
},
},
}
def authenticate(
self,
auth_instance,
account,
user,
database=None,
schema=None,
warehouse=None,
role=None,
passcode=None,
passcode_in_password=False,
mfa_callback=None,
password_callback=None,
session_parameters=None,
timeout=120,
) -> Dict[str, Union[str, int, bool]]:
logger.debug("authenticate")
if session_parameters is None:
session_parameters = {}
request_id = str(uuid.uuid4())
headers = {
HTTP_HEADER_CONTENT_TYPE: CONTENT_TYPE_APPLICATION_JSON,
HTTP_HEADER_ACCEPT: ACCEPT_TYPE_APPLICATION_SNOWFLAKE,
HTTP_HEADER_USER_AGENT: PYTHON_CONNECTOR_USER_AGENT,
}
if HTTP_HEADER_SERVICE_NAME in session_parameters:
headers[HTTP_HEADER_SERVICE_NAME] = session_parameters[
HTTP_HEADER_SERVICE_NAME
]
url = "/session/v1/login-request"
body_template = Auth.base_auth_data(
user,
account,
self._rest._connection.application,
self._rest._connection._internal_application_name,
self._rest._connection._internal_application_version,
self._rest._connection._ocsp_mode(),
self._rest._connection._login_timeout,
self._rest._connection._network_timeout,
)
body = copy.deepcopy(body_template)
# updating request body
logger.debug("assertion content: %s", auth_instance.assertion_content)
auth_instance.update_body(body)
logger.debug(
"account=%s, user=%s, database=%s, schema=%s, "
"warehouse=%s, role=%s, request_id=%s",
account,
user,
database,
schema,
warehouse,
role,
request_id,
)
url_parameters = {"request_id": request_id}
if database is not None:
url_parameters["databaseName"] = database
if schema is not None:
url_parameters["schemaName"] = schema
if warehouse is not None:
url_parameters["warehouse"] = warehouse
if role is not None:
url_parameters["roleName"] = role
url = url + "?" + urlencode(url_parameters)
# first auth request
if passcode_in_password:
body["data"]["EXT_AUTHN_DUO_METHOD"] = "passcode"
elif passcode:
body["data"]["EXT_AUTHN_DUO_METHOD"] = "passcode"
body["data"]["PASSCODE"] = passcode
if session_parameters:
body["data"]["SESSION_PARAMETERS"] = session_parameters
logger.debug(
"body['data']: %s",
{k: v for (k, v) in body["data"].items() if k != "PASSWORD"},
)
# accommodate any authenticator specific timeout requirements here.
# login_timeout comes from user configuration.
# Between login timeout and auth specific
# timeout use whichever value is smaller
if hasattr(auth_instance, "get_timeout"):
logger.debug(
f"Authenticator, {type(auth_instance).__name__}, implements get_timeout"
)
auth_timeout = min(
self._rest._connection.login_timeout, auth_instance.get_timeout()
)
else:
auth_timeout = self._rest._connection.login_timeout
logger.debug(f"Timeout set to {auth_timeout}")
try:
ret = self._rest._post_request(
url,
headers,
json.dumps(body),
timeout=auth_timeout,
socket_timeout=auth_timeout,
)
except ForbiddenError as err:
# HTTP 403
raise err.__class__(
msg=(
"Failed to connect to DB. "
"Verify the account name is correct: {host}:{port}. "
"{message}"
).format(
host=self._rest._host, port=self._rest._port, message=str(err)
),
errno=ER_FAILED_TO_CONNECT_TO_DB,
sqlstate=SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED,
)
except (ServiceUnavailableError, BadGatewayError) as err:
# HTTP 502/504
raise err.__class__(
msg=(
"Failed to connect to DB. "
"Service is unavailable: {host}:{port}. "
"{message}"
).format(
host=self._rest._host, port=self._rest._port, message=str(err)
),
errno=ER_FAILED_TO_CONNECT_TO_DB,
sqlstate=SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED,
)
# waiting for MFA authentication
if ret["data"].get("nextAction") == "EXT_AUTHN_DUO_ALL":
body["inFlightCtx"] = ret["data"]["inFlightCtx"]
body["data"]["EXT_AUTHN_DUO_METHOD"] = "push"
self.ret = {"message": "Timeout", "data": {}}
def post_request_wrapper(self, url, headers, body):
# get the MFA response
self.ret = self._rest._post_request(
url, headers, body, timeout=self._rest._connection.login_timeout
)
# send new request to wait until MFA is approved
t = Thread(
target=post_request_wrapper, args=[self, url, headers, json.dumps(body)]
)
t.daemon = True
t.start()
if callable(mfa_callback):
c = mfa_callback()
while not self.ret or self.ret.get("message") == "Timeout":
next(c)
else:
t.join(timeout=timeout)
ret = self.ret
if ret and ret["data"].get("nextAction") == "EXT_AUTHN_SUCCESS":
body = copy.deepcopy(body_template)
body["inFlightCtx"] = ret["data"]["inFlightCtx"]
# final request to get tokens
ret = self._rest._post_request(
url,
headers,
json.dumps(body),
timeout=self._rest._connection.login_timeout,
socket_timeout=self._rest._connection.login_timeout,
)
elif not ret or not ret["data"].get("token"):
                # no token is returned.
Error.errorhandler_wrapper(
self._rest._connection,
None,
DatabaseError,
{
"msg": (
"Failed to connect to DB. MFA "
"authentication failed: {"
"host}:{port}. {message}"
).format(
host=self._rest._host,
port=self._rest._port,
message=ret["message"],
),
"errno": ER_FAILED_TO_CONNECT_TO_DB,
"sqlstate": SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED,
},
)
return session_parameters # required for unit test
elif ret["data"].get("nextAction") == "PWD_CHANGE":
if callable(password_callback):
body = copy.deepcopy(body_template)
body["inFlightCtx"] = ret["data"]["inFlightCtx"]
body["data"]["LOGIN_NAME"] = user
body["data"]["PASSWORD"] = (
auth_instance.password
if hasattr(auth_instance, "password")
else None
)
body["data"]["CHOSEN_NEW_PASSWORD"] = password_callback()
# New Password input
ret = self._rest._post_request(
url,
headers,
json.dumps(body),
timeout=self._rest._connection.login_timeout,
socket_timeout=self._rest._connection.login_timeout,
)
logger.debug("completed authentication")
if not ret["success"]:
errno = ret.get("code", ER_FAILED_TO_CONNECT_TO_DB)
if errno == ID_TOKEN_INVALID_LOGIN_REQUEST_GS_CODE:
# clear stored id_token if failed to connect because of id_token
# raise an exception for reauth without id_token
self._rest.id_token = None
delete_temporary_credential(self._rest._host, user, ID_TOKEN)
raise ReauthenticationRequest(
ProgrammingError(
msg=ret["message"],
errno=int(errno),
sqlstate=SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED,
)
)
if type(auth_instance) is AuthByKeyPair:
logger.debug(
"JWT Token authentication failed. "
"Token expires at: %s. "
"Current Time: %s",
str(auth_instance._jwt_token_exp),
str(datetime.utcnow()),
)
if type(auth_instance) is AuthByUsrPwdMfa:
delete_temporary_credential(self._rest._host, user, MFA_TOKEN)
Error.errorhandler_wrapper(
self._rest._connection,
None,
DatabaseError,
{
"msg": (
"Failed to connect to DB: {host}:{port}. " "{message}"
).format(
host=self._rest._host,
port=self._rest._port,
message=ret["message"],
),
"errno": ER_FAILED_TO_CONNECT_TO_DB,
"sqlstate": SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED,
},
)
else:
logger.debug(
"token = %s", "******" if ret["data"]["token"] is not None else "NULL"
)
logger.debug(
"master_token = %s",
"******" if ret["data"]["masterToken"] is not None else "NULL",
)
logger.debug(
"id_token = %s",
"******" if ret["data"].get("idToken") is not None else "NULL",
)
logger.debug(
"mfa_token = %s",
"******" if ret["data"].get("mfaToken") is not None else "NULL",
)
self._rest.update_tokens(
ret["data"]["token"],
ret["data"]["masterToken"],
master_validity_in_seconds=ret["data"].get("masterValidityInSeconds"),
id_token=ret["data"].get("idToken"),
mfa_token=ret["data"].get("mfaToken"),
)
self.write_temporary_credentials(
self._rest._host, user, session_parameters, ret
)
if "sessionId" in ret["data"]:
self._rest._connection._session_id = ret["data"]["sessionId"]
if "sessionInfo" in ret["data"]:
session_info = ret["data"]["sessionInfo"]
self._rest._connection._database = session_info.get("databaseName")
self._rest._connection._schema = session_info.get("schemaName")
self._rest._connection._warehouse = session_info.get("warehouseName")
self._rest._connection._role = session_info.get("roleName")
if "parameters" in ret["data"]:
session_parameters.update(
{p["name"]: p["value"] for p in ret["data"]["parameters"]}
)
self._rest._connection._update_parameters(session_parameters)
return session_parameters
def _read_temporary_credential(self, host, user, cred_type):
cred = None
if IS_MACOS or IS_WINDOWS:
if not installed_keyring:
logger.debug(
"Dependency 'keyring' is not installed, cannot cache id token. You might experience "
"multiple authentication pop ups while using ExternalBrowser Authenticator. To avoid "
"this please install keyring module using the following command : pip install "
"snowflake-connector-python[secure-local-storage]"
)
return
try:
cred = keyring.get_password(
build_temporary_credential_name(host, user, cred_type), user.upper()
)
except keyring.errors.KeyringError as ke:
logger.error(
"Could not retrieve {} from secure storage : {}".format(
cred_type, str(ke)
)
)
elif IS_LINUX:
read_temporary_credential_file()
cred = TEMPORARY_CREDENTIAL.get(host.upper(), {}).get(
build_temporary_credential_name(host, user, cred_type)
)
else:
logger.debug("OS not supported for Local Secure Storage")
return cred
def read_temporary_credentials(self, host, user, session_parameters):
if session_parameters.get(PARAMETER_CLIENT_STORE_TEMPORARY_CREDENTIAL, False):
self._rest.id_token = self._read_temporary_credential(host, user, ID_TOKEN)
if session_parameters.get(PARAMETER_CLIENT_REQUEST_MFA_TOKEN, False):
self._rest.mfa_token = self._read_temporary_credential(
host, user, MFA_TOKEN
)
def _write_temporary_credential(self, host, user, cred_type, cred):
if not cred:
logger.debug(
"no credential is given when try to store temporary credential"
)
return
if IS_MACOS or IS_WINDOWS:
if not installed_keyring:
logger.debug(
"Dependency 'keyring' is not installed, cannot cache id token. You might experience "
"multiple authentication pop ups while using ExternalBrowser Authenticator. To avoid "
"this please install keyring module using the following command : pip install "
"snowflake-connector-python[secure-local-storage]"
)
return
try:
keyring.set_password(
build_temporary_credential_name(host, user, cred_type),
user.upper(),
cred,
)
except keyring.errors.KeyringError as ke:
logger.error("Could not store id_token to keyring, %s", str(ke))
elif IS_LINUX:
write_temporary_credential_file(
host, build_temporary_credential_name(host, user, cred_type), cred
)
else:
logger.debug("OS not supported for Local Secure Storage")
def write_temporary_credentials(self, host, user, session_parameters, response):
if self._rest._connection.consent_cache_id_token and session_parameters.get(
PARAMETER_CLIENT_STORE_TEMPORARY_CREDENTIAL, False
):
self._write_temporary_credential(
host, user, ID_TOKEN, response["data"].get("idToken")
)
if session_parameters.get(PARAMETER_CLIENT_REQUEST_MFA_TOKEN, False):
self._write_temporary_credential(
host, user, MFA_TOKEN, response["data"].get("mfaToken")
)
return
def flush_temporary_credentials():
"""Flush temporary credentials in memory into disk. Need to hold TEMPORARY_CREDENTIAL_LOCK."""
global TEMPORARY_CREDENTIAL
global TEMPORARY_CREDENTIAL_FILE
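    # retry for up to ~10 seconds to acquire the lock directory before writing the cache file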
for _ in range(10):
if lock_temporary_credential_file():
break
time.sleep(1)
else:
logger.debug(
"The lock file still persists after the maximum wait time."
"Will ignore it and write temporary credential file: %s",
TEMPORARY_CREDENTIAL_FILE,
)
try:
with open(
TEMPORARY_CREDENTIAL_FILE, "w", encoding="utf-8", errors="ignore"
) as f:
json.dump(TEMPORARY_CREDENTIAL, f)
except Exception as ex:
logger.debug(
"Failed to write a credential file: " "file=[%s], err=[%s]",
TEMPORARY_CREDENTIAL_FILE,
ex,
)
finally:
unlock_temporary_credential_file()
def write_temporary_credential_file(host, cred_name, cred):
"""Writes temporary credential file when OS is Linux."""
if not CACHE_DIR:
# no cache is enabled
return
global TEMPORARY_CREDENTIAL
global TEMPORARY_CREDENTIAL_LOCK
with TEMPORARY_CREDENTIAL_LOCK:
# update the cache
host_data = TEMPORARY_CREDENTIAL.get(host.upper(), {})
host_data[cred_name.upper()] = cred
TEMPORARY_CREDENTIAL[host.upper()] = host_data
flush_temporary_credentials()
def read_temporary_credential_file():
"""Reads temporary credential file when OS is Linux."""
if not CACHE_DIR:
# no cache is enabled
return
global TEMPORARY_CREDENTIAL
global TEMPORARY_CREDENTIAL_LOCK
global TEMPORARY_CREDENTIAL_FILE
with TEMPORARY_CREDENTIAL_LOCK:
for _ in range(10):
if lock_temporary_credential_file():
break
time.sleep(1)
else:
logger.debug(
"The lock file still persists. Will ignore and "
"write the temporary credential file: %s",
TEMPORARY_CREDENTIAL_FILE,
)
try:
with codecs.open(
TEMPORARY_CREDENTIAL_FILE, "r", encoding="utf-8", errors="ignore"
) as f:
TEMPORARY_CREDENTIAL = json.load(f)
return TEMPORARY_CREDENTIAL
except Exception as ex:
            logger.debug(
                "Failed to read a credential file. The file may not "
                "exist: file=[%s], err=[%s]",
TEMPORARY_CREDENTIAL_FILE,
ex,
)
finally:
unlock_temporary_credential_file()
return None
def lock_temporary_credential_file():
global TEMPORARY_CREDENTIAL_FILE_LOCK
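    # creating the lock directory is atomic, so a pre-existing directory means another process holds the lock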
try:
mkdir(TEMPORARY_CREDENTIAL_FILE_LOCK)
return True
except OSError:
        logger.debug(
            "Temporary cache file lock already exists. Another "
            "process may be updating the temporary credential file."
)
return False
def unlock_temporary_credential_file():
global TEMPORARY_CREDENTIAL_FILE_LOCK
try:
rmdir(TEMPORARY_CREDENTIAL_FILE_LOCK)
return True
except OSError:
logger.debug("Temporary cache file lock no longer exists.")
return False
def delete_temporary_credential(host, user, cred_type):
if (IS_MACOS or IS_WINDOWS) and installed_keyring:
try:
keyring.delete_password(
build_temporary_credential_name(host, user, cred_type), user.upper()
)
except Exception as ex:
logger.error("Failed to delete credential in the keyring: err=[%s]", ex)
elif IS_LINUX:
temporary_credential_file_delete_password(host, user, cred_type)
def temporary_credential_file_delete_password(host, user, cred_type):
"""Remove credential from temporary credential file when OS is Linux."""
if not CACHE_DIR:
# no cache is enabled
return
global TEMPORARY_CREDENTIAL
global TEMPORARY_CREDENTIAL_LOCK
with TEMPORARY_CREDENTIAL_LOCK:
# update the cache
host_data = TEMPORARY_CREDENTIAL.get(host.upper(), {})
host_data.pop(build_temporary_credential_name(host, user, cred_type), None)
if not host_data:
TEMPORARY_CREDENTIAL.pop(host.upper(), None)
else:
TEMPORARY_CREDENTIAL[host.upper()] = host_data
flush_temporary_credentials()
def delete_temporary_credential_file():
"""Deletes temporary credential file and its lock file."""
global TEMPORARY_CREDENTIAL_FILE
try:
remove(TEMPORARY_CREDENTIAL_FILE)
except Exception as ex:
logger.debug(
"Failed to delete a credential file: " "file=[%s], err=[%s]",
TEMPORARY_CREDENTIAL_FILE,
ex,
)
try:
removedirs(TEMPORARY_CREDENTIAL_FILE_LOCK)
except Exception as ex:
logger.debug("Failed to delete credential lock file: err=[%s]", ex)
def build_temporary_credential_name(host, user, cred_type):
return "{host}:{user}:{driver}:{cred}".format(
host=host.upper(), user=user.upper(), driver=KEYRING_DRIVER_NAME, cred=cred_type
)
def get_token_from_private_key(
user: str, account: str, privatekey_path: str, key_password: Optional[str]
) -> str:
encoded_password = key_password.encode() if key_password is not None else None
with open(privatekey_path, "rb") as key:
p_key = load_pem_private_key(
key.read(), password=encoded_password, backend=default_backend()
)
private_key = p_key.private_bytes(
encoding=Encoding.DER,
format=PrivateFormat.PKCS8,
encryption_algorithm=NoEncryption(),
)
auth_instance = AuthByKeyPair(private_key, 1440 * 60) # token valid for 24 hours
return auth_instance.authenticate(
KEY_PAIR_AUTHENTICATOR, None, account, user, key_password
)
def get_public_key_fingerprint(private_key_file: str, password: str) -> str:
"""Helper function to generate the public key fingerprint from the private key file"""
with open(private_key_file, "rb") as key:
p_key = load_pem_private_key(
key.read(), password=password.encode(), backend=default_backend()
)
private_key = p_key.private_bytes(
encoding=Encoding.DER,
format=PrivateFormat.PKCS8,
encryption_algorithm=NoEncryption(),
)
private_key = load_der_private_key(
data=private_key, password=None, backend=default_backend()
)
return AuthByKeyPair.calculate_public_key_fingerprint(private_key)
|
__init__.py
|
import sys
import urllib2
from argparse import ArgumentParser
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from ConfigParser import RawConfigParser
from Queue import Queue
from thread import interrupt_main
from threading import Thread
from time import sleep
from traceback import print_exception
import logging
logger = logging.getLogger(__name__)
log_fmt = '%(asctime)s [%(levelname)s]: %(message)s (%(filename)s: %(lineno)d)'
logging.basicConfig(format=log_fmt)
class BugsnagAgent(object):
"""
The BugsnagAgent sits on your server and forwards exception payloads to
https://notify.bugsnag.com/.
It's designed to protect you against any latency spikes that may
occur talking across the internet in an exception handler.
"""
DEFAULTS = {
'endpoint': 'https://notify.bugsnag.com/',
'listen': '127.0.0.1',
'log_level': 'INFO',
'port': 3829,
'queue_length': 1000
}
FORWARDED_HEADERS = [
'bugsnag-sent-at',
'bugsnag-api-key',
'bugsnag-payload-version'
]
def __init__(self):
self.parse_config()
self.queue = Queue(self.DEFAULTS['queue_length'])
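        # bounded queue: when full, new payloads are dropped in enqueue() rather than growing memory without limit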
if self.log_level:
logger.setLevel(getattr(logging, self.log_level.upper()))
else:
logger.setLevel(getattr(logging, self.DEFAULTS['log_level']))
def parse_config(self):
"""
Initializes self.port, self.listen, self.endpoint and self.connection
"""
parser = ArgumentParser(description='Bugsnag Agent')
parser.add_argument(
'-c', '--config',
dest='config_file',
default='/etc/bugsnag.conf',
help='The path to your config file (default /etc/bugsnag.conf)'
)
parser.add_argument(
'-e', '--endpoint',
dest='endpoint',
help=("The URL of your Bugsnag server (default {})".format(
self.DEFAULTS['endpoint']))
)
parser.add_argument(
'-p', '--port',
dest='port',
type=int,
help=("The port to bind to (default {})".format(
self.DEFAULTS['port']))
)
parser.add_argument(
'-i', '--ip',
dest='listen',
            help=("The IP to listen on (use 0.0.0.0 to allow anyone to "
                  "connect, default {})".format(self.DEFAULTS['listen']))
)
parser.add_argument(
'-l', '--log-level',
dest='log_level',
help=("Logging verbosity, default {}".format(
self.DEFAULTS['log_level']))
)
args = parser.parse_args()
config = RawConfigParser()
config.read(args.config_file)
        # Iterate over arguments and set values in order of precedence:
# 1 - Arguments
# 2 - Config file
# 3 - Internal defaults
conf_opts = {"port": config.getint,
"endpoint": config.get,
"listen": config.get,
"log_level": config.get,
"ip": config.get}
for opt, _ in vars(args).iteritems():
if getattr(args, opt) is not None:
setattr(self, opt, getattr(args, opt))
elif config.has_option('bugsnag', opt) and opt in conf_opts:
setattr(self, opt, conf_opts[opt]('bugsnag', opt))
else:
setattr(self, opt, self.DEFAULTS[opt])
def start(self):
"""
Run the agent, and wait for a SIGINT or SIGTERM.
"""
try:
server = Thread(target=self._thread(self._server), name='server')
server.setDaemon(True)
server.start()
for _ in range(0, 10):
client = Thread(
target=self._thread(self._client),
name='client'
)
client.setDaemon(True)
client.start()
logger.info("Bugsnag Agent started. http://{ip}:{port} -> "
"{endpoint}".format(
ip=self.listen,
port=self.port,
endpoint=self.endpoint
))
while True:
sleep(1000)
except KeyboardInterrupt:
# give threads time to print exceptions
sleep(0.1)
def enqueue(self, body, headers={}):
"""
Add a new payload to the queue.
"""
try:
self.queue.put_nowait({
'body':body,
'headers':headers
})
logger.info("Enqueued {body_length} bytes "
"({queue_size}/{queue_max_size})".format(
body_length=len(body),
queue_size=self.queue.qsize(),
queue_max_size=self.queue.maxsize
)
)
except:
logger.info(
"Discarding report as queue is full: {}".format(repr(body)))
def _server(self):
"""
Run the HTTP server on (self.listen, self.port) that puts
payloads into the queue.
"""
server = HTTPServer((self.listen, self.port),
BugsnagHTTPRequestHandler)
server.bugsnag = self
server.serve_forever()
def _client(self):
"""
Continually monitor the queue and send anything in it to Bugsnag.
"""
while True:
request = self.queue.get(True)
body = request['body']
headers = request['headers']
logger.info("Sending {body_length} bytes ({queue_size}/"
"{queue_max_size})".format(
body_length=len(body),
queue_size=self.queue.qsize(),
queue_max_size=self.queue.maxsize
)
)
try:
req = urllib2.Request(self.endpoint, body, headers)
res = urllib2.urlopen(req)
res.read()
except urllib2.URLError as e:
if hasattr(e, 'code') and e.code in (400, 500):
logger.warning('Bad response, removing report ({code}: {msg})'.format(
code=e.code,
msg=e.msg
))
else:
logger.warning('Cannot send request. Retrying in 5 seconds')
if logger.isEnabledFor(logging.DEBUG):
print_exception(*sys.exc_info())
sleep(5)
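                    # re-queue the payload for another attempt (note: the original headers are not re-attached)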
self.enqueue(body)
def _thread(self, target):
def run():
try:
target()
except:
interrupt_main()
print_exception(*sys.exc_info())
pass
return run
class BugsnagHTTPRequestHandler(BaseHTTPRequestHandler):
def do_OPTIONS(self):
"""
Enable CORS while running on a different host
"""
self.send_response(200, "ok")
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Access-Control-Allow-Methods', 'GET, OPTIONS, POST, HEAD')
self.send_header("Access-Control-Allow-Headers", "*")
self.end_headers()
def do_GET(self):
"""
Show the current status of the agent
"""
self.send_response(200)
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
bugsnag = self.server.bugsnag
self.wfile.write("Bugsnag agent: {listen}:{port} -> {endpoint} " \
"({queue_size}/{queue_max_size})\n".format(
listen=bugsnag.listen,
port=bugsnag.port,
endpoint=bugsnag.endpoint,
queue_size=bugsnag.queue.qsize(),
queue_max_size=bugsnag.queue.maxsize
)
)
def do_POST(self):
"""
Accept a payload for forwarding
"""
bugsnag = self.server.bugsnag
body = self.rfile.read(int(self.headers['Content-Length']))
headers = {}
for key, value in self.headers.items():
if key.lower() in BugsnagAgent.FORWARDED_HEADERS:
headers[key] = value
bugsnag.enqueue(body=body, headers=headers)
response = "OK {ip}:{port} -> {endpoint} " \
"({queue_size}/{queue_max_size})\n".format(
ip=bugsnag.listen,
port=bugsnag.port,
endpoint=bugsnag.endpoint,
queue_size=bugsnag.queue.qsize(),
queue_max_size=bugsnag.queue.maxsize
)
try:
self.send_response(200)
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Content-Length', len(response))
self.end_headers()
self.wfile.write(response)
except:
logger.info('Client disconnected before waiting for response')
print_exception(*sys.exc_info())
logger.info('Continuing...')
if __name__ == '__main__':
BugsnagAgent().start()
def main():
BugsnagAgent().start()
|
top_k_acc.py
|
import multiprocessing as mp
import threading
from tqdm import tqdm
from joblib import Parallel, delayed
def top_k_acc(y_predicted, y_true, class_map, k):
'''Calculates the top_k-accuracy for the prediction of xgboost.'''
count_matching_species = 0
for i in range(len(y_predicted)):
pred = y_predicted[i]
_, sorted_species = zip(*reversed(sorted(zip(pred, list(class_map)))))
if y_true[i] in sorted_species[:k]:
count_matching_species += 1
return count_matching_species / len(y_predicted)
class top_k_accuracy():
    '''An attempt to speed up the calculation with multiple workers, but it ends up being slower.'''
def get_score(self, y_predicted, y_true, class_map, k):
self.y_true = y_true
self.class_map = class_map
self.k = k
self.y_predicted = y_predicted
jobs = []
for i in tqdm(range(len(self.y_predicted))):
count_matching_species = 0
#process = mp.Process(target=self.get_result, args=(i, count_matching_species))
#jobs.append(process)
thread = threading.Thread(target=self.get_result, args=(i, count_matching_species))
jobs.append(thread)
# Start the processes (i.e. calculate the random number lists)
for j in tqdm(jobs):
j.start()
# Ensure all of the processes have finished
for j in tqdm(jobs):
j.join()
return count_matching_species / len(self.y_predicted)
def get_result(self, i, count_matching_species):
pred = self.y_predicted[i]
_, sorted_species = zip(*reversed(sorted(zip(pred, list(self.class_map)))))
if self.y_true[i] in sorted_species[:self.k]:
count_matching_species += 1
class top_k_error_eval():
'''Calculates the top_k-accuracy for xgboost.'''
def __init__(self, species_map, y_valid, k):
self.species_map = species_map
self.y_valid = list(y_valid)
self.species_count = len(self.species_map)
self.k = k
def evaluate(self, y_predicted, _):
return ("top_" + str(self.k) + "_acc", 1 - top_k_acc(y_predicted, self.y_valid, self.species_map, self.k))
|
start_api_integ_base.py
|
from unittest import TestCase, skipIf
import threading
from subprocess import Popen
import time
import os
import random
from pathlib import Path
from tests.testing_utils import SKIP_DOCKER_MESSAGE, SKIP_DOCKER_TESTS
@skipIf(SKIP_DOCKER_TESTS, SKIP_DOCKER_MESSAGE)
class StartApiIntegBaseClass(TestCase):
template = None
binary_data_file = None
integration_dir = str(Path(__file__).resolve().parents[2])
@classmethod
def setUpClass(cls):
        # This is the directory for tests/integration which will be used to find the testdata
        # files for the integ tests
cls.template = cls.integration_dir + cls.template_path
if cls.binary_data_file:
cls.binary_data_file = os.path.join(cls.integration_dir, cls.binary_data_file)
cls.port = str(StartApiIntegBaseClass.random_port())
        cls.thread = threading.Thread(target=cls.start_api)  # pass the method itself; calling it here would run it synchronously
        cls.thread.daemon = True
        cls.thread.start()
        # Block until start-api has had time to come up, so the tests do not race the server
        time.sleep(5)
@classmethod
def start_api(cls):
command = "sam"
if os.getenv("SAM_CLI_DEV"):
command = "samdev"
cls.start_api_process = Popen([command, "local", "start-api", "-t", cls.template, "-p", cls.port])
# we need to wait some time for start-api to start, hence the sleep
time.sleep(5)
@classmethod
def tearDownClass(cls):
# After all the tests run, we need to kill the start-api process.
cls.start_api_process.kill()
@staticmethod
def random_port():
return random.randint(30000, 40000)
@staticmethod
def get_binary_data(filename):
if not filename:
return None
with open(filename, "rb") as fp:
return fp.read()
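# Hedged usage sketch: concrete test classes are expected to set `template_path`
# to a template under tests/integration and then talk to the local endpoint on
# `cls.port`. The class name, template path and use of `requests` below are made
# up for illustration.
#
#   import requests
#
#   class TestMyApi(StartApiIntegBaseClass):
#       template_path = "/testdata/start_api/template.yaml"
#
#       def test_root(self):
#           response = requests.get("http://127.0.0.1:{}/".format(self.port))
#           self.assertEqual(response.status_code, 200)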
|
ls_public_bucket.py
|
import logging
import re
import uuid
from multiprocessing import Manager, Process, cpu_count, current_process
from queue import Empty
import boto3
import click
import datacube
from botocore import UNSIGNED
from botocore.config import Config
from datacube.index.hl import Doc2Dataset
from datacube.utils import changes
from osgeo import osr
from ruamel.yaml import YAML
GUARDIAN = "GUARDIAN_QUEUE_EMPTY"
AWS_PDS_TXT_SUFFIX = "MTL.txt"
MTL_PAIRS_RE = re.compile(r'(\w+)\s=\s(.*)')
bands_ls8 = [('1', 'coastal_aerosol'),
('2', 'blue'),
('3', 'green'),
('4', 'red'),
('5', 'nir'),
('6', 'swir1'),
('7', 'swir2'),
('8', 'panchromatic'),
('9', 'cirrus'),
('10', 'lwir1'),
('11', 'lwir2'),
('QUALITY', 'quality')]
bands_ls7 = [('1', 'blue'),
('2', 'green'),
('3', 'red'),
('4', 'nir'),
('5', 'swir1'),
('7', 'swir2'),
('QUALITY', 'quality')]
def _parse_value(s):
s = s.strip('"')
for parser in [int, float]:
try:
return parser(s)
except ValueError:
pass
return s
def _parse_group(lines):
tree = {}
for line in lines:
match = MTL_PAIRS_RE.findall(line)
if match:
key, value = match[0]
if key == 'GROUP':
tree[value] = _parse_group(lines)
elif key == 'END_GROUP':
break
else:
tree[key] = _parse_value(value)
return tree
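# Worked example (hypothetical MTL snippet): nested GROUP/END_GROUP pairs become
# nested dicts, and _parse_value turns quoted and numeric strings into Python values.
#
#   >>> lines = iter([
#   ...     'GROUP = L1_METADATA_FILE',
#   ...     '  GROUP = PRODUCT_METADATA',
#   ...     '    SPACECRAFT_ID = "LANDSAT_8"',
#   ...     '    UTM_ZONE = 45',
#   ...     '  END_GROUP = PRODUCT_METADATA',
#   ...     'END_GROUP = L1_METADATA_FILE',
#   ... ])
#   >>> _parse_group(lines)
#   {'L1_METADATA_FILE': {'PRODUCT_METADATA': {'SPACECRAFT_ID': 'LANDSAT_8', 'UTM_ZONE': 45}}}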
def get_geo_ref_points(info):
return {
'ul': {'x': info['CORNER_UL_PROJECTION_X_PRODUCT'], 'y': info['CORNER_UL_PROJECTION_Y_PRODUCT']},
'ur': {'x': info['CORNER_UR_PROJECTION_X_PRODUCT'], 'y': info['CORNER_UR_PROJECTION_Y_PRODUCT']},
'll': {'x': info['CORNER_LL_PROJECTION_X_PRODUCT'], 'y': info['CORNER_LL_PROJECTION_Y_PRODUCT']},
'lr': {'x': info['CORNER_LR_PROJECTION_X_PRODUCT'], 'y': info['CORNER_LR_PROJECTION_Y_PRODUCT']},
}
def get_coords(geo_ref_points, spatial_ref):
t = osr.CoordinateTransformation(spatial_ref, spatial_ref.CloneGeogCS())
def transform(p):
lon, lat, z = t.TransformPoint(p['x'], p['y'])
return {'lon': lon, 'lat': lat}
return {key: transform(p) for key, p in geo_ref_points.items()}
def satellite_ref(sat):
"""
    Return the band list for the given satellite: Landsat 8 uses bands_ls8, Landsat 5/7 use bands_ls7
"""
if sat == 'LANDSAT_8':
sat_img = bands_ls8
elif sat == 'LANDSAT_7' or sat == 'LANDSAT_5':
sat_img = bands_ls7
else:
raise ValueError('Satellite data Not Supported')
return sat_img
def absolutify_paths(doc, bucket_name, obj_key):
objt_key = format_obj_key(obj_key)
for band in doc['image']['bands'].values():
band['path'] = get_s3_url(bucket_name, objt_key + '/' + band['path'])
return doc
def make_metadata_doc(mtl_data, bucket_name, object_key):
mtl_product_info = mtl_data['PRODUCT_METADATA']
mtl_metadata_info = mtl_data['METADATA_FILE_INFO']
satellite = mtl_product_info['SPACECRAFT_ID']
instrument = mtl_product_info['SENSOR_ID']
acquisition_date = mtl_product_info['DATE_ACQUIRED']
scene_center_time = mtl_product_info['SCENE_CENTER_TIME']
level = mtl_product_info['DATA_TYPE']
product_type = 'L1TP'
sensing_time = acquisition_date + ' ' + scene_center_time
cs_code = 32600 + mtl_data['PROJECTION_PARAMETERS']['UTM_ZONE']
label = mtl_metadata_info['LANDSAT_SCENE_ID']
spatial_ref = osr.SpatialReference()
spatial_ref.ImportFromEPSG(cs_code)
geo_ref_points = get_geo_ref_points(mtl_product_info)
coordinates = get_coords(geo_ref_points, spatial_ref)
bands = satellite_ref(satellite)
doc = {
'id': str(uuid.uuid5(uuid.NAMESPACE_URL, get_s3_url(bucket_name, object_key))),
'processing_level': level,
'product_type': product_type,
'creation_dt': str(acquisition_date),
'label': label,
'platform': {'code': satellite},
'instrument': {'name': instrument},
'extent': {
'from_dt': sensing_time,
'to_dt': sensing_time,
'center_dt': sensing_time,
'coord': coordinates,
},
'format': {'name': 'GeoTiff'},
'grid_spatial': {
'projection': {
'geo_ref_points': geo_ref_points,
'spatial_reference': 'EPSG:%s' % cs_code,
}
},
'image': {
'bands': {
band[1]: {
'path': mtl_product_info['FILE_NAME_BAND_' + band[0]],
'layer': 1,
} for band in bands
}
},
'lineage': {'source_datasets': {}},
}
doc = absolutify_paths(doc, bucket_name, object_key)
return doc
def format_obj_key(obj_key):
obj_key = '/'.join(obj_key.split("/")[:-1])
return obj_key
def get_s3_url(bucket_name, obj_key):
return 's3://{bucket_name}/{obj_key}'.format(
bucket_name=bucket_name, obj_key=obj_key)
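# Example (made-up bucket/key): format_obj_key drops the file name so that band
# paths from the MTL document can be resolved relative to the scene directory.
#
#   >>> format_obj_key('c1/L8/001/002/scene_id/scene_id_MTL.txt')
#   'c1/L8/001/002/scene_id'
#   >>> get_s3_url('landsat-pds', 'c1/L8/001/002/scene_id/scene_id_MTL.txt')
#   's3://landsat-pds/c1/L8/001/002/scene_id/scene_id_MTL.txt'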
def archive_document(doc, uri, index, sources_policy):
def get_ids(dataset):
ds = index.datasets.get(dataset.id, include_sources=True)
for source in ds.sources.values():
yield source.id
yield dataset.id
resolver = Doc2Dataset(index)
dataset, _ = resolver(doc, uri)
index.datasets.archive(get_ids(dataset))
logging.info("Archiving %s and all sources of %s", dataset.id, dataset.id)
def add_dataset(doc, uri, index, sources_policy):
logging.info("Indexing %s", uri)
resolver = Doc2Dataset(index)
dataset, err = resolver(doc, uri)
if err is not None:
logging.error("%s", err)
else:
try:
            index.datasets.add(dataset, sources_policy=sources_policy)  # Source policy to be checked in sentinel 2 dataset types
except changes.DocumentMismatchError:
index.datasets.update(dataset, {tuple(): changes.allow_any})
except Exception as e:
err = e
logging.error("Unhandled exception %s", e)
return dataset, err
def worker(config, bucket_name, prefix, suffix, start_date, end_date, func, unsafe, sources_policy, queue):
dc = datacube.Datacube(config=config)
index = dc.index
s3 = boto3.resource("s3", config=Config(signature_version=UNSIGNED))
safety = 'safe' if not unsafe else 'unsafe'
while True:
try:
key = queue.get(timeout=60)
if key == GUARDIAN:
break
logging.info("Processing %s %s", key, current_process())
obj = s3.Object(bucket_name, key).get()
raw = obj['Body'].read()
if suffix == AWS_PDS_TXT_SUFFIX:
# Attempt to process text document
raw_string = raw.decode('utf8')
txt_doc = _parse_group(iter(raw_string.split("\n")))['L1_METADATA_FILE']
data = make_metadata_doc(txt_doc, bucket_name, key)
else:
yaml = YAML(typ=safety, pure=False)
yaml.default_flow_style = False
data = yaml.load(raw)
uri = get_s3_url(bucket_name, key)
cdt = data['creation_dt']
            # Use the fact that lexicographical ordering of these date strings matches their chronological ordering
if cdt >= start_date and cdt < end_date:
logging.info("calling %s", func)
func(data, uri, index, sources_policy)
queue.task_done()
except Empty:
break
except EOFError:
break
def iterate_datasets(bucket_name, config, prefix, suffix, start_date, end_date, func, unsafe, sources_policy):
manager = Manager()
queue = manager.Queue()
s3 = boto3.resource('s3', config=Config(signature_version=UNSIGNED))
bucket = s3.Bucket(bucket_name)
logging.info("Bucket : %s prefix: %s ", bucket_name, str(prefix))
# safety = 'safe' if not unsafe else 'unsafe'
worker_count = cpu_count() * 2
    processes = []
for i in range(worker_count):
proc = Process(target=worker, args=(config, bucket_name, prefix, suffix, start_date, end_date, func, unsafe, sources_policy, queue,))
        processes.append(proc)
proc.start()
for obj in bucket.objects.filter(Prefix=str(prefix)):
if (obj.key.endswith(suffix)):
queue.put(obj.key)
for i in range(worker_count):
queue.put(GUARDIAN)
    for proc in processes:
proc.join()
@click.command(help="Enter Bucket name. Optional to enter configuration file to access a different database")
@click.argument('bucket_name')
@click.option(
'--config',
'-c',
help="Pass the configuration file to access the database",
type=click.Path(exists=True)
)
@click.option('--prefix', '-p', help="Pass the prefix of the object to the bucket")
@click.option('--suffix', '-s', default=".yaml", help="Defines the suffix of the metadata_docs that will be used to load datasets. For AWS PDS bucket use MTL.txt")
@click.option('--start_date', help="Pass the start acquisition date, in YYYY-MM-DD format")
@click.option('--end_date', help="Pass the end acquisition date, in YYYY-MM-DD format")
@click.option('--archive', is_flag=True, help="If true, datasets found in the specified bucket and prefix will be archived")
@click.option('--unsafe', is_flag=True, help="If true, YAML will be parsed unsafely. Only use on trusted datasets. Only valid if suffix is yaml")
@click.option('--sources_policy', default="verify", help="verify, ensure, skip")
def main(bucket_name, config, prefix, suffix, start_date, end_date, archive, unsafe, sources_policy):
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO)
action = archive_document if archive else add_dataset
iterate_datasets(bucket_name, config, prefix, suffix, start_date, end_date, action, unsafe, sources_policy)
if __name__ == "__main__":
main()
|
webhaak.py
|
import binascii
import json
import logging
import os
import subprocess
from datetime import datetime, timedelta
from functools import update_wrapper
from multiprocessing import Process
import git
import pushover
import strictyaml
from flask import (Flask, Response, abort, current_app, jsonify, make_response,
request)
from strictyaml import Bool, Map, MapPattern, Optional, Str
import settings
app = Flask(__name__)
app.debug = settings.DEBUG
app.logger.setLevel(logging.DEBUG)
# Write the log to LOG_LOCATION; note this is a plain FileHandler, so no rotation is done here
fh = logging.FileHandler(
settings.LOG_LOCATION
)
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
app.logger.addHandler(fh)
# strictyaml schema for project settings
schema = MapPattern(
Str(),
Map(
{
"appkey": Str(),
"triggers": MapPattern(Str(), Map({
"triggerkey": Str(),
Optional("notify"): Bool(),
Optional("notify_on_error"): Bool(),
Optional("repo"): Str(),
Optional("repoparent"): Str(),
Optional("branch"): Str(),
Optional("command"): Str(),
Optional("authors"): MapPattern(Str(), Str()),
}))
}
)
)
# Load the configuration of the various projects/hooks
with open(settings.PROJECTS_FILE, 'r') as pf:
projects = strictyaml.load(pf.read(), schema).data
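# Illustrative projects file matching the schema above (every name and key below is
# made up; the REPODIR placeholder is expanded by run_command further down):
#
#   myproject:
#     appkey: abcdef1234567890
#     triggers:
#       deploy:
#         triggerkey: 0987654321fedcba
#         repo: https://github.com/example/myproject.git
#         branch: master
#         notify: true
#         command: REPODIR/scripts/deploy.sh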
def notify_user(result, config):
"""Send a PushOver message if configured, after git and command have run
result is a dictionary with fields:
command_result
status: 'OK' | 'error'
type: 'commanderror'
message
"""
try:
triggerconfig = config[1]
projectname = '{}>{}'.format(config[0], triggerconfig['title'])
title = ''
branch = 'master'
command = 'n/a'
repo = 'n/a'
if 'command' in triggerconfig:
command = triggerconfig['command']
if 'branch' in triggerconfig:
branch = triggerconfig['branch']
if 'repo' in triggerconfig:
repo = triggerconfig['repo']
message = 'repo: {}\nbranch: {}\ncommand: {}\nruntime: {}'.format(
repo,
branch,
command,
result['runtime']
)
if result['status'] == 'OK':
title = "Hook for {} ran successfully".format(projectname)
else:
title = "Hook for {} failed: {}".format(projectname, result['type'])
message = message + '\n\n{}'.format(result['message'])
logging.debug(message)
logging.info('Sending notification...')
# TODO: option to send to Telegram chat
client = pushover.Client(settings.PUSHOVER_USERKEY, api_token=settings.PUSHOVER_APPTOKEN)
client.send_message(message, title=title)
logging.info('Notification sent')
except AttributeError:
logging.warning('Notification through PushOver failed because of missing configuration')
def get_trigger_settings(appkey, triggerkey):
"""Look up the trigger and return the repo and command to be updated and fired
:param appkey: application key part of the url
:param triggerkey: trigger key part of the url, sub part of the config
:return: tuple with project info and the trigger config
"""
for project in projects:
if projects[project]['appkey'] == appkey:
for trigger in projects[project]['triggers']:
if projects[project]['triggers'][trigger]['triggerkey'] == triggerkey:
triggerconfig = projects[project]['triggers'][trigger]
triggerconfig['title'] = trigger
return (project, triggerconfig)
return None
def get_repo_basename(repo_url):
"""Extract repository basename from its url, as that will be the name of directory it will be cloned into"""
result = os.path.basename(repo_url)
filename, file_extension = os.path.splitext(result)
if file_extension == '.git':
# Strip the .git from the name, as Git will do the same on non-bare checkouts
result = filename
return result
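# Example: the directory name is the same with or without the .git suffix.
#
#   >>> get_repo_basename('https://github.com/aquatix/webhaak.git')
#   'webhaak'
#   >>> get_repo_basename('https://github.com/aquatix/webhaak')
#   'webhaak'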
def get_repo_version(repo_dir):
"""Gets version of Git repo, based on latest tag, number of commits since, and latest commit hash
:param repo_dir: path to the Git repository
:return: string with version
"""
# Make sure the working directory is our project
try:
version = subprocess.check_output(["git", "describe", "--always", "--tags"], stderr=None, cwd=repo_dir).strip()
except subprocess.CalledProcessError:
version = ''
try:
# byte string needs to be converted to a string
version = version.decode("utf-8")
except AttributeError:
# version already was a str
pass
return version
def fetchinfo_to_str(fetchinfo):
"""git.remote.FetchInfo to human readable representation"""
result = fetchinfo[0].note
return result
def update_repo(config):
"""Update (pull) the Git repo"""
projectname = config[0]
triggerconfig = config[1]
repo_url = triggerconfig['repo']
repo_parent = settings.REPOS_CACHE_DIR
if 'repoparent' in triggerconfig and triggerconfig['repoparent']:
repo_parent = triggerconfig['repoparent']
app.logger.info('[%s] Updating %s', projectname, repo_url)
app.logger.info('[%s] Repo parent %s', projectname, repo_parent)
# Ensure cache dir for webhaak exists and is writable
if not os.path.exists(repo_parent):
os.makedirs(repo_parent) # throws OSError if repo_parent is not writable
# TODO: check whether dir exists with different repository
repo_dir = os.path.join(repo_parent, get_repo_basename(repo_url))
app.logger.info('[%s] Repo dir %s', projectname, repo_dir)
if os.path.isdir(repo_dir):
# Repo already exists locally, do a pull
app.logger.info('[%s] Repo exists, pull', projectname)
apprepo = git.Repo(repo_dir)
origin = apprepo.remote('origin')
result = fetchinfo_to_str(origin.fetch()) # assure we actually have data. fetch() returns useful information
app.logger.info('[%s] Fetch result: %s', projectname, result)
else:
# Repo needs to be cloned
app.logger.info('[%s] Repo does not exist yet, clone', projectname)
apprepo = git.Repo.init(repo_dir)
origin = apprepo.create_remote('origin', repo_url)
origin.fetch() # assure we actually have data. fetch() returns useful information
# Setup a local tracking branch of a remote branch
apprepo.create_head('master', origin.refs.master).set_tracking_branch(origin.refs.master)
branch = 'master'
if 'branch' in triggerconfig:
branch = triggerconfig['branch']
app.logger.info('[%s] checkout() branch \'%s\'', projectname, branch)
result = str(apprepo.git.checkout(branch))
# pull (so really update) the checked out branch to latest commit
origin.pull()
app.logger.info('[%s] Done pulling branch \'%s\'', projectname, branch)
return result
def run_command(config, hook_info):
"""Run the command(s) defined for this trigger"""
projectname = config[0]
triggerconfig = config[1]
if 'command' not in triggerconfig:
# No command to execute, return
app.logger.info('[%s] No command to execute', projectname)
return None
command = triggerconfig['command']
# Replace some placeholders to be used in executing scripts from one of the repos
repo_parent = settings.REPOS_CACHE_DIR
if 'repoparent' in triggerconfig and triggerconfig['repoparent']:
repo_parent = triggerconfig['repoparent']
if 'repo' in triggerconfig:
repo_url = triggerconfig['repo']
command = command.replace('REPODIR', os.path.join(repo_parent, get_repo_basename(repo_url)))
command = command.replace('CACHEDIR', settings.REPOS_CACHE_DIR)
if 'REPOVERSION' in command:
version = get_repo_version(os.path.join(repo_parent, projectname))
command = command.replace('REPOVERSION', version)
for key in hook_info:
if isinstance(hook_info[key], str):
command = command.replace(key.upper(), hook_info[key].replace('"', '\\"'))
command = command.strip() # ensure no weird linefeeds and superfluous whitespace are there
app.logger.info('[%s] Executing `%s`', projectname, command)
# TODO: capture_output is new in Python 3.7, replaces stdout and stderr
# result = subprocess.run(command_parts, capture_output=True, check=True, shell=True, universal_newlines=True)
result = subprocess.run(
command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
universal_newlines=True
)
return result
def do_pull_andor_command(config, hook_info):
"""Asynchronous task, performing the git pulling and the specified scripting inside a Process"""
projectname = config[0]
starttime = datetime.now()
result = {'application': projectname}
result['trigger'] = config[1]
if 'repo' in config[1]:
try:
result['repo_result'] = update_repo(config)
app.logger.info('[%s] result repo: %s', projectname, str(result['repo_result']))
except git.GitCommandError as e:
result = {'status': 'error', 'type': 'giterror', 'message': str(e)}
app.logger.error('[%s] giterror: %s', projectname, str(e))
result['runtime'] = datetime.now() - starttime
notify_user(result, config)
return
except (OSError, KeyError) as e:
result = {'status': 'error', 'type': 'oserror', 'message': str(e)}
app.logger.error('[%s] oserror: %s', projectname, str(e))
result['runtime'] = datetime.now() - starttime
notify_user(result, config)
return
cmdresult = run_command(config, hook_info)
if cmdresult and cmdresult.returncode == 0:
app.logger.info('[%s] success for command: %s', projectname, str(cmdresult.stdout))
result['status'] = 'OK'
elif not cmdresult:
app.logger.info('[%s] no command configured', projectname)
result['status'] = 'OK'
else:
result['status'] = 'error'
result['type'] = 'commanderror'
result['message'] = cmdresult.stderr.strip()
        # TODO: separate logfiles per job? Filename then based on appkey_triggerkey_timestamp.log
app.logger.error(
'[%s] commanderror with returncode %s: %s',
projectname,
str(cmdresult.returncode),
cmdresult.stderr
)
app.logger.error('[%s] stdout: %s', projectname, cmdresult.stdout)
app.logger.error('[%s] stderr: %s', projectname, cmdresult.stderr)
result['runtime'] = datetime.now() - starttime
if (
('notify' not in config[1] or config[1]['notify'])
or (result['status'] == 'error' and ('notify_on_error' in config[1] and config[1]['notify_on_error']))
):
notify_user(result, config)
# == API request support functions/mixins ======
def crossdomain(origin=None, methods=None, headers=None,
max_age=21600, attach_to_all=True,
automatic_options=True):
"""Decorator to send the correct cross-domain headers
src: https://blog.skyred.fi/articles/better-crossdomain-snippet-for-flask.html
"""
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, str):
headers = ', '.join(x.upper() for x in headers)
if not isinstance(origin, str):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
h['Access-Control-Allow-Credentials'] = 'true'
h['Access-Control-Allow-Headers'] = \
"Origin, X-Requested-With, Content-Type, Accept, Authorization"
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
class APIException(Exception):
status_code = 400
def __init__(self, message, status_code=None, payload=None):
Exception.__init__(self)
self.message = message
if status_code is not None:
self.status_code = status_code
self.payload = payload
def to_dict(self):
rv = dict(self.payload or ())
rv['message'] = self.message
rv['status_code'] = self.status_code
return rv
class InvalidAPIUsage(APIException):
status_code = 400
@app.errorhandler(404)
def page_not_found(e):
return jsonify(error=404, text=str(e)), 404
@app.errorhandler(InvalidAPIUsage)
def handle_invalid_usage(error):
response = jsonify(error.to_dict())
response.status_code = error.status_code
response.mimetype = 'application/json'
return response
# == Web app endpoints ======
@app.route('/')
def indexpage():
app.logger.debug('Root page requested')
    return 'Welcome to <a href="https://github.com/aquatix/webhaak">Webhaak</a>; see the documentation on how to set up and use webhooks.'
@app.route('/admin/<secretkey>/list', methods=['GET'])
@crossdomain(origin='*')
def listtriggers(secretkey):
"""List the appkeys and triggerkeys"""
app.logger.debug('Trigger list requested')
try:
if secretkey != settings.SECRETKEY:
app.logger.debug('Secret key incorrect trying to list triggers')
abort(404)
except AttributeError:
app.logger.debug('Secret key not found trying to list triggers')
abort(404)
server_url = request.host_url
result = {}
for project in projects:
result[project] = {
'title': project,
'appkey': projects[project]['appkey'],
'triggers': [],
}
for trigger in projects[project]['triggers']:
result[project]['triggers'].append(
{
'title': trigger,
'triggerkey': projects[project]['triggers'][trigger]['triggerkey'],
'url': '{}app/{}/{}'.format(
server_url,
projects[project]['appkey'],
projects[project]['triggers'][trigger]['triggerkey']
)
}
)
return Response(
json.dumps({'projects': result}), status=200, mimetype='application/json'
)
@app.route('/app/<appkey>/<triggerkey>', methods=['GET', 'OPTIONS', 'POST'])
@crossdomain(origin='*')
def apptrigger(appkey, triggerkey):
"""Fire the trigger described by the configuration under `triggerkey`
:param appkey: application key part of the url
:param triggerkey: trigger key part of the url, sub part of the config
:return: json Response
"""
app.logger.info('%s on appkey: %s triggerkey: %s', request.method, appkey, triggerkey)
config = get_trigger_settings(appkey, triggerkey)
if config is None:
app.logger.error('appkey/triggerkey combo not found')
return Response(json.dumps({'status': 'Error'}), status=404, mimetype='application/json')
hook_info = {}
hook_info['event_type'] = 'push'
sentry_message = False
if request.method == 'POST':
if request.headers.get('X-Gitea-Event'):
vcs_source = 'Gitea'
elif request.headers.get('X-Gogs-Event'):
vcs_source = 'Gogs'
elif request.headers.get('X-GitHub-Event'):
vcs_source = 'GitHub'
elif request.headers.get('X-Event-Key'):
# Other option is to check for User-Agent: Bitbucket-Webhooks/2.0
vcs_source = 'BitBucket'
# Examples: pullrequest:fulfilled pullrequest:created
event_key = request.headers.get('X-Event-Key')
app.logger.debug('BitBucket event: %s', event_key)
if 'pullrequest:' in event_key:
hook_info['pullrequest_status'] = request.headers.get('X-Event-Key').split(':')[1].strip()
if hook_info['pullrequest_status'] == 'fulfilled':
hook_info['event_type'] = 'merge'
elif hook_info['pullrequest_status'] == 'created':
hook_info['event_type'] = 'new'
elif request.headers.get('Sentry-Trace'):
app.logger.debug('Sentry webhook')
sentry_message = True
vcs_source = 'n/a'
else:
vcs_source = '<unknown>'
hook_info['vcs_source'] = vcs_source
payload = request.get_json()
app.logger.debug(payload)
url = ''
if payload:
if 'repository' in payload:
if 'html_url' in payload['repository']:
url = payload['repository']['html_url']
elif 'links' in payload['repository']:
# BitBucket
url = payload['repository']['links']['html']['href']
# Likely some ping was sent, check if so
if request.headers.get('X-GitHub-Event') == "ping":
app.logger.info(
'received %s ping for %s hook: %s ',
vcs_source,
payload['repository']['full_name'],
url
)
return json.dumps({'msg': 'Hi!'})
if (
request.headers.get('X-GitHub-Event') == "push"
or request.headers.get('X-Gitea-Event') == "push"
or request.headers.get('X-Gogs-Event') == "push"
or request.headers.get('X-Event-Key') == "repo:push"
):
event_info = 'received push from {} for '.format(vcs_source)
elif sentry_message:
event_info = 'received push from Sentry for '
else:
app.logger.info(
'received wrong event type from %s for %s hook: %s',
vcs_source,
payload['repository']['full_name'],
url
)
return json.dumps({'msg': "wrong event type"})
if payload:
if 'push' in payload:
# BitBucket, which has a completely different format
app.logger.debug('Amount of changes in this push: %d', len(payload['push']['changes']))
hook_info['commit_before'] = None # When a branch is created, old is null; use as default
# Only take info from the first change item
if payload['push']['changes'][0]['old']:
# Info on the previous commit is available (so not a new branch)
hook_info['commit_before'] = payload['push']['changes'][0]['old']['target']['hash']
hook_info['commit_after'] = payload['push']['changes'][0]['new']['target']['hash']
hook_info['compare_url'] = payload['push']['changes'][0]['links']['html']['href']
hook_info['commits'] = []
for commit in payload['push']['changes'][0]['commits']:
commit_info = {'hash': commit['hash']}
if 'user' in commit['author']:
if 'username' in commit['author']['user']:
commit_info['name'] = commit['author']['user']['username']
else:
commit_info['name'] = commit['author']['user']['nickname']
commit_info['email'] = commit['author']['raw']
hook_info['commits'].append(commit_info)
if 'pullrequest' in payload:
# BitBucket pullrequest event
if 'rendered' in payload['pullrequest']:
hook_info['pullrequest_title'] = payload['pullrequest']['rendered']['title']['raw']
hook_info['pullrequest_description'] = payload['pullrequest']['rendered']['description']['raw']
if 'close_source_branch' in payload['pullrequest']:
hook_info['pullrequest_close_source_branch'] = payload['pullrequest']['close_source_branch']
if 'state' in payload['pullrequest']:
if payload['pullrequest']['state'] == 'MERGED':
hook_info['pullrequest_author'] = payload['pullrequest']['author']['display_name']
hook_info['pullrequest_closed_by'] = payload['pullrequest']['closed_by']['display_name']
if 'links' in payload['pullrequest'] and 'html' in payload['pullrequest']['links']:
hook_info['pullrequest_url'] = payload['pullrequest']['links']['html']
if 'ref' in payload:
hook_info['ref'] = payload['ref']
if 'heads' in payload['ref']:
hook_info['branch'] = payload['ref'].replace('refs/heads/', '')
elif 'tags' in payload['ref']:
hook_info['tag'] = payload['ref'].replace('refs/tags/', '')
if 'repository' in payload:
event_info += payload['repository']['full_name']
hook_info['reponame'] = payload['repository']['full_name']
if 'name' in payload['repository']:
hook_info['project_name'] = payload['repository']['name']
if 'actor' in payload:
# BitBucket pusher; no email address known here though
event_info += ' by ' + payload['actor']['nickname']
if 'display_name' in payload['actor']:
event_info += ' ({})'.format(payload['actor']['display_name'])
hook_info['username'] = payload['actor']['nickname']
app.logger.debug(config[1])
if 'authors' in config[1]:
# Look up the email address in the known authors list of the project
for author in config[1]['authors']:
if author.lower() == hook_info['username'].lower():
hook_info['email'] = config[1]['authors'][author]
break
if 'pusher' in payload:
if vcs_source in ('Gitea', 'Gogs'):
event_info += ' by ' + payload['pusher']['username']
hook_info['username'] = payload['pusher']['username']
hook_info['email'] = payload['pusher']['email']
elif vcs_source == 'GitHub':
event_info += ' by ' + payload['pusher']['name']
hook_info['username'] = payload['pusher']['name']
hook_info['email'] = payload['pusher']['email']
if 'compare' in payload:
event_info += ', compare: ' + payload['compare']
hook_info['compare_url'] = payload['compare']
elif 'compare_url' in payload:
# GitHub, gitea, gogs
event_info += ', compare: ' + payload['compare_url']
hook_info['compare_url'] = payload['compare_url']
if 'before' in payload:
hook_info['commit_before'] = payload['before']
if 'after' in payload:
hook_info['commit_after'] = payload['after']
if 'commits' in payload:
# Gather info on the commits included in this push
hook_info['commits'] = []
for commit in payload['commits']:
commit_info = {}
if 'sha' in commit:
commit_info['hash'] = commit['sha']
elif 'id' in commit:
commit_info['hash'] = commit['id']
if 'author' in commit:
commit_info['name'] = commit['author']['name']
commit_info['email'] = commit['author']['email']
hook_info['commits'].append(commit_info)
if sentry_message:
event_info += payload['project_name']
sentry_fields = ['project_name', 'culprit', 'url', 'message']
for field in sentry_fields:
if field in payload:
hook_info[field] = payload[field]
hook_info['stacktrace'] = 'Not available'
if 'event' in payload and payload['event'] and 'title' in payload['event']:
hook_info['title'] = payload['event']['title']
stacktrace = []
if 'exception' in payload['event']:
# Always take the last set
frames = payload['event']['exception']['values'][-1]['stacktrace']['frames']
for frame in frames:
frame_message = '*{}* in *{}* at line *{}*'.format(
frame['filename'],
frame['function'],
frame['lineno']
)
stacktrace.append(frame_message)
# Sentry puts the items of the trace from last to first in the json, so reverse the trace
stacktrace.reverse()
elif 'logentry' in payload['event']:
if 'message' in payload['event']['logentry']:
stacktrace.append(payload['event']['logentry']['message'])
if 'formatted' in payload['event']['logentry']:
stacktrace.append(payload['event']['logentry']['formatted'])
app.logger.debug(stacktrace)
hook_info['stacktrace'] = '\\n'.join(stacktrace)
else:
            event_info = '{}unknown, as no json was received. Check that {} webhook content type is application/json'.format(
                event_info,
                vcs_source
            )
app.logger.debug(hook_info)
app.logger.info(event_info)
p = Process(target=do_pull_andor_command, args=(config, hook_info,))
p.start()
return Response(
json.dumps({
'status': 'OK',
'message': 'Command accepted and will be run in the background'
}), status=200, mimetype='application/json'
)
@app.route('/monitor/monitor.html')
@app.route('/monitor/')
@app.route('/monitor')
def monitor():
"""Monitoring ping"""
result = 'OK'
return result
def generatekey():
"""Generate a random ascii string to be used as identifier"""
return binascii.hexlify(os.urandom(24))
@app.cli.command()
def printappkey():
"""Generate new appkey"""
print(generatekey())
@app.route('/getappkey')
def getappkey():
"""Generate new appkey"""
return Response(json.dumps({'key': generatekey().decode('utf-8')}), status=200, mimetype='application/json')
if __name__ == '__main__':
if not settings.DEBUG:
app.run(port=settings.PORT, debug=settings.DEBUG)
else:
app.run(host='0.0.0.0', port=settings.PORT, debug=settings.DEBUG)
|
test_enum.py
|
import enum
import doctest
import inspect
import os
import pydoc
import sys
import unittest
import threading
import builtins as bltns
from collections import OrderedDict
from datetime import date
from enum import Enum, IntEnum, StrEnum, EnumType, Flag, IntFlag, unique, auto
from enum import STRICT, CONFORM, EJECT, KEEP, _simple_enum, _test_simple_enum
from enum import verify, UNIQUE, CONTINUOUS, NAMED_FLAGS, ReprEnum
from enum import member, nonmember
from io import StringIO
from pickle import dumps, loads, PicklingError, HIGHEST_PROTOCOL
from test import support
from test.support import ALWAYS_EQ
from test.support import threading_helper
from textwrap import dedent
from datetime import timedelta
python_version = sys.version_info[:2]
def load_tests(loader, tests, ignore):
tests.addTests(doctest.DocTestSuite(enum))
if os.path.exists('Doc/library/enum.rst'):
tests.addTests(doctest.DocFileSuite(
'../../Doc/library/enum.rst',
optionflags=doctest.ELLIPSIS|doctest.NORMALIZE_WHITESPACE,
))
return tests
MODULE = __name__
SHORT_MODULE = MODULE.split('.')[-1]
# for pickle tests
try:
class Stooges(Enum):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
Stooges = exc
try:
class IntStooges(int, Enum):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
IntStooges = exc
try:
class FloatStooges(float, Enum):
LARRY = 1.39
CURLY = 2.72
MOE = 3.142596
except Exception as exc:
FloatStooges = exc
try:
class FlagStooges(Flag):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
FlagStooges = exc
# for pickle test and subclass tests
class Name(StrEnum):
BDFL = 'Guido van Rossum'
FLUFL = 'Barry Warsaw'
try:
Question = Enum('Question', 'who what when where why', module=__name__)
except Exception as exc:
Question = exc
try:
Answer = Enum('Answer', 'him this then there because')
except Exception as exc:
Answer = exc
try:
Theory = Enum('Theory', 'rule law supposition', qualname='spanish_inquisition')
except Exception as exc:
Theory = exc
# for doctests
try:
class Fruit(Enum):
TOMATO = 1
BANANA = 2
CHERRY = 3
except Exception:
pass
def test_pickle_dump_load(assertion, source, target=None):
if target is None:
target = source
for protocol in range(HIGHEST_PROTOCOL + 1):
assertion(loads(dumps(source, protocol=protocol)), target)
def test_pickle_exception(assertion, exception, obj):
for protocol in range(HIGHEST_PROTOCOL + 1):
with assertion(exception):
dumps(obj, protocol=protocol)
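# These helpers are typically driven from the test classes below, e.g. (sketch):
#
#   test_pickle_dump_load(self.assertIs, Stooges.CURLY)
#   test_pickle_exception(self.assertRaises, TypeError, BadPickle.mymember)  # BadPickle is hypothetical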
class TestHelpers(unittest.TestCase):
# _is_descriptor, _is_sunder, _is_dunder
sunder_names = '_bad_', '_good_', '_what_ho_'
dunder_names = '__mal__', '__bien__', '__que_que__'
private_names = '_MyEnum__private', '_MyEnum__still_private'
private_and_sunder_names = '_MyEnum__private_', '_MyEnum__also_private_'
random_names = 'okay', '_semi_private', '_weird__', '_MyEnum__'
def test_is_descriptor(self):
class foo:
pass
for attr in ('__get__','__set__','__delete__'):
obj = foo()
self.assertFalse(enum._is_descriptor(obj))
setattr(obj, attr, 1)
self.assertTrue(enum._is_descriptor(obj))
def test_sunder(self):
for name in self.sunder_names + self.private_and_sunder_names:
            self.assertTrue(enum._is_sunder(name), '%r is not a sunder name?' % name)
for name in self.dunder_names + self.private_names + self.random_names:
self.assertFalse(enum._is_sunder(name), '%r is a sunder name?' % name)
for s in ('_a_', '_aa_'):
self.assertTrue(enum._is_sunder(s))
for s in ('a', 'a_', '_a', '__a', 'a__', '__a__', '_a__', '__a_', '_',
'__', '___', '____', '_____',):
self.assertFalse(enum._is_sunder(s))
def test_dunder(self):
for name in self.dunder_names:
            self.assertTrue(enum._is_dunder(name), '%r is not a dunder name?' % name)
for name in self.sunder_names + self.private_names + self.private_and_sunder_names + self.random_names:
self.assertFalse(enum._is_dunder(name), '%r is a dunder name?' % name)
for s in ('__a__', '__aa__'):
self.assertTrue(enum._is_dunder(s))
for s in ('a', 'a_', '_a', '__a', 'a__', '_a_', '_a__', '__a_', '_',
'__', '___', '____', '_____',):
self.assertFalse(enum._is_dunder(s))
def test_is_private(self):
for name in self.private_names + self.private_and_sunder_names:
            self.assertTrue(enum._is_private('MyEnum', name), '%r is not a private name?')
for name in self.sunder_names + self.dunder_names + self.random_names:
self.assertFalse(enum._is_private('MyEnum', name), '%r is a private name?')
# for subclassing tests
class classproperty:
def __init__(self, fget=None, fset=None, fdel=None, doc=None):
self.fget = fget
self.fset = fset
self.fdel = fdel
if doc is None and fget is not None:
doc = fget.__doc__
self.__doc__ = doc
def __get__(self, instance, ownerclass):
return self.fget(ownerclass)
# for global repr tests
@enum.global_enum
class HeadlightsK(IntFlag, boundary=enum.KEEP):
OFF_K = 0
LOW_BEAM_K = auto()
HIGH_BEAM_K = auto()
FOG_K = auto()
@enum.global_enum
class HeadlightsC(IntFlag, boundary=enum.CONFORM):
OFF_C = 0
LOW_BEAM_C = auto()
HIGH_BEAM_C = auto()
FOG_C = auto()
@enum.global_enum
class NoName(Flag):
ONE = 1
TWO = 2
# tests
class _EnumTests:
"""
Test for behavior that is the same across the different types of enumerations.
"""
values = None
def setUp(self):
class BaseEnum(self.enum_type):
@enum.property
def first(self):
return '%s is first!' % self.name
class MainEnum(BaseEnum):
first = auto()
second = auto()
third = auto()
if issubclass(self.enum_type, Flag):
dupe = 3
else:
dupe = third
self.MainEnum = MainEnum
#
class NewStrEnum(self.enum_type):
def __str__(self):
return self.name.upper()
first = auto()
self.NewStrEnum = NewStrEnum
#
class NewFormatEnum(self.enum_type):
def __format__(self, spec):
return self.name.upper()
first = auto()
self.NewFormatEnum = NewFormatEnum
#
class NewStrFormatEnum(self.enum_type):
def __str__(self):
return self.name.title()
def __format__(self, spec):
return ''.join(reversed(self.name))
first = auto()
self.NewStrFormatEnum = NewStrFormatEnum
#
class NewBaseEnum(self.enum_type):
def __str__(self):
return self.name.title()
def __format__(self, spec):
return ''.join(reversed(self.name))
class NewSubEnum(NewBaseEnum):
first = auto()
self.NewSubEnum = NewSubEnum
#
self.is_flag = False
self.names = ['first', 'second', 'third']
if issubclass(MainEnum, StrEnum):
self.values = self.names
elif MainEnum._member_type_ is str:
self.values = ['1', '2', '3']
elif issubclass(self.enum_type, Flag):
self.values = [1, 2, 4]
self.is_flag = True
self.dupe2 = MainEnum(5)
else:
self.values = self.values or [1, 2, 3]
#
if not getattr(self, 'source_values', False):
self.source_values = self.values
def assertFormatIsValue(self, spec, member):
self.assertEqual(spec.format(member), spec.format(member.value))
def assertFormatIsStr(self, spec, member):
self.assertEqual(spec.format(member), spec.format(str(member)))
def test_attribute_deletion(self):
class Season(self.enum_type):
SPRING = auto()
SUMMER = auto()
AUTUMN = auto()
#
def spam(cls):
pass
#
self.assertTrue(hasattr(Season, 'spam'))
del Season.spam
self.assertFalse(hasattr(Season, 'spam'))
#
with self.assertRaises(AttributeError):
del Season.SPRING
with self.assertRaises(AttributeError):
del Season.DRY
with self.assertRaises(AttributeError):
del Season.SPRING.name
def test_basics(self):
TE = self.MainEnum
if self.is_flag:
self.assertEqual(repr(TE), "<flag 'MainEnum'>")
self.assertEqual(str(TE), "<flag 'MainEnum'>")
self.assertEqual(format(TE), "<flag 'MainEnum'>")
self.assertTrue(TE(5) is self.dupe2)
else:
self.assertEqual(repr(TE), "<enum 'MainEnum'>")
self.assertEqual(str(TE), "<enum 'MainEnum'>")
self.assertEqual(format(TE), "<enum 'MainEnum'>")
self.assertEqual(list(TE), [TE.first, TE.second, TE.third])
self.assertEqual(
[m.name for m in TE],
self.names,
)
self.assertEqual(
[m.value for m in TE],
self.values,
)
self.assertEqual(
[m.first for m in TE],
['first is first!', 'second is first!', 'third is first!']
)
for member, name in zip(TE, self.names, strict=True):
self.assertIs(TE[name], member)
for member, value in zip(TE, self.values, strict=True):
self.assertIs(TE(value), member)
if issubclass(TE, StrEnum):
self.assertTrue(TE.dupe is TE('third') is TE['dupe'])
elif TE._member_type_ is str:
self.assertTrue(TE.dupe is TE('3') is TE['dupe'])
elif issubclass(TE, Flag):
self.assertTrue(TE.dupe is TE(3) is TE['dupe'])
else:
self.assertTrue(TE.dupe is TE(self.values[2]) is TE['dupe'])
def test_bool_is_true(self):
class Empty(self.enum_type):
pass
self.assertTrue(Empty)
#
self.assertTrue(self.MainEnum)
for member in self.MainEnum:
self.assertTrue(member)
def test_changing_member_fails(self):
MainEnum = self.MainEnum
with self.assertRaises(AttributeError):
self.MainEnum.second = 'really first'
@unittest.skipIf(
python_version >= (3, 12),
'__contains__ now returns True/False for all inputs',
)
@unittest.expectedFailure
def test_contains_er(self):
MainEnum = self.MainEnum
self.assertIn(MainEnum.third, MainEnum)
with self.assertRaises(TypeError):
with self.assertWarns(DeprecationWarning):
self.source_values[1] in MainEnum
with self.assertRaises(TypeError):
with self.assertWarns(DeprecationWarning):
'first' in MainEnum
val = MainEnum.dupe
self.assertIn(val, MainEnum)
#
class OtherEnum(Enum):
one = auto()
two = auto()
self.assertNotIn(OtherEnum.two, MainEnum)
@unittest.skipIf(
python_version < (3, 12),
        '__contains__ works only with enum members before 3.12',
)
@unittest.expectedFailure
def test_contains_tf(self):
MainEnum = self.MainEnum
self.assertIn(MainEnum.first, MainEnum)
self.assertTrue(self.source_values[0] in MainEnum)
self.assertFalse('first' in MainEnum)
val = MainEnum.dupe
self.assertIn(val, MainEnum)
#
class OtherEnum(Enum):
one = auto()
two = auto()
self.assertNotIn(OtherEnum.two, MainEnum)
def test_dir_on_class(self):
TE = self.MainEnum
self.assertEqual(set(dir(TE)), set(enum_dir(TE)))
def test_dir_on_item(self):
TE = self.MainEnum
self.assertEqual(set(dir(TE.first)), set(member_dir(TE.first)))
def test_dir_with_added_behavior(self):
class Test(self.enum_type):
this = auto()
these = auto()
def wowser(self):
return ("Wowser! I'm %s!" % self.name)
self.assertTrue('wowser' not in dir(Test))
self.assertTrue('wowser' in dir(Test.this))
def test_dir_on_sub_with_behavior_on_super(self):
# see issue22506
class SuperEnum(self.enum_type):
def invisible(self):
return "did you see me?"
class SubEnum(SuperEnum):
sample = auto()
self.assertTrue('invisible' not in dir(SubEnum))
self.assertTrue('invisible' in dir(SubEnum.sample))
def test_dir_on_sub_with_behavior_including_instance_dict_on_super(self):
# see issue40084
class SuperEnum(self.enum_type):
def __new__(cls, *value, **kwds):
new = self.enum_type._member_type_.__new__
if self.enum_type._member_type_ is object:
obj = new(cls)
else:
if isinstance(value[0], tuple):
create_value ,= value[0]
else:
create_value = value
obj = new(cls, *create_value)
obj._value_ = value[0] if len(value) == 1 else value
obj.description = 'test description'
return obj
class SubEnum(SuperEnum):
sample = self.source_values[1]
self.assertTrue('description' not in dir(SubEnum))
self.assertTrue('description' in dir(SubEnum.sample), dir(SubEnum.sample))
def test_enum_in_enum_out(self):
Main = self.MainEnum
self.assertIs(Main(Main.first), Main.first)
def test_hash(self):
MainEnum = self.MainEnum
mapping = {}
mapping[MainEnum.first] = '1225'
mapping[MainEnum.second] = '0315'
mapping[MainEnum.third] = '0704'
self.assertEqual(mapping[MainEnum.second], '0315')
def test_invalid_names(self):
with self.assertRaises(ValueError):
class Wrong(self.enum_type):
mro = 9
with self.assertRaises(ValueError):
class Wrong(self.enum_type):
_create_= 11
with self.assertRaises(ValueError):
class Wrong(self.enum_type):
_get_mixins_ = 9
with self.assertRaises(ValueError):
class Wrong(self.enum_type):
_find_new_ = 1
with self.assertRaises(ValueError):
class Wrong(self.enum_type):
_any_name_ = 9
def test_object_str_override(self):
"check that setting __str__ to object's is not reset to Enum's"
class Generic(self.enum_type):
item = self.source_values[2]
def __repr__(self):
return "%s.test" % (self._name_, )
__str__ = object.__str__
self.assertEqual(str(Generic.item), 'item.test')
def test_overridden_str(self):
NS = self.NewStrEnum
self.assertEqual(str(NS.first), NS.first.name.upper())
self.assertEqual(format(NS.first), NS.first.name.upper())
def test_overridden_str_format(self):
NSF = self.NewStrFormatEnum
self.assertEqual(str(NSF.first), NSF.first.name.title())
self.assertEqual(format(NSF.first), ''.join(reversed(NSF.first.name)))
def test_overridden_str_format_inherited(self):
NSE = self.NewSubEnum
self.assertEqual(str(NSE.first), NSE.first.name.title())
self.assertEqual(format(NSE.first), ''.join(reversed(NSE.first.name)))
def test_programmatic_function_string(self):
MinorEnum = self.enum_type('MinorEnum', 'june july august')
lst = list(MinorEnum)
self.assertEqual(len(lst), len(MinorEnum))
self.assertEqual(len(MinorEnum), 3, MinorEnum)
self.assertEqual(
[MinorEnum.june, MinorEnum.july, MinorEnum.august],
lst,
)
values = self.values
if self.enum_type is StrEnum:
values = ['june','july','august']
for month, av in zip('june july august'.split(), values):
e = MinorEnum[month]
self.assertEqual(e.value, av, list(MinorEnum))
self.assertEqual(e.name, month)
if MinorEnum._member_type_ is not object and issubclass(MinorEnum, MinorEnum._member_type_):
self.assertEqual(e, av)
else:
self.assertNotEqual(e, av)
self.assertIn(e, MinorEnum)
self.assertIs(type(e), MinorEnum)
self.assertIs(e, MinorEnum(av))
def test_programmatic_function_string_list(self):
MinorEnum = self.enum_type('MinorEnum', ['june', 'july', 'august'])
lst = list(MinorEnum)
self.assertEqual(len(lst), len(MinorEnum))
self.assertEqual(len(MinorEnum), 3, MinorEnum)
self.assertEqual(
[MinorEnum.june, MinorEnum.july, MinorEnum.august],
lst,
)
values = self.values
if self.enum_type is StrEnum:
values = ['june','july','august']
for month, av in zip('june july august'.split(), values):
e = MinorEnum[month]
self.assertEqual(e.value, av)
self.assertEqual(e.name, month)
if MinorEnum._member_type_ is not object and issubclass(MinorEnum, MinorEnum._member_type_):
self.assertEqual(e, av)
else:
self.assertNotEqual(e, av)
self.assertIn(e, MinorEnum)
self.assertIs(type(e), MinorEnum)
self.assertIs(e, MinorEnum(av))
def test_programmatic_function_iterable(self):
MinorEnum = self.enum_type(
'MinorEnum',
(('june', self.source_values[0]), ('july', self.source_values[1]), ('august', self.source_values[2]))
)
lst = list(MinorEnum)
self.assertEqual(len(lst), len(MinorEnum))
self.assertEqual(len(MinorEnum), 3, MinorEnum)
self.assertEqual(
[MinorEnum.june, MinorEnum.july, MinorEnum.august],
lst,
)
for month, av in zip('june july august'.split(), self.values):
e = MinorEnum[month]
self.assertEqual(e.value, av)
self.assertEqual(e.name, month)
if MinorEnum._member_type_ is not object and issubclass(MinorEnum, MinorEnum._member_type_):
self.assertEqual(e, av)
else:
self.assertNotEqual(e, av)
self.assertIn(e, MinorEnum)
self.assertIs(type(e), MinorEnum)
self.assertIs(e, MinorEnum(av))
def test_programmatic_function_from_dict(self):
MinorEnum = self.enum_type(
'MinorEnum',
OrderedDict((('june', self.source_values[0]), ('july', self.source_values[1]), ('august', self.source_values[2])))
)
lst = list(MinorEnum)
self.assertEqual(len(lst), len(MinorEnum))
self.assertEqual(len(MinorEnum), 3, MinorEnum)
self.assertEqual(
[MinorEnum.june, MinorEnum.july, MinorEnum.august],
lst,
)
for month, av in zip('june july august'.split(), self.values):
e = MinorEnum[month]
if MinorEnum._member_type_ is not object and issubclass(MinorEnum, MinorEnum._member_type_):
self.assertEqual(e, av)
else:
self.assertNotEqual(e, av)
self.assertIn(e, MinorEnum)
self.assertIs(type(e), MinorEnum)
self.assertIs(e, MinorEnum(av))
def test_repr(self):
TE = self.MainEnum
if self.is_flag:
self.assertEqual(repr(TE(0)), "<MainEnum: 0>")
self.assertEqual(repr(TE.dupe), "<MainEnum.dupe: 3>")
self.assertEqual(repr(self.dupe2), "<MainEnum.first|third: 5>")
elif issubclass(TE, StrEnum):
self.assertEqual(repr(TE.dupe), "<MainEnum.third: 'third'>")
else:
self.assertEqual(repr(TE.dupe), "<MainEnum.third: %r>" % (self.values[2], ), TE._value_repr_)
for name, value, member in zip(self.names, self.values, TE, strict=True):
self.assertEqual(repr(member), "<MainEnum.%s: %r>" % (member.name, member.value))
def test_repr_override(self):
class Generic(self.enum_type):
first = auto()
second = auto()
third = auto()
def __repr__(self):
return "don't you just love shades of %s?" % self.name
self.assertEqual(
repr(Generic.third),
"don't you just love shades of third?",
)
def test_inherited_repr(self):
class MyEnum(self.enum_type):
def __repr__(self):
return "My name is %s." % self.name
class MySubEnum(MyEnum):
this = auto()
that = auto()
theother = auto()
self.assertEqual(repr(MySubEnum.that), "My name is that.")
def test_reversed_iteration_order(self):
self.assertEqual(
list(reversed(self.MainEnum)),
[self.MainEnum.third, self.MainEnum.second, self.MainEnum.first],
)
class _PlainOutputTests:
def test_str(self):
TE = self.MainEnum
if self.is_flag:
self.assertEqual(str(TE(0)), "MainEnum(0)")
self.assertEqual(str(TE.dupe), "MainEnum.dupe")
self.assertEqual(str(self.dupe2), "MainEnum.first|third")
else:
self.assertEqual(str(TE.dupe), "MainEnum.third")
for name, value, member in zip(self.names, self.values, TE, strict=True):
self.assertEqual(str(member), "MainEnum.%s" % (member.name, ))
def test_format(self):
TE = self.MainEnum
if self.is_flag:
self.assertEqual(format(TE.dupe), "MainEnum.dupe")
self.assertEqual(format(self.dupe2), "MainEnum.first|third")
else:
self.assertEqual(format(TE.dupe), "MainEnum.third")
for name, value, member in zip(self.names, self.values, TE, strict=True):
self.assertEqual(format(member), "MainEnum.%s" % (member.name, ))
def test_overridden_format(self):
NF = self.NewFormatEnum
self.assertEqual(str(NF.first), "NewFormatEnum.first", '%s %r' % (NF.__str__, NF.first))
self.assertEqual(format(NF.first), "FIRST")
def test_format_specs(self):
TE = self.MainEnum
self.assertFormatIsStr('{}', TE.second)
self.assertFormatIsStr('{:}', TE.second)
self.assertFormatIsStr('{:20}', TE.second)
self.assertFormatIsStr('{:^20}', TE.second)
self.assertFormatIsStr('{:>20}', TE.second)
self.assertFormatIsStr('{:<20}', TE.second)
self.assertFormatIsStr('{:5.2}', TE.second)
class _MixedOutputTests:
def test_str(self):
TE = self.MainEnum
if self.is_flag:
self.assertEqual(str(TE.dupe), "MainEnum.dupe")
self.assertEqual(str(self.dupe2), "MainEnum.first|third")
else:
self.assertEqual(str(TE.dupe), "MainEnum.third")
for name, value, member in zip(self.names, self.values, TE, strict=True):
self.assertEqual(str(member), "MainEnum.%s" % (member.name, ))
def test_format(self):
TE = self.MainEnum
if self.is_flag:
self.assertEqual(format(TE.dupe), "MainEnum.dupe")
self.assertEqual(format(self.dupe2), "MainEnum.first|third")
else:
self.assertEqual(format(TE.dupe), "MainEnum.third")
for name, value, member in zip(self.names, self.values, TE, strict=True):
self.assertEqual(format(member), "MainEnum.%s" % (member.name, ))
def test_overridden_format(self):
NF = self.NewFormatEnum
self.assertEqual(str(NF.first), "NewFormatEnum.first")
self.assertEqual(format(NF.first), "FIRST")
def test_format_specs(self):
TE = self.MainEnum
self.assertFormatIsStr('{}', TE.first)
self.assertFormatIsStr('{:}', TE.first)
self.assertFormatIsStr('{:20}', TE.first)
self.assertFormatIsStr('{:^20}', TE.first)
self.assertFormatIsStr('{:>20}', TE.first)
self.assertFormatIsStr('{:<20}', TE.first)
self.assertFormatIsStr('{:5.2}', TE.first)
class _MinimalOutputTests:
def test_str(self):
TE = self.MainEnum
if self.is_flag:
self.assertEqual(str(TE.dupe), "3")
self.assertEqual(str(self.dupe2), "5")
else:
self.assertEqual(str(TE.dupe), str(self.values[2]))
for name, value, member in zip(self.names, self.values, TE, strict=True):
self.assertEqual(str(member), str(value))
def test_format(self):
TE = self.MainEnum
if self.is_flag:
self.assertEqual(format(TE.dupe), "3")
self.assertEqual(format(self.dupe2), "5")
else:
self.assertEqual(format(TE.dupe), format(self.values[2]))
for name, value, member in zip(self.names, self.values, TE, strict=True):
self.assertEqual(format(member), format(value))
def test_overridden_format(self):
NF = self.NewFormatEnum
self.assertEqual(str(NF.first), str(self.values[0]))
self.assertEqual(format(NF.first), "FIRST")
def test_format_specs(self):
TE = self.MainEnum
self.assertFormatIsValue('{}', TE.third)
self.assertFormatIsValue('{:}', TE.third)
self.assertFormatIsValue('{:20}', TE.third)
self.assertFormatIsValue('{:^20}', TE.third)
self.assertFormatIsValue('{:>20}', TE.third)
self.assertFormatIsValue('{:<20}', TE.third)
if TE._member_type_ is float:
self.assertFormatIsValue('{:n}', TE.third)
self.assertFormatIsValue('{:5.2}', TE.third)
self.assertFormatIsValue('{:f}', TE.third)
class _FlagTests:
def test_default_missing_with_wrong_type_value(self):
with self.assertRaisesRegex(
ValueError,
"'RED' is not a valid TestFlag.Color",
) as ctx:
self.MainEnum('RED')
self.assertIs(ctx.exception.__context__, None)
class TestPlainEnum(_EnumTests, _PlainOutputTests, unittest.TestCase):
enum_type = Enum
class TestPlainFlag(_EnumTests, _PlainOutputTests, unittest.TestCase):
enum_type = Flag
class TestIntEnum(_EnumTests, _MinimalOutputTests, unittest.TestCase):
enum_type = IntEnum
class TestStrEnum(_EnumTests, _MinimalOutputTests, unittest.TestCase):
enum_type = StrEnum
class TestIntFlag(_EnumTests, _MinimalOutputTests, unittest.TestCase):
enum_type = IntFlag
class TestMixedInt(_EnumTests, _MixedOutputTests, unittest.TestCase):
class enum_type(int, Enum): pass
class TestMixedStr(_EnumTests, _MixedOutputTests, unittest.TestCase):
class enum_type(str, Enum): pass
class TestMixedIntFlag(_EnumTests, _MixedOutputTests, unittest.TestCase):
class enum_type(int, Flag): pass
class TestMixedDate(_EnumTests, _MixedOutputTests, unittest.TestCase):
values = [date(2021, 12, 25), date(2020, 3, 15), date(2019, 11, 27)]
source_values = [(2021, 12, 25), (2020, 3, 15), (2019, 11, 27)]
class enum_type(date, Enum):
def _generate_next_value_(name, start, count, last_values):
values = [(2021, 12, 25), (2020, 3, 15), (2019, 11, 27)]
return values[count]
class TestMinimalDate(_EnumTests, _MinimalOutputTests, unittest.TestCase):
values = [date(2023, 12, 1), date(2016, 2, 29), date(2009, 1, 1)]
source_values = [(2023, 12, 1), (2016, 2, 29), (2009, 1, 1)]
class enum_type(date, ReprEnum):
def _generate_next_value_(name, start, count, last_values):
values = [(2023, 12, 1), (2016, 2, 29), (2009, 1, 1)]
return values[count]
class TestMixedFloat(_EnumTests, _MixedOutputTests, unittest.TestCase):
values = [1.1, 2.2, 3.3]
class enum_type(float, Enum):
def _generate_next_value_(name, start, count, last_values):
values = [1.1, 2.2, 3.3]
return values[count]
class TestMinimalFloat(_EnumTests, _MinimalOutputTests, unittest.TestCase):
values = [4.4, 5.5, 6.6]
class enum_type(float, ReprEnum):
def _generate_next_value_(name, start, count, last_values):
values = [4.4, 5.5, 6.6]
return values[count]
class TestSpecial(unittest.TestCase):
"""
various operations that are not attributable to every possible enum
"""
def setUp(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = 3
WINTER = 4
self.Season = Season
#
class Grades(IntEnum):
A = 5
B = 4
C = 3
D = 2
F = 0
self.Grades = Grades
#
class Directional(str, Enum):
EAST = 'east'
WEST = 'west'
NORTH = 'north'
SOUTH = 'south'
self.Directional = Directional
#
from datetime import date
class Holiday(date, Enum):
NEW_YEAR = 2013, 1, 1
IDES_OF_MARCH = 2013, 3, 15
self.Holiday = Holiday
def test_bool(self):
# plain Enum members are always True
class Logic(Enum):
true = True
false = False
self.assertTrue(Logic.true)
self.assertTrue(Logic.false)
# unless overridden
class RealLogic(Enum):
true = True
false = False
def __bool__(self):
return bool(self._value_)
self.assertTrue(RealLogic.true)
self.assertFalse(RealLogic.false)
# mixed Enums depend on mixed-in type
class IntLogic(int, Enum):
true = 1
false = 0
self.assertTrue(IntLogic.true)
self.assertFalse(IntLogic.false)
def test_comparisons(self):
Season = self.Season
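        # plain Enum members are unordered and do not compare equal to their values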
with self.assertRaises(TypeError):
Season.SPRING < Season.WINTER
with self.assertRaises(TypeError):
Season.SPRING > 4
#
self.assertNotEqual(Season.SPRING, 1)
#
class Part(Enum):
SPRING = 1
CLIP = 2
BARREL = 3
#
self.assertNotEqual(Season.SPRING, Part.SPRING)
with self.assertRaises(TypeError):
Season.SPRING < Part.CLIP
@unittest.skip('to-do list')
def test_dir_with_custom_dunders(self):
class PlainEnum(Enum):
pass
cls_dir = dir(PlainEnum)
self.assertNotIn('__repr__', cls_dir)
self.assertNotIn('__str__', cls_dir)
self.assertNotIn('__format__', cls_dir)
self.assertNotIn('__init__', cls_dir)
#
class MyEnum(Enum):
def __repr__(self):
return object.__repr__(self)
def __str__(self):
return object.__repr__(self)
def __format__(self):
return object.__repr__(self)
def __init__(self):
pass
cls_dir = dir(MyEnum)
self.assertIn('__repr__', cls_dir)
self.assertIn('__str__', cls_dir)
self.assertIn('__format__', cls_dir)
self.assertIn('__init__', cls_dir)
def test_duplicate_name_error(self):
with self.assertRaises(TypeError):
class Color(Enum):
red = 1
green = 2
blue = 3
red = 4
#
with self.assertRaises(TypeError):
class Color(Enum):
red = 1
green = 2
blue = 3
def red(self):
return 'red'
#
with self.assertRaises(TypeError):
class Color(Enum):
@enum.property
def red(self):
return 'redder'
red = 1
green = 2
blue = 3
def test_enum_function_with_qualname(self):
if isinstance(Theory, Exception):
raise Theory
self.assertEqual(Theory.__qualname__, 'spanish_inquisition')
def test_enum_of_types(self):
"""Support using Enum to refer to types deliberately."""
class MyTypes(Enum):
i = int
f = float
s = str
self.assertEqual(MyTypes.i.value, int)
self.assertEqual(MyTypes.f.value, float)
self.assertEqual(MyTypes.s.value, str)
class Foo:
pass
class Bar:
pass
class MyTypes2(Enum):
a = Foo
b = Bar
self.assertEqual(MyTypes2.a.value, Foo)
self.assertEqual(MyTypes2.b.value, Bar)
class SpamEnumNotInner:
pass
class SpamEnum(Enum):
spam = SpamEnumNotInner
self.assertEqual(SpamEnum.spam.value, SpamEnumNotInner)
@unittest.skipIf(
python_version >= (3, 13),
'inner classes are not members',
)
def test_nested_classes_in_enum_are_members(self):
"""
Check for warnings pre-3.13
"""
with self.assertWarnsRegex(DeprecationWarning, 'will not become a member'):
class Outer(Enum):
a = 1
b = 2
class Inner(Enum):
foo = 10
bar = 11
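        # before 3.13 the nested class itself becomes a member, so its value is
        # the inner enum class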
self.assertTrue(isinstance(Outer.Inner, Outer))
self.assertEqual(Outer.a.value, 1)
self.assertEqual(Outer.Inner.value.foo.value, 10)
self.assertEqual(
list(Outer.Inner.value),
[Outer.Inner.value.foo, Outer.Inner.value.bar],
)
self.assertEqual(
list(Outer),
[Outer.a, Outer.b, Outer.Inner],
)
@unittest.skipIf(
python_version < (3, 13),
'inner classes are still members',
)
def test_nested_classes_in_enum_are_not_members(self):
"""Support locally-defined nested classes."""
class Outer(Enum):
a = 1
b = 2
class Inner(Enum):
foo = 10
bar = 11
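        # from 3.13 on the nested class stays a plain class attribute instead of
        # becoming a member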
self.assertTrue(isinstance(Outer.Inner, type))
self.assertEqual(Outer.a.value, 1)
self.assertEqual(Outer.Inner.foo.value, 10)
self.assertEqual(
list(Outer.Inner),
[Outer.Inner.foo, Outer.Inner.bar],
)
self.assertEqual(
list(Outer),
[Outer.a, Outer.b],
)
def test_nested_classes_in_enum_with_nonmember(self):
class Outer(Enum):
a = 1
b = 2
@nonmember
class Inner(Enum):
foo = 10
bar = 11
self.assertTrue(isinstance(Outer.Inner, type))
self.assertEqual(Outer.a.value, 1)
self.assertEqual(Outer.Inner.foo.value, 10)
self.assertEqual(
list(Outer.Inner),
[Outer.Inner.foo, Outer.Inner.bar],
)
self.assertEqual(
list(Outer),
[Outer.a, Outer.b],
)
def test_enum_of_types_with_nonmember(self):
"""Support using Enum to refer to types deliberately."""
class MyTypes(Enum):
i = int
f = nonmember(float)
s = str
self.assertEqual(MyTypes.i.value, int)
self.assertTrue(MyTypes.f is float)
self.assertEqual(MyTypes.s.value, str)
class Foo:
pass
class Bar:
pass
class MyTypes2(Enum):
a = Foo
b = nonmember(Bar)
self.assertEqual(MyTypes2.a.value, Foo)
self.assertTrue(MyTypes2.b is Bar)
class SpamEnumIsInner:
pass
class SpamEnum(Enum):
spam = nonmember(SpamEnumIsInner)
self.assertTrue(SpamEnum.spam is SpamEnumIsInner)
def test_nested_classes_in_enum_with_member(self):
"""Support locally-defined nested classes."""
class Outer(Enum):
a = 1
b = 2
@member
class Inner(Enum):
foo = 10
bar = 11
self.assertTrue(isinstance(Outer.Inner, Outer))
self.assertEqual(Outer.a.value, 1)
self.assertEqual(Outer.Inner.value.foo.value, 10)
self.assertEqual(
list(Outer.Inner.value),
[Outer.Inner.value.foo, Outer.Inner.value.bar],
)
self.assertEqual(
list(Outer),
[Outer.a, Outer.b, Outer.Inner],
)
def test_enum_with_value_name(self):
class Huh(Enum):
name = 1
value = 2
self.assertEqual(list(Huh), [Huh.name, Huh.value])
self.assertIs(type(Huh.name), Huh)
self.assertEqual(Huh.name.name, 'name')
self.assertEqual(Huh.name.value, 1)
def test_inherited_data_type(self):
class HexInt(int):
__qualname__ = 'HexInt'
def __repr__(self):
return hex(self)
class MyEnum(HexInt, enum.Enum):
__qualname__ = 'MyEnum'
A = 1
B = 2
C = 3
self.assertEqual(repr(MyEnum.A), '<MyEnum.A: 0x1>')
globals()['HexInt'] = HexInt
globals()['MyEnum'] = MyEnum
test_pickle_dump_load(self.assertIs, MyEnum.A)
test_pickle_dump_load(self.assertIs, MyEnum)
#
class SillyInt(HexInt):
__qualname__ = 'SillyInt'
pass
class MyOtherEnum(SillyInt, enum.Enum):
__qualname__ = 'MyOtherEnum'
D = 4
E = 5
F = 6
self.assertIs(MyOtherEnum._member_type_, SillyInt)
globals()['SillyInt'] = SillyInt
globals()['MyOtherEnum'] = MyOtherEnum
test_pickle_dump_load(self.assertIs, MyOtherEnum.E)
test_pickle_dump_load(self.assertIs, MyOtherEnum)
#
# This did not work in 3.10, but does now with pickling by name
class UnBrokenInt(int):
__qualname__ = 'UnBrokenInt'
def __new__(cls, value):
return int.__new__(cls, value)
class MyUnBrokenEnum(UnBrokenInt, Enum):
__qualname__ = 'MyUnBrokenEnum'
G = 7
H = 8
I = 9
self.assertIs(MyUnBrokenEnum._member_type_, UnBrokenInt)
self.assertIs(MyUnBrokenEnum(7), MyUnBrokenEnum.G)
globals()['UnBrokenInt'] = UnBrokenInt
globals()['MyUnBrokenEnum'] = MyUnBrokenEnum
test_pickle_dump_load(self.assertIs, MyUnBrokenEnum.I)
test_pickle_dump_load(self.assertIs, MyUnBrokenEnum)
def test_floatenum_fromhex(self):
h = float.hex(FloatStooges.MOE.value)
self.assertIs(FloatStooges.fromhex(h), FloatStooges.MOE)
h = float.hex(FloatStooges.MOE.value + 0.01)
with self.assertRaises(ValueError):
FloatStooges.fromhex(h)
def test_programmatic_function_type(self):
MinorEnum = Enum('MinorEnum', 'june july august', type=int)
lst = list(MinorEnum)
self.assertEqual(len(lst), len(MinorEnum))
self.assertEqual(len(MinorEnum), 3, MinorEnum)
self.assertEqual(
[MinorEnum.june, MinorEnum.july, MinorEnum.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = MinorEnum(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, MinorEnum)
self.assertIs(type(e), MinorEnum)
def test_programmatic_function_string_with_start(self):
MinorEnum = Enum('MinorEnum', 'june july august', start=10)
lst = list(MinorEnum)
self.assertEqual(len(lst), len(MinorEnum))
self.assertEqual(len(MinorEnum), 3, MinorEnum)
self.assertEqual(
[MinorEnum.june, MinorEnum.july, MinorEnum.august],
lst,
)
for i, month in enumerate('june july august'.split(), 10):
e = MinorEnum(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, MinorEnum)
self.assertIs(type(e), MinorEnum)
def test_programmatic_function_type_with_start(self):
MinorEnum = Enum('MinorEnum', 'june july august', type=int, start=30)
lst = list(MinorEnum)
self.assertEqual(len(lst), len(MinorEnum))
self.assertEqual(len(MinorEnum), 3, MinorEnum)
self.assertEqual(
[MinorEnum.june, MinorEnum.july, MinorEnum.august],
lst,
)
for i, month in enumerate('june july august'.split(), 30):
e = MinorEnum(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, MinorEnum)
self.assertIs(type(e), MinorEnum)
def test_programmatic_function_string_list_with_start(self):
MinorEnum = Enum('MinorEnum', ['june', 'july', 'august'], start=20)
lst = list(MinorEnum)
self.assertEqual(len(lst), len(MinorEnum))
self.assertEqual(len(MinorEnum), 3, MinorEnum)
self.assertEqual(
[MinorEnum.june, MinorEnum.july, MinorEnum.august],
lst,
)
for i, month in enumerate('june july august'.split(), 20):
e = MinorEnum(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, MinorEnum)
self.assertIs(type(e), MinorEnum)
def test_programmatic_function_type_from_subclass(self):
MinorEnum = IntEnum('MinorEnum', 'june july august')
lst = list(MinorEnum)
self.assertEqual(len(lst), len(MinorEnum))
self.assertEqual(len(MinorEnum), 3, MinorEnum)
self.assertEqual(
[MinorEnum.june, MinorEnum.july, MinorEnum.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = MinorEnum(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, MinorEnum)
self.assertIs(type(e), MinorEnum)
def test_programmatic_function_type_from_subclass_with_start(self):
MinorEnum = IntEnum('MinorEnum', 'june july august', start=40)
lst = list(MinorEnum)
self.assertEqual(len(lst), len(MinorEnum))
self.assertEqual(len(MinorEnum), 3, MinorEnum)
self.assertEqual(
[MinorEnum.june, MinorEnum.july, MinorEnum.august],
lst,
)
for i, month in enumerate('june july august'.split(), 40):
e = MinorEnum(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, MinorEnum)
self.assertIs(type(e), MinorEnum)
def test_intenum_from_bytes(self):
self.assertIs(IntStooges.from_bytes(b'\x00\x03', 'big'), IntStooges.MOE)
with self.assertRaises(ValueError):
IntStooges.from_bytes(b'\x00\x05', 'big')
def test_reserved_sunder_error(self):
with self.assertRaisesRegex(
ValueError,
'_sunder_ names, such as ._bad_., are reserved',
):
class Bad(Enum):
_bad_ = 1
def test_too_many_data_types(self):
with self.assertRaisesRegex(TypeError, 'too many data types'):
class Huh(str, int, Enum):
One = 1
class MyStr(str):
def hello(self):
return 'hello, %s' % self
class MyInt(int):
def repr(self):
return hex(self)
with self.assertRaisesRegex(TypeError, 'too many data types'):
class Huh(MyStr, MyInt, Enum):
One = 1
def test_pickle_enum(self):
if isinstance(Stooges, Exception):
raise Stooges
test_pickle_dump_load(self.assertIs, Stooges.CURLY)
test_pickle_dump_load(self.assertIs, Stooges)
def test_pickle_int(self):
if isinstance(IntStooges, Exception):
raise IntStooges
test_pickle_dump_load(self.assertIs, IntStooges.CURLY)
test_pickle_dump_load(self.assertIs, IntStooges)
def test_pickle_float(self):
if isinstance(FloatStooges, Exception):
raise FloatStooges
test_pickle_dump_load(self.assertIs, FloatStooges.CURLY)
test_pickle_dump_load(self.assertIs, FloatStooges)
def test_pickle_enum_function(self):
if isinstance(Answer, Exception):
raise Answer
test_pickle_dump_load(self.assertIs, Answer.him)
test_pickle_dump_load(self.assertIs, Answer)
def test_pickle_enum_function_with_module(self):
if isinstance(Question, Exception):
raise Question
test_pickle_dump_load(self.assertIs, Question.who)
test_pickle_dump_load(self.assertIs, Question)
def test_pickle_nested_class(self):
# would normally just have this directly in the class namespace
class NestedEnum(Enum):
twigs = 'common'
shiny = 'rare'
self.__class__.NestedEnum = NestedEnum
self.NestedEnum.__qualname__ = '%s.NestedEnum' % self.__class__.__name__
test_pickle_dump_load(self.assertIs, self.NestedEnum.twigs)
def test_pickle_by_name(self):
class ReplaceGlobalInt(IntEnum):
ONE = 1
TWO = 2
ReplaceGlobalInt.__reduce_ex__ = enum._reduce_ex_by_global_name
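        # _reduce_ex_by_global_name pickles a member as a bare global name, so
        # __reduce_ex__ returns just the member's name string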
for proto in range(HIGHEST_PROTOCOL):
self.assertEqual(ReplaceGlobalInt.TWO.__reduce_ex__(proto), 'TWO')
def test_pickle_explodes(self):
BadPickle = Enum(
'BadPickle', 'dill sweet bread-n-butter', module=__name__)
globals()['BadPickle'] = BadPickle
# now break BadPickle to test exception raising
enum._make_class_unpicklable(BadPickle)
test_pickle_exception(self.assertRaises, TypeError, BadPickle.dill)
test_pickle_exception(self.assertRaises, PicklingError, BadPickle)
def test_string_enum(self):
class SkillLevel(str, Enum):
master = 'what is the sound of one hand clapping?'
journeyman = 'why did the chicken cross the road?'
apprentice = 'knock, knock!'
self.assertEqual(SkillLevel.apprentice, 'knock, knock!')
def test_getattr_getitem(self):
class Period(Enum):
morning = 1
noon = 2
evening = 3
night = 4
self.assertIs(Period(2), Period.noon)
self.assertIs(getattr(Period, 'night'), Period.night)
self.assertIs(Period['morning'], Period.morning)
def test_getattr_dunder(self):
Season = self.Season
self.assertTrue(getattr(Season, '__eq__'))
def test_iteration_order(self):
class Season(Enum):
SUMMER = 2
WINTER = 4
AUTUMN = 3
SPRING = 1
self.assertEqual(
list(Season),
[Season.SUMMER, Season.WINTER, Season.AUTUMN, Season.SPRING],
)
def test_subclassing(self):
if isinstance(Name, Exception):
raise Name
self.assertEqual(Name.BDFL, 'Guido van Rossum')
        self.assertEqual(Name.BDFL, Name('Guido van Rossum'))
self.assertIs(Name.BDFL, getattr(Name, 'BDFL'))
test_pickle_dump_load(self.assertIs, Name.BDFL)
def test_extending(self):
class Color(Enum):
red = 1
green = 2
blue = 3
#
with self.assertRaises(TypeError):
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
#
with self.assertRaisesRegex(TypeError, "<enum .EvenMoreColor.> cannot extend <enum .Color.>"):
class EvenMoreColor(Color, IntEnum):
                chartreuse = 7
#
with self.assertRaisesRegex(TypeError, "<enum .Foo.> cannot extend <enum .Color.>"):
Color('Foo', ('pink', 'black'))
def test_exclude_methods(self):
class whatever(Enum):
this = 'that'
these = 'those'
def really(self):
return 'no, not %s' % self.value
self.assertIsNot(type(whatever.really), whatever)
self.assertEqual(whatever.this.really(), 'no, not that')
def test_wrong_inheritance_order(self):
with self.assertRaises(TypeError):
class Wrong(Enum, str):
NotHere = 'error before this point'
def test_intenum_transitivity(self):
class number(IntEnum):
one = 1
two = 2
three = 3
class numero(IntEnum):
uno = 1
dos = 2
tres = 3
self.assertEqual(number.one, numero.uno)
self.assertEqual(number.two, numero.dos)
self.assertEqual(number.three, numero.tres)
def test_wrong_enum_in_call(self):
class Monochrome(Enum):
black = 0
white = 1
class Gender(Enum):
male = 0
female = 1
self.assertRaises(ValueError, Monochrome, Gender.male)
def test_wrong_enum_in_mixed_call(self):
class Monochrome(IntEnum):
black = 0
white = 1
class Gender(Enum):
male = 0
female = 1
self.assertRaises(ValueError, Monochrome, Gender.male)
def test_mixed_enum_in_call_1(self):
class Monochrome(IntEnum):
black = 0
white = 1
class Gender(IntEnum):
male = 0
female = 1
self.assertIs(Monochrome(Gender.female), Monochrome.white)
def test_mixed_enum_in_call_2(self):
class Monochrome(Enum):
black = 0
white = 1
class Gender(IntEnum):
male = 0
female = 1
self.assertIs(Monochrome(Gender.male), Monochrome.black)
def test_flufl_enum(self):
class Fluflnum(Enum):
def __int__(self):
return int(self.value)
class MailManOptions(Fluflnum):
option1 = 1
option2 = 2
option3 = 3
self.assertEqual(int(MailManOptions.option1), 1)
def test_introspection(self):
class Number(IntEnum):
one = 100
two = 200
self.assertIs(Number.one._member_type_, int)
self.assertIs(Number._member_type_, int)
class String(str, Enum):
yarn = 'soft'
rope = 'rough'
wire = 'hard'
self.assertIs(String.yarn._member_type_, str)
self.assertIs(String._member_type_, str)
class Plain(Enum):
vanilla = 'white'
one = 1
self.assertIs(Plain.vanilla._member_type_, object)
self.assertIs(Plain._member_type_, object)
def test_no_such_enum_member(self):
class Color(Enum):
red = 1
green = 2
blue = 3
with self.assertRaises(ValueError):
Color(4)
with self.assertRaises(KeyError):
Color['chartreuse']
    # tests that need to be evaluated for moving
def test_multiple_mixin_mro(self):
class auto_enum(type(Enum)):
def __new__(metacls, cls, bases, classdict):
temp = type(classdict)()
temp._cls_name = cls
names = set(classdict._member_names)
i = 0
for k in classdict._member_names:
v = classdict[k]
if v is Ellipsis:
v = i
else:
i = v
i += 1
temp[k] = v
for k, v in classdict.items():
if k not in names:
temp[k] = v
return super(auto_enum, metacls).__new__(
metacls, cls, bases, temp)
class AutoNumberedEnum(Enum, metaclass=auto_enum):
pass
class AutoIntEnum(IntEnum, metaclass=auto_enum):
pass
class TestAutoNumber(AutoNumberedEnum):
a = ...
b = 3
c = ...
class TestAutoInt(AutoIntEnum):
a = ...
b = 3
c = ...
def test_subclasses_with_getnewargs(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __getnewargs__(self):
return self._args
@bltns.property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(
type(self).__name__,
self.__name__,
int.__repr__(self),
)
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
                temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp,
)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_getnewargs_ex(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __getnewargs_ex__(self):
return self._args, {}
@bltns.property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(
type(self).__name__,
self.__name__,
int.__repr__(self),
)
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
                temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp,
)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_reduce(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __reduce__(self):
return self.__class__, self._args
@bltns.property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(
type(self).__name__,
self.__name__,
int.__repr__(self),
)
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
                temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp,
)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_reduce_ex(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __reduce_ex__(self, proto):
return self.__class__, self._args
@bltns.property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(
type(self).__name__,
self.__name__,
int.__repr__(self),
)
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
                temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp,
)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_without_direct_pickle_support(self):
class NamedInt(int):
__qualname__ = 'NamedInt'
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
@bltns.property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(
type(self).__name__,
self.__name__,
int.__repr__(self),
)
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
                temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
                        temp,
                    )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI'
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_direct_pickle_support(self):
class NamedInt(int):
__qualname__ = 'NamedInt'
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
@bltns.property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(
type(self).__name__,
self.__name__,
int.__repr__(self),
)
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
                temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp,
)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI'
x = ('the-x', 1)
y = ('the-y', 2)
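            # pickle by name: reconstruct the member via getattr(NEI, name)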
def __reduce_ex__(self, proto):
return getattr, (self.__class__, self._name_)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_tuple_subclass(self):
class SomeTuple(tuple, Enum):
__qualname__ = 'SomeTuple' # needed for pickle protocol 4
first = (1, 'for the money')
second = (2, 'for the show')
third = (3, 'for the music')
self.assertIs(type(SomeTuple.first), SomeTuple)
self.assertIsInstance(SomeTuple.second, tuple)
self.assertEqual(SomeTuple.third, (3, 'for the music'))
globals()['SomeTuple'] = SomeTuple
test_pickle_dump_load(self.assertIs, SomeTuple.first)
def test_duplicate_values_give_unique_enum_items(self):
class AutoNumber(Enum):
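            # the custom __new__ below assigns sequential values, so the three
            # identical () definitions become distinct members rather than aliases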
first = ()
second = ()
third = ()
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def __int__(self):
return int(self._value_)
self.assertEqual(
list(AutoNumber),
[AutoNumber.first, AutoNumber.second, AutoNumber.third],
)
self.assertEqual(int(AutoNumber.second), 2)
self.assertEqual(AutoNumber.third.value, 3)
self.assertIs(AutoNumber(1), AutoNumber.first)
def test_inherited_new_from_enhanced_enum(self):
class AutoNumber(Enum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def __int__(self):
return int(self._value_)
class Color(AutoNumber):
red = ()
green = ()
blue = ()
self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
self.assertEqual(list(map(int, Color)), [1, 2, 3])
def test_inherited_new_from_mixed_enum(self):
class AutoNumber(IntEnum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = int.__new__(cls, value)
obj._value_ = value
return obj
class Color(AutoNumber):
red = ()
green = ()
blue = ()
self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
self.assertEqual(list(map(int, Color)), [1, 2, 3])
def test_equality(self):
class OrdinaryEnum(Enum):
a = 1
self.assertEqual(ALWAYS_EQ, OrdinaryEnum.a)
self.assertEqual(OrdinaryEnum.a, ALWAYS_EQ)
def test_ordered_mixin(self):
class OrderedEnum(Enum):
def __ge__(self, other):
if self.__class__ is other.__class__:
return self._value_ >= other._value_
return NotImplemented
def __gt__(self, other):
if self.__class__ is other.__class__:
return self._value_ > other._value_
return NotImplemented
def __le__(self, other):
if self.__class__ is other.__class__:
return self._value_ <= other._value_
return NotImplemented
def __lt__(self, other):
if self.__class__ is other.__class__:
return self._value_ < other._value_
return NotImplemented
class Grade(OrderedEnum):
A = 5
B = 4
C = 3
D = 2
F = 1
self.assertGreater(Grade.A, Grade.B)
self.assertLessEqual(Grade.F, Grade.C)
self.assertLess(Grade.D, Grade.A)
self.assertGreaterEqual(Grade.B, Grade.B)
self.assertEqual(Grade.B, Grade.B)
self.assertNotEqual(Grade.C, Grade.D)
def test_extending2(self):
class Shade(Enum):
def shade(self):
print(self.name)
class Color(Shade):
red = 1
green = 2
blue = 3
with self.assertRaises(TypeError):
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
def test_extending3(self):
class Shade(Enum):
def shade(self):
return self.name
class Color(Shade):
def hex(self):
return '%s hexlified!' % self.value
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
self.assertEqual(MoreColor.magenta.hex(), '5 hexlified!')
def test_subclass_duplicate_name(self):
class Base(Enum):
def test(self):
pass
class Test(Base):
test = 1
self.assertIs(type(Test.test), Test)
def test_subclass_duplicate_name_dynamic(self):
from types import DynamicClassAttribute
class Base(Enum):
@DynamicClassAttribute
def test(self):
return 'dynamic'
class Test(Base):
test = 1
self.assertEqual(Test.test.test, 'dynamic')
self.assertEqual(Test.test.value, 1)
class Base2(Enum):
@enum.property
def flash(self):
return 'flashy dynamic'
class Test(Base2):
flash = 1
self.assertEqual(Test.flash.flash, 'flashy dynamic')
self.assertEqual(Test.flash.value, 1)
def test_no_duplicates(self):
class UniqueEnum(Enum):
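            # __init__ runs once per new member, so seeing an already-used value
            # means an alias is being created, which this class forbids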
def __init__(self, *args):
cls = self.__class__
if any(self.value == e.value for e in cls):
a = self.name
e = cls(self.value).name
raise ValueError(
"aliases not allowed in UniqueEnum: %r --> %r"
% (a, e)
)
class Color(UniqueEnum):
red = 1
green = 2
blue = 3
with self.assertRaises(ValueError):
class Color(UniqueEnum):
red = 1
green = 2
blue = 3
grene = 2
def test_init(self):
class Planet(Enum):
MERCURY = (3.303e+23, 2.4397e6)
VENUS = (4.869e+24, 6.0518e6)
EARTH = (5.976e+24, 6.37814e6)
MARS = (6.421e+23, 3.3972e6)
JUPITER = (1.9e+27, 7.1492e7)
SATURN = (5.688e+26, 6.0268e7)
URANUS = (8.686e+25, 2.5559e7)
NEPTUNE = (1.024e+26, 2.4746e7)
def __init__(self, mass, radius):
self.mass = mass # in kilograms
self.radius = radius # in meters
@enum.property
def surface_gravity(self):
                # universal gravitational constant (m^3 kg^-1 s^-2)
G = 6.67300E-11
return G * self.mass / (self.radius * self.radius)
self.assertEqual(round(Planet.EARTH.surface_gravity, 2), 9.80)
self.assertEqual(Planet.EARTH.value, (5.976e+24, 6.37814e6))
def test_ignore(self):
class Period(timedelta, Enum):
'''
different lengths of time
'''
def __new__(cls, value, period):
obj = timedelta.__new__(cls, value)
obj._value_ = value
obj.period = period
return obj
_ignore_ = 'Period i'
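            # 'Period' and 'i' are scratch names used below to build the members;
            # listing them in _ignore_ keeps them from becoming members themselves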
Period = vars()
for i in range(13):
Period['month_%d' % i] = i*30, 'month'
for i in range(53):
Period['week_%d' % i] = i*7, 'week'
for i in range(32):
Period['day_%d' % i] = i, 'day'
OneDay = day_1
OneWeek = week_1
OneMonth = month_1
self.assertFalse(hasattr(Period, '_ignore_'))
self.assertFalse(hasattr(Period, 'Period'))
self.assertFalse(hasattr(Period, 'i'))
self.assertTrue(isinstance(Period.day_1, timedelta))
self.assertTrue(Period.month_1 is Period.day_30)
self.assertTrue(Period.week_4 is Period.day_28)
def test_nonhash_value(self):
class AutoNumberInAList(Enum):
def __new__(cls):
value = [len(cls.__members__) + 1]
obj = object.__new__(cls)
obj._value_ = value
return obj
class ColorInAList(AutoNumberInAList):
red = ()
green = ()
blue = ()
self.assertEqual(list(ColorInAList), [ColorInAList.red, ColorInAList.green, ColorInAList.blue])
for enum, value in zip(ColorInAList, range(3)):
value += 1
self.assertEqual(enum.value, [value])
self.assertIs(ColorInAList([value]), enum)
def test_conflicting_types_resolved_in_new(self):
class LabelledIntEnum(int, Enum):
def __new__(cls, *args):
value, label = args
obj = int.__new__(cls, value)
obj.label = label
obj._value_ = value
return obj
class LabelledList(LabelledIntEnum):
unprocessed = (1, "Unprocessed")
payment_complete = (2, "Payment Complete")
self.assertEqual(list(LabelledList), [LabelledList.unprocessed, LabelledList.payment_complete])
self.assertEqual(LabelledList.unprocessed, 1)
self.assertEqual(LabelledList(1), LabelledList.unprocessed)
def test_default_missing_no_chained_exception(self):
class Color(Enum):
RED = 1
GREEN = 2
BLUE = 3
try:
Color(7)
except ValueError as exc:
self.assertTrue(exc.__context__ is None)
else:
raise Exception('Exception not raised.')
def test_missing_override(self):
class Color(Enum):
red = 1
green = 2
blue = 3
@classmethod
def _missing_(cls, item):
if item == 'three':
return cls.blue
elif item == 'bad return':
# trigger internal error
return 5
elif item == 'error out':
raise ZeroDivisionError
else:
# trigger not found
return None
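        # returning a non-member from _missing_ raises TypeError chained onto the
        # original ValueError; an exception raised inside _missing_ propagates
        # with the same ValueError as its context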
self.assertIs(Color('three'), Color.blue)
try:
Color(7)
except ValueError as exc:
self.assertTrue(exc.__context__ is None)
else:
raise Exception('Exception not raised.')
try:
Color('bad return')
except TypeError as exc:
self.assertTrue(isinstance(exc.__context__, ValueError))
else:
raise Exception('Exception not raised.')
try:
Color('error out')
except ZeroDivisionError as exc:
self.assertTrue(isinstance(exc.__context__, ValueError))
else:
raise Exception('Exception not raised.')
def test_missing_exceptions_reset(self):
import gc
import weakref
#
class TestEnum(enum.Enum):
VAL1 = 'val1'
VAL2 = 'val2'
#
class Class1:
def __init__(self):
# Gracefully handle an exception of our own making
try:
raise ValueError()
except ValueError:
pass
#
class Class2:
def __init__(self):
# Gracefully handle an exception of Enum's making
try:
TestEnum('invalid_value')
except ValueError:
pass
# No strong refs here so these are free to die.
class_1_ref = weakref.ref(Class1())
class_2_ref = weakref.ref(Class2())
#
# The exception raised by Enum used to create a reference loop and thus
# Class2 instances would stick around until the next garbage collection
# cycle, unlike Class1. Verify Class2 no longer does this.
gc.collect() # For PyPy or other GCs.
self.assertIs(class_1_ref(), None)
self.assertIs(class_2_ref(), None)
def test_multiple_mixin(self):
class MaxMixin:
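            # the classproperty helper computes MAX lazily from the member count
            # and caches it on the class on first access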
@classproperty
def MAX(cls):
max = len(cls)
cls.MAX = max
return max
class StrMixin:
def __str__(self):
return self._name_.lower()
class SomeEnum(Enum):
def behavior(self):
return 'booyah'
class AnotherEnum(Enum):
def behavior(self):
return 'nuhuh!'
def social(self):
return "what's up?"
class Color(MaxMixin, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 3)
self.assertEqual(Color.MAX, 3)
self.assertEqual(str(Color.BLUE), 'Color.BLUE')
class Color(MaxMixin, StrMixin, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
__str__ = StrMixin.__str__ # needed as of 3.11
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 3)
self.assertEqual(Color.MAX, 3)
self.assertEqual(str(Color.BLUE), 'blue')
class Color(StrMixin, MaxMixin, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
__str__ = StrMixin.__str__ # needed as of 3.11
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 3)
self.assertEqual(Color.MAX, 3)
self.assertEqual(str(Color.BLUE), 'blue')
class CoolColor(StrMixin, SomeEnum, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
__str__ = StrMixin.__str__ # needed as of 3.11
self.assertEqual(CoolColor.RED.value, 1)
self.assertEqual(CoolColor.GREEN.value, 2)
self.assertEqual(CoolColor.BLUE.value, 3)
self.assertEqual(str(CoolColor.BLUE), 'blue')
self.assertEqual(CoolColor.RED.behavior(), 'booyah')
class CoolerColor(StrMixin, AnotherEnum, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
__str__ = StrMixin.__str__ # needed as of 3.11
self.assertEqual(CoolerColor.RED.value, 1)
self.assertEqual(CoolerColor.GREEN.value, 2)
self.assertEqual(CoolerColor.BLUE.value, 3)
self.assertEqual(str(CoolerColor.BLUE), 'blue')
self.assertEqual(CoolerColor.RED.behavior(), 'nuhuh!')
self.assertEqual(CoolerColor.RED.social(), "what's up?")
class CoolestColor(StrMixin, SomeEnum, AnotherEnum):
RED = auto()
GREEN = auto()
BLUE = auto()
__str__ = StrMixin.__str__ # needed as of 3.11
self.assertEqual(CoolestColor.RED.value, 1)
self.assertEqual(CoolestColor.GREEN.value, 2)
self.assertEqual(CoolestColor.BLUE.value, 3)
self.assertEqual(str(CoolestColor.BLUE), 'blue')
self.assertEqual(CoolestColor.RED.behavior(), 'booyah')
self.assertEqual(CoolestColor.RED.social(), "what's up?")
class ConfusedColor(StrMixin, AnotherEnum, SomeEnum):
RED = auto()
GREEN = auto()
BLUE = auto()
__str__ = StrMixin.__str__ # needed as of 3.11
self.assertEqual(ConfusedColor.RED.value, 1)
self.assertEqual(ConfusedColor.GREEN.value, 2)
self.assertEqual(ConfusedColor.BLUE.value, 3)
self.assertEqual(str(ConfusedColor.BLUE), 'blue')
self.assertEqual(ConfusedColor.RED.behavior(), 'nuhuh!')
self.assertEqual(ConfusedColor.RED.social(), "what's up?")
class ReformedColor(StrMixin, IntEnum, SomeEnum, AnotherEnum):
RED = auto()
GREEN = auto()
BLUE = auto()
__str__ = StrMixin.__str__ # needed as of 3.11
self.assertEqual(ReformedColor.RED.value, 1)
self.assertEqual(ReformedColor.GREEN.value, 2)
self.assertEqual(ReformedColor.BLUE.value, 3)
self.assertEqual(str(ReformedColor.BLUE), 'blue')
self.assertEqual(ReformedColor.RED.behavior(), 'booyah')
        self.assertEqual(ReformedColor.RED.social(), "what's up?")
self.assertTrue(issubclass(ReformedColor, int))
def test_multiple_inherited_mixin(self):
@unique
class Decision1(StrEnum):
REVERT = "REVERT"
REVERT_ALL = "REVERT_ALL"
RETRY = "RETRY"
class MyEnum(StrEnum):
pass
@unique
class Decision2(MyEnum):
REVERT = "REVERT"
REVERT_ALL = "REVERT_ALL"
RETRY = "RETRY"
def test_multiple_mixin_inherited(self):
class MyInt(int):
def __new__(cls, value):
return super().__new__(cls, value)
class HexMixin:
def __repr__(self):
return hex(self)
class MyIntEnum(HexMixin, MyInt, enum.Enum):
__repr__ = HexMixin.__repr__
class Foo(MyIntEnum):
TEST = 1
self.assertTrue(isinstance(Foo.TEST, MyInt))
self.assertEqual(Foo._member_type_, MyInt)
self.assertEqual(repr(Foo.TEST), "0x1")
class Fee(MyIntEnum):
TEST = 1
def __new__(cls, value):
value += 1
member = int.__new__(cls, value)
member._value_ = value
return member
self.assertEqual(Fee.TEST, 2)
def test_multiple_mixin_with_common_data_type(self):
class CaseInsensitiveStrEnum(str, Enum):
@classmethod
def _missing_(cls, value):
for member in cls._member_map_.values():
if member._value_.lower() == value.lower():
return member
return super()._missing_(value)
#
class LenientStrEnum(str, Enum):
def __init__(self, *args):
self._valid = True
@classmethod
def _missing_(cls, value):
unknown = cls._member_type_.__new__(cls, value)
unknown._valid = False
unknown._name_ = value.upper()
unknown._value_ = value
cls._member_map_[value] = unknown
return unknown
@enum.property
def valid(self):
return self._valid
#
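        # JobStatus combines both mixins: lookups are case-insensitive, and
        # unknown values become "invalid" pseudo-members instead of raising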
class JobStatus(CaseInsensitiveStrEnum, LenientStrEnum):
ACTIVE = "active"
PENDING = "pending"
TERMINATED = "terminated"
#
JS = JobStatus
self.assertEqual(list(JobStatus), [JS.ACTIVE, JS.PENDING, JS.TERMINATED])
self.assertEqual(JS.ACTIVE, 'active')
self.assertEqual(JS.ACTIVE.value, 'active')
self.assertIs(JS('Active'), JS.ACTIVE)
self.assertTrue(JS.ACTIVE.valid)
missing = JS('missing')
self.assertEqual(list(JobStatus), [JS.ACTIVE, JS.PENDING, JS.TERMINATED])
self.assertEqual(JS.ACTIVE, 'active')
self.assertEqual(JS.ACTIVE.value, 'active')
self.assertIs(JS('Active'), JS.ACTIVE)
self.assertTrue(JS.ACTIVE.valid)
self.assertTrue(isinstance(missing, JS))
self.assertFalse(missing.valid)
def test_empty_globals(self):
# bpo-35717: sys._getframe(2).f_globals['__name__'] fails with KeyError
# when using compile and exec because f_globals is empty
code = "from enum import Enum; Enum('Animal', 'ANT BEE CAT DOG')"
code = compile(code, "<string>", "exec")
global_ns = {}
local_ls = {}
exec(code, global_ns, local_ls)
def test_strenum(self):
class GoodStrEnum(StrEnum):
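            # tuple values are passed to str(), so bytes plus an encoding (and
            # optional errors) are decoded into the member's string value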
one = '1'
two = '2'
three = b'3', 'ascii'
four = b'4', 'latin1', 'strict'
self.assertEqual(GoodStrEnum.one, '1')
self.assertEqual(str(GoodStrEnum.one), '1')
self.assertEqual('{}'.format(GoodStrEnum.one), '1')
self.assertEqual(GoodStrEnum.one, str(GoodStrEnum.one))
self.assertEqual(GoodStrEnum.one, '{}'.format(GoodStrEnum.one))
self.assertEqual(repr(GoodStrEnum.one), "<GoodStrEnum.one: '1'>")
#
class DumbMixin:
def __str__(self):
return "don't do this"
class DumbStrEnum(DumbMixin, StrEnum):
five = '5'
six = '6'
seven = '7'
__str__ = DumbMixin.__str__ # needed as of 3.11
self.assertEqual(DumbStrEnum.seven, '7')
self.assertEqual(str(DumbStrEnum.seven), "don't do this")
#
class EnumMixin(Enum):
def hello(self):
print('hello from %s' % (self, ))
class HelloEnum(EnumMixin, StrEnum):
eight = '8'
self.assertEqual(HelloEnum.eight, '8')
self.assertEqual(HelloEnum.eight, str(HelloEnum.eight))
#
class GoodbyeMixin:
def goodbye(self):
                print('%s wishes you a fond farewell' % (self, ))
class GoodbyeEnum(GoodbyeMixin, EnumMixin, StrEnum):
nine = '9'
self.assertEqual(GoodbyeEnum.nine, '9')
self.assertEqual(GoodbyeEnum.nine, str(GoodbyeEnum.nine))
#
with self.assertRaisesRegex(TypeError, '1 is not a string'):
class FirstFailedStrEnum(StrEnum):
one = 1
two = '2'
with self.assertRaisesRegex(TypeError, "2 is not a string"):
class SecondFailedStrEnum(StrEnum):
one = '1'
two = 2,
three = '3'
with self.assertRaisesRegex(TypeError, '2 is not a string'):
class ThirdFailedStrEnum(StrEnum):
one = '1'
two = 2
with self.assertRaisesRegex(TypeError, 'encoding must be a string, not %r' % (sys.getdefaultencoding, )):
class ThirdFailedStrEnum(StrEnum):
one = '1'
two = b'2', sys.getdefaultencoding
with self.assertRaisesRegex(TypeError, 'errors must be a string, not 9'):
class ThirdFailedStrEnum(StrEnum):
one = '1'
two = b'2', 'ascii', 9
def test_custom_strenum(self):
class CustomStrEnum(str, Enum):
pass
class OkayEnum(CustomStrEnum):
one = '1'
two = '2'
three = b'3', 'ascii'
four = b'4', 'latin1', 'strict'
self.assertEqual(OkayEnum.one, '1')
self.assertEqual(str(OkayEnum.one), 'OkayEnum.one')
self.assertEqual('{}'.format(OkayEnum.one), 'OkayEnum.one')
self.assertEqual(repr(OkayEnum.one), "<OkayEnum.one: '1'>")
#
class DumbMixin:
def __str__(self):
return "don't do this"
class DumbStrEnum(DumbMixin, CustomStrEnum):
five = '5'
six = '6'
seven = '7'
__str__ = DumbMixin.__str__ # needed as of 3.11
self.assertEqual(DumbStrEnum.seven, '7')
self.assertEqual(str(DumbStrEnum.seven), "don't do this")
#
class EnumMixin(Enum):
def hello(self):
print('hello from %s' % (self, ))
class HelloEnum(EnumMixin, CustomStrEnum):
eight = '8'
self.assertEqual(HelloEnum.eight, '8')
self.assertEqual(str(HelloEnum.eight), 'HelloEnum.eight')
#
class GoodbyeMixin:
def goodbye(self):
                print('%s wishes you a fond farewell' % (self, ))
class GoodbyeEnum(GoodbyeMixin, EnumMixin, CustomStrEnum):
nine = '9'
self.assertEqual(GoodbyeEnum.nine, '9')
self.assertEqual(str(GoodbyeEnum.nine), 'GoodbyeEnum.nine')
#
class FirstFailedStrEnum(CustomStrEnum):
one = 1 # this will become '1'
two = '2'
class SecondFailedStrEnum(CustomStrEnum):
one = '1'
two = 2, # this will become '2'
three = '3'
class ThirdFailedStrEnum(CustomStrEnum):
one = '1'
two = 2 # this will become '2'
with self.assertRaisesRegex(TypeError, '.encoding. must be str, not '):
class ThirdFailedStrEnum(CustomStrEnum):
one = '1'
two = b'2', sys.getdefaultencoding
with self.assertRaisesRegex(TypeError, '.errors. must be str, not '):
class ThirdFailedStrEnum(CustomStrEnum):
one = '1'
two = b'2', 'ascii', 9
def test_missing_value_error(self):
with self.assertRaisesRegex(TypeError, "_value_ not set in __new__"):
class Combined(str, Enum):
#
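                # the custom __new__ never assigns _value_, and enum can't rebuild
                # one from the (value, sequence) tuple here, so class creation fails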
def __new__(cls, value, sequence):
enum = str.__new__(cls, value)
if '(' in value:
fis_name, segment = value.split('(', 1)
segment = segment.strip(' )')
else:
fis_name = value
segment = None
enum.fis_name = fis_name
enum.segment = segment
enum.sequence = sequence
return enum
#
def __repr__(self):
return "<%s.%s>" % (self.__class__.__name__, self._name_)
#
key_type = 'An$(1,2)', 0
company_id = 'An$(3,2)', 1
code = 'An$(5,1)', 2
description = 'Bn$', 3
def test_private_variable_is_normal_attribute(self):
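        # double-underscore names are mangled to _Private__*, so they stay plain
        # class attributes rather than becoming members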
class Private(Enum):
__corporal = 'Radar'
__major_ = 'Hoolihan'
self.assertEqual(Private._Private__corporal, 'Radar')
self.assertEqual(Private._Private__major_, 'Hoolihan')
def test_exception_for_member_from_member_access(self):
with self.assertRaisesRegex(AttributeError, "<enum .Di.> member has no attribute .NO."):
class Di(Enum):
YES = 1
NO = 0
nope = Di.YES.NO
def test_dynamic_members_with_static_methods(self):
#
foo_defines = {'FOO_CAT': 'aloof', 'BAR_DOG': 'friendly', 'FOO_HORSE': 'big'}
class Foo(Enum):
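            # vars() in the class body is the namespace the Enum metaclass is
            # building, so update() adds members dynamically while ordinary
            # function definitions stay methods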
vars().update({
k: v
for k, v in foo_defines.items()
if k.startswith('FOO_')
})
def upper(self):
return self.value.upper()
self.assertEqual(list(Foo), [Foo.FOO_CAT, Foo.FOO_HORSE])
self.assertEqual(Foo.FOO_CAT.value, 'aloof')
self.assertEqual(Foo.FOO_HORSE.upper(), 'BIG')
#
with self.assertRaisesRegex(TypeError, "'FOO_CAT' already defined as 'aloof'"):
class FooBar(Enum):
vars().update({
k: v
for k, v in foo_defines.items()
if k.startswith('FOO_')
},
**{'FOO_CAT': 'small'},
)
def upper(self):
return self.value.upper()
def test_repr_with_dataclass(self):
"ensure dataclass-mixin has correct repr()"
from dataclasses import dataclass
@dataclass
class Foo:
__qualname__ = 'Foo'
a: int = 0
class Entries(Foo, Enum):
ENTRY1 = Foo(1)
self.assertEqual(repr(Entries.ENTRY1), '<Entries.ENTRY1: Foo(a=1)>')
def test_repr_with_non_data_type_mixin(self):
        # a non-data-type mixin is one that doesn't define __new__ (here, Foo)
class Foo:
def __init__(self, a):
self.a = a
def __repr__(self):
return f'Foo(a={self.a!r})'
class Entries(Foo, Enum):
ENTRY1 = Foo(1)
self.assertEqual(repr(Entries.ENTRY1), '<Entries.ENTRY1: Foo(a=1)>')
def test_value_backup_assign(self):
# check that enum will add missing values when custom __new__ does not
class Some(Enum):
def __new__(cls, val):
return object.__new__(cls)
x = 1
y = 2
self.assertEqual(Some.x.value, 1)
self.assertEqual(Some.y.value, 2)
def test_custom_flag_bitwise(self):
class MyIntFlag(int, Flag):
ONE = 1
TWO = 2
FOUR = 4
self.assertTrue(isinstance(MyIntFlag.ONE | MyIntFlag.TWO, MyIntFlag), MyIntFlag.ONE | MyIntFlag.TWO)
self.assertTrue(isinstance(MyIntFlag.ONE | 2, MyIntFlag))
class TestOrder(unittest.TestCase):
"test usage of the `_order_` attribute"
def test_same_members(self):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
def test_same_members_with_aliases(self):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
verde = green
def test_same_members_wrong_order(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue'
red = 1
blue = 3
green = 2
def test_order_has_extra_members(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue purple'
red = 1
green = 2
blue = 3
def test_order_has_extra_members_with_aliases(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue purple'
red = 1
green = 2
blue = 3
verde = green
def test_enum_has_extra_members(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
purple = 4
def test_enum_has_extra_members_with_aliases(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
purple = 4
verde = green
class OldTestFlag(unittest.TestCase):
"""Tests of the Flags."""
class Perm(Flag):
R, W, X = 4, 2, 1
class Open(Flag):
RO = 0
WO = 1
RW = 2
AC = 3
CE = 1<<19
class Color(Flag):
BLACK = 0
RED = 1
ROJO = 1
GREEN = 2
BLUE = 4
PURPLE = RED|BLUE
WHITE = RED|GREEN|BLUE
BLANCO = RED|GREEN|BLUE
def test_or(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual((i | j), Perm(i.value | j.value))
self.assertEqual((i | j).value, i.value | j.value)
self.assertIs(type(i | j), Perm)
for i in Perm:
self.assertIs(i | i, i)
Open = self.Open
self.assertIs(Open.RO | Open.CE, Open.CE)
def test_and(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
for j in values:
self.assertEqual((i & j).value, i.value & j.value)
self.assertIs(type(i & j), Perm)
for i in Perm:
self.assertIs(i & i, i)
self.assertIs(i & RWX, i)
self.assertIs(RWX & i, i)
Open = self.Open
self.assertIs(Open.RO & Open.CE, Open.RO)
def test_xor(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual((i ^ j).value, i.value ^ j.value)
self.assertIs(type(i ^ j), Perm)
for i in Perm:
self.assertIs(i ^ Perm(0), i)
self.assertIs(Perm(0) ^ i, i)
Open = self.Open
self.assertIs(Open.RO ^ Open.CE, Open.CE)
self.assertIs(Open.CE ^ Open.CE, Open.RO)
def test_invert(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
self.assertIs(type(~i), Perm)
self.assertEqual(~~i, i)
for i in Perm:
self.assertIs(~~i, i)
Open = self.Open
self.assertIs(Open.WO & ~Open.WO, Open.RO)
self.assertIs((Open.WO|Open.CE) & ~Open.WO, Open.CE)
def test_bool(self):
Perm = self.Perm
for f in Perm:
self.assertTrue(f)
Open = self.Open
for f in Open:
self.assertEqual(bool(f.value), bool(f))
def test_boundary(self):
self.assertIs(enum.Flag._boundary_, STRICT)
class Iron(Flag, boundary=STRICT):
ONE = 1
TWO = 2
EIGHT = 8
self.assertIs(Iron._boundary_, STRICT)
#
class Water(Flag, boundary=CONFORM):
ONE = 1
TWO = 2
EIGHT = 8
self.assertIs(Water._boundary_, CONFORM)
#
class Space(Flag, boundary=EJECT):
ONE = 1
TWO = 2
EIGHT = 8
self.assertIs(Space._boundary_, EJECT)
#
class Bizarre(Flag, boundary=KEEP):
b = 3
c = 4
d = 6
#
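        # STRICT rejects out-of-range values, CONFORM masks them down to known
        # flags, EJECT hands back the plain int, and KEEP preserves the raw value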
self.assertRaisesRegex(ValueError, 'invalid value 7', Iron, 7)
#
self.assertIs(Water(7), Water.ONE|Water.TWO)
self.assertIs(Water(~9), Water.TWO)
#
self.assertEqual(Space(7), 7)
        self.assertIs(type(Space(7)), int)
#
self.assertEqual(list(Bizarre), [Bizarre.c])
self.assertIs(Bizarre(3), Bizarre.b)
self.assertIs(Bizarre(6), Bizarre.d)
def test_iter(self):
Color = self.Color
Open = self.Open
self.assertEqual(list(Color), [Color.RED, Color.GREEN, Color.BLUE])
self.assertEqual(list(Open), [Open.WO, Open.RW, Open.CE])
    def test_programmatic_function_string(self):
Perm = Flag('Perm', 'R W X')
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
    def test_programmatic_function_string_with_start(self):
Perm = Flag('Perm', 'R W X', start=8)
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 8<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
    def test_programmatic_function_string_list(self):
Perm = Flag('Perm', ['R', 'W', 'X'])
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
    def test_programmatic_function_iterable(self):
Perm = Flag('Perm', (('R', 2), ('W', 8), ('X', 32)))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
    def test_programmatic_function_from_dict(self):
Perm = Flag('Perm', OrderedDict((('R', 2), ('W', 8), ('X', 32))))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_pickle(self):
if isinstance(FlagStooges, Exception):
raise FlagStooges
test_pickle_dump_load(self.assertIs, FlagStooges.CURLY|FlagStooges.MOE)
test_pickle_dump_load(self.assertIs, FlagStooges)
@unittest.skipIf(
python_version >= (3, 12),
'__contains__ now returns True/False for all inputs',
)
@unittest.expectedFailure
def test_contains_er(self):
Open = self.Open
Color = self.Color
self.assertFalse(Color.BLACK in Open)
self.assertFalse(Open.RO in Color)
with self.assertRaises(TypeError):
with self.assertWarns(DeprecationWarning):
'BLACK' in Color
with self.assertRaises(TypeError):
with self.assertWarns(DeprecationWarning):
'RO' in Open
with self.assertRaises(TypeError):
with self.assertWarns(DeprecationWarning):
1 in Color
with self.assertRaises(TypeError):
with self.assertWarns(DeprecationWarning):
1 in Open
@unittest.skipIf(
python_version < (3, 12),
        '__contains__ only works with enum members before 3.12',
)
@unittest.expectedFailure
def test_contains_tf(self):
Open = self.Open
Color = self.Color
self.assertFalse(Color.BLACK in Open)
self.assertFalse(Open.RO in Color)
self.assertFalse('BLACK' in Color)
self.assertFalse('RO' in Open)
self.assertTrue(1 in Color)
self.assertTrue(1 in Open)
def test_member_contains(self):
Perm = self.Perm
R, W, X = Perm
RW = R | W
RX = R | X
WX = W | X
RWX = R | W | X
self.assertTrue(R in RW)
self.assertTrue(R in RX)
self.assertTrue(R in RWX)
self.assertTrue(W in RW)
self.assertTrue(W in WX)
self.assertTrue(W in RWX)
self.assertTrue(X in RX)
self.assertTrue(X in WX)
self.assertTrue(X in RWX)
self.assertFalse(R in WX)
self.assertFalse(W in RX)
self.assertFalse(X in RW)
def test_member_iter(self):
Color = self.Color
self.assertEqual(list(Color.BLACK), [])
self.assertEqual(list(Color.PURPLE), [Color.RED, Color.BLUE])
self.assertEqual(list(Color.BLUE), [Color.BLUE])
self.assertEqual(list(Color.GREEN), [Color.GREEN])
self.assertEqual(list(Color.WHITE), [Color.RED, Color.GREEN, Color.BLUE])
self.assertEqual(list(Color.WHITE), [Color.RED, Color.GREEN, Color.BLUE])
def test_member_length(self):
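        # len() of a Flag member is the number of canonical flags it contains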
self.assertEqual(self.Color.__len__(self.Color.BLACK), 0)
self.assertEqual(self.Color.__len__(self.Color.GREEN), 1)
self.assertEqual(self.Color.__len__(self.Color.PURPLE), 2)
self.assertEqual(self.Color.__len__(self.Color.BLANCO), 3)
def test_number_reset_and_order_cleanup(self):
class Confused(Flag):
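            # DOS reuses TWO's value and becomes an alias; auto() then resumes
            # from the highest value seen, so EIGHT is 8 and SIXTEEN is 16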
_order_ = 'ONE TWO FOUR DOS EIGHT SIXTEEN'
ONE = auto()
TWO = auto()
FOUR = auto()
DOS = 2
EIGHT = auto()
SIXTEEN = auto()
self.assertEqual(
list(Confused),
[Confused.ONE, Confused.TWO, Confused.FOUR, Confused.EIGHT, Confused.SIXTEEN])
self.assertIs(Confused.TWO, Confused.DOS)
self.assertEqual(Confused.DOS._value_, 2)
self.assertEqual(Confused.EIGHT._value_, 8)
self.assertEqual(Confused.SIXTEEN._value_, 16)
def test_aliases(self):
Color = self.Color
self.assertEqual(Color(1).name, 'RED')
self.assertEqual(Color['ROJO'].name, 'RED')
self.assertEqual(Color(7).name, 'WHITE')
self.assertEqual(Color['BLANCO'].name, 'WHITE')
self.assertIs(Color.BLANCO, Color.WHITE)
Open = self.Open
self.assertIs(Open['AC'], Open.AC)
def test_auto_number(self):
class Color(Flag):
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 1)
self.assertEqual(Color.blue.value, 2)
self.assertEqual(Color.green.value, 4)
def test_auto_number_garbage(self):
with self.assertRaisesRegex(TypeError, 'invalid flag value .not an int.'):
class Color(Flag):
red = 'not an int'
blue = auto()
def test_duplicate_auto(self):
class Dupes(Enum):
first = primero = auto()
second = auto()
third = auto()
self.assertEqual([Dupes.first, Dupes.second, Dupes.third], list(Dupes))
def test_multiple_mixin(self):
class AllMixin:
@classproperty
def ALL(cls):
members = list(cls)
all_value = None
if members:
all_value = members[0]
for member in members[1:]:
all_value |= member
cls.ALL = all_value
return all_value
class StrMixin:
def __str__(self):
return self._name_.lower()
class Color(AllMixin, Flag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'Color.BLUE')
class Color(AllMixin, StrMixin, Flag):
RED = auto()
GREEN = auto()
BLUE = auto()
__str__ = StrMixin.__str__
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
class Color(StrMixin, AllMixin, Flag):
RED = auto()
GREEN = auto()
BLUE = auto()
__str__ = StrMixin.__str__
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
@threading_helper.reap_threads
@threading_helper.requires_working_threading()
def test_unique_composite(self):
# override __eq__ to be identity only
class TestFlag(Flag):
one = auto()
two = auto()
three = auto()
four = auto()
five = auto()
six = auto()
seven = auto()
eight = auto()
def __eq__(self, other):
return self is other
def __hash__(self):
return hash(self._value_)
# have multiple threads competing to complete the composite members
seen = set()
failed = False
def cycle_enum():
nonlocal failed
try:
for i in range(256):
seen.add(TestFlag(i))
except Exception:
failed = True
threads = [
threading.Thread(target=cycle_enum)
for _ in range(8)
]
with threading_helper.start_threads(threads):
pass
# check that only 248 members were created
self.assertFalse(
failed,
'at least one thread failed while creating composite members')
self.assertEqual(256, len(seen), 'too many composite members created')
def test_init_subclass(self):
class MyEnum(Flag):
def __init_subclass__(cls, **kwds):
super().__init_subclass__(**kwds)
self.assertFalse(cls.__dict__.get('_test', False))
cls._test1 = 'MyEnum'
#
class TheirEnum(MyEnum):
def __init_subclass__(cls, **kwds):
super(TheirEnum, cls).__init_subclass__(**kwds)
cls._test2 = 'TheirEnum'
class WhoseEnum(TheirEnum):
def __init_subclass__(cls, **kwds):
pass
class NoEnum(WhoseEnum):
ONE = 1
self.assertEqual(TheirEnum.__dict__['_test1'], 'MyEnum')
self.assertEqual(WhoseEnum.__dict__['_test1'], 'MyEnum')
self.assertEqual(WhoseEnum.__dict__['_test2'], 'TheirEnum')
self.assertFalse(NoEnum.__dict__.get('_test1', False))
self.assertFalse(NoEnum.__dict__.get('_test2', False))
#
class OurEnum(MyEnum):
def __init_subclass__(cls, **kwds):
cls._test2 = 'OurEnum'
class WhereEnum(OurEnum):
def __init_subclass__(cls, **kwds):
pass
class NeverEnum(WhereEnum):
ONE = 1
self.assertEqual(OurEnum.__dict__['_test1'], 'MyEnum')
self.assertFalse(WhereEnum.__dict__.get('_test1', False))
self.assertEqual(WhereEnum.__dict__['_test2'], 'OurEnum')
self.assertFalse(NeverEnum.__dict__.get('_test1', False))
self.assertFalse(NeverEnum.__dict__.get('_test2', False))
class OldTestIntFlag(unittest.TestCase):
"""Tests of the IntFlags."""
class Perm(IntFlag):
R = 1 << 2
W = 1 << 1
X = 1 << 0
class Open(IntFlag):
RO = 0
WO = 1
RW = 2
AC = 3
CE = 1<<19
class Color(IntFlag):
BLACK = 0
RED = 1
ROJO = 1
GREEN = 2
BLUE = 4
PURPLE = RED|BLUE
WHITE = RED|GREEN|BLUE
BLANCO = RED|GREEN|BLUE
class Skip(IntFlag):
FIRST = 1
SECOND = 2
EIGHTH = 8
def test_type(self):
Perm = self.Perm
self.assertTrue(Perm._member_type_ is int)
Open = self.Open
for f in Perm:
self.assertTrue(isinstance(f, Perm))
self.assertEqual(f, f.value)
self.assertTrue(isinstance(Perm.W | Perm.X, Perm))
self.assertEqual(Perm.W | Perm.X, 3)
for f in Open:
self.assertTrue(isinstance(f, Open))
self.assertEqual(f, f.value)
self.assertTrue(isinstance(Open.WO | Open.RW, Open))
self.assertEqual(Open.WO | Open.RW, 3)
def test_global_repr_keep(self):
self.assertEqual(
repr(HeadlightsK(0)),
'%s.OFF_K' % SHORT_MODULE,
)
self.assertEqual(
repr(HeadlightsK(2**0 + 2**2 + 2**3)),
'%(m)s.LOW_BEAM_K|%(m)s.FOG_K|8' % {'m': SHORT_MODULE},
)
self.assertEqual(
repr(HeadlightsK(2**3)),
'%(m)s.HeadlightsK(8)' % {'m': SHORT_MODULE},
)
def test_global_repr_conform1(self):
self.assertEqual(
repr(HeadlightsC(0)),
'%s.OFF_C' % SHORT_MODULE,
)
self.assertEqual(
repr(HeadlightsC(2**0 + 2**2 + 2**3)),
'%(m)s.LOW_BEAM_C|%(m)s.FOG_C' % {'m': SHORT_MODULE},
)
self.assertEqual(
repr(HeadlightsC(2**3)),
'%(m)s.OFF_C' % {'m': SHORT_MODULE},
)
def test_global_enum_str(self):
self.assertEqual(str(NoName.ONE & NoName.TWO), 'NoName(0)')
self.assertEqual(str(NoName(0)), 'NoName(0)')
def test_format(self):
Perm = self.Perm
self.assertEqual(format(Perm.R, ''), '4')
self.assertEqual(format(Perm.R | Perm.X, ''), '5')
#
class NewPerm(IntFlag):
R = 1 << 2
W = 1 << 1
X = 1 << 0
def __str__(self):
return self._name_
self.assertEqual(format(NewPerm.R, ''), 'R')
self.assertEqual(format(NewPerm.R | Perm.X, ''), 'R|X')
def test_or(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual(i | j, i.value | j.value)
self.assertEqual((i | j).value, i.value | j.value)
self.assertIs(type(i | j), Perm)
for j in range(8):
self.assertEqual(i | j, i.value | j)
self.assertEqual((i | j).value, i.value | j)
self.assertIs(type(i | j), Perm)
self.assertEqual(j | i, j | i.value)
self.assertEqual((j | i).value, j | i.value)
self.assertIs(type(j | i), Perm)
for i in Perm:
self.assertIs(i | i, i)
self.assertIs(i | 0, i)
self.assertIs(0 | i, i)
Open = self.Open
self.assertIs(Open.RO | Open.CE, Open.CE)
def test_and(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
for j in values:
self.assertEqual(i & j, i.value & j.value, 'i is %r, j is %r' % (i, j))
self.assertEqual((i & j).value, i.value & j.value, 'i is %r, j is %r' % (i, j))
self.assertIs(type(i & j), Perm, 'i is %r, j is %r' % (i, j))
for j in range(8):
self.assertEqual(i & j, i.value & j)
self.assertEqual((i & j).value, i.value & j)
self.assertIs(type(i & j), Perm)
self.assertEqual(j & i, j & i.value)
self.assertEqual((j & i).value, j & i.value)
self.assertIs(type(j & i), Perm)
for i in Perm:
self.assertIs(i & i, i)
self.assertIs(i & 7, i)
self.assertIs(7 & i, i)
Open = self.Open
self.assertIs(Open.RO & Open.CE, Open.RO)
def test_xor(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual(i ^ j, i.value ^ j.value)
self.assertEqual((i ^ j).value, i.value ^ j.value)
self.assertIs(type(i ^ j), Perm)
for j in range(8):
self.assertEqual(i ^ j, i.value ^ j)
self.assertEqual((i ^ j).value, i.value ^ j)
self.assertIs(type(i ^ j), Perm)
self.assertEqual(j ^ i, j ^ i.value)
self.assertEqual((j ^ i).value, j ^ i.value)
self.assertIs(type(j ^ i), Perm)
for i in Perm:
self.assertIs(i ^ 0, i)
self.assertIs(0 ^ i, i)
Open = self.Open
self.assertIs(Open.RO ^ Open.CE, Open.CE)
self.assertIs(Open.CE ^ Open.CE, Open.RO)
def test_invert(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
self.assertEqual(~i, (~i).value)
self.assertIs(type(~i), Perm)
self.assertEqual(~~i, i)
for i in Perm:
self.assertIs(~~i, i)
Open = self.Open
self.assertIs(Open.WO & ~Open.WO, Open.RO)
self.assertIs((Open.WO|Open.CE) & ~Open.WO, Open.CE)
def test_boundary(self):
self.assertIs(enum.IntFlag._boundary_, EJECT)
class Iron(IntFlag, boundary=STRICT):
ONE = 1
TWO = 2
EIGHT = 8
self.assertIs(Iron._boundary_, STRICT)
#
class Water(IntFlag, boundary=CONFORM):
ONE = 1
TWO = 2
EIGHT = 8
self.assertIs(Water._boundary_, CONFORM)
#
class Space(IntFlag, boundary=EJECT):
ONE = 1
TWO = 2
EIGHT = 8
self.assertIs(Space._boundary_, EJECT)
#
class Bizarre(IntFlag, boundary=KEEP):
b = 3
c = 4
d = 6
#
self.assertRaisesRegex(ValueError, 'invalid value 5', Iron, 5)
#
self.assertIs(Water(7), Water.ONE|Water.TWO)
self.assertIs(Water(~9), Water.TWO)
#
self.assertEqual(Space(7), 7)
self.assertTrue(type(Space(7)) is int)
#
self.assertEqual(list(Bizarre), [Bizarre.c])
self.assertIs(Bizarre(3), Bizarre.b)
self.assertIs(Bizarre(6), Bizarre.d)
def test_iter(self):
Color = self.Color
Open = self.Open
self.assertEqual(list(Color), [Color.RED, Color.GREEN, Color.BLUE])
self.assertEqual(list(Open), [Open.WO, Open.RW, Open.CE])
def test_programatic_function_string(self):
Perm = IntFlag('Perm', 'R W X')
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_string_with_start(self):
Perm = IntFlag('Perm', 'R W X', start=8)
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 8<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_string_list(self):
Perm = IntFlag('Perm', ['R', 'W', 'X'])
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_iterable(self):
Perm = IntFlag('Perm', (('R', 2), ('W', 8), ('X', 32)))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_from_dict(self):
Perm = IntFlag('Perm', OrderedDict((('R', 2), ('W', 8), ('X', 32))))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_from_empty_list(self):
Perm = enum.IntFlag('Perm', [])
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 0, Perm)
Thing = enum.Enum('Thing', [])
lst = list(Thing)
self.assertEqual(len(lst), len(Thing))
self.assertEqual(len(Thing), 0, Thing)
def test_programatic_function_from_empty_tuple(self):
Perm = enum.IntFlag('Perm', ())
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 0, Perm)
        Thing = enum.Enum('Thing', ())
        lst = list(Thing)
self.assertEqual(len(lst), len(Thing))
self.assertEqual(len(Thing), 0, Thing)
@unittest.skipIf(
python_version >= (3, 12),
'__contains__ now returns True/False for all inputs',
)
@unittest.expectedFailure
def test_contains_er(self):
Open = self.Open
Color = self.Color
self.assertTrue(Color.GREEN in Color)
self.assertTrue(Open.RW in Open)
self.assertFalse(Color.GREEN in Open)
self.assertFalse(Open.RW in Color)
with self.assertRaises(TypeError):
with self.assertWarns(DeprecationWarning):
'GREEN' in Color
with self.assertRaises(TypeError):
with self.assertWarns(DeprecationWarning):
'RW' in Open
with self.assertRaises(TypeError):
with self.assertWarns(DeprecationWarning):
2 in Color
with self.assertRaises(TypeError):
with self.assertWarns(DeprecationWarning):
2 in Open
@unittest.skipIf(
python_version < (3, 12),
            '__contains__ only works with enum members before 3.12',
)
@unittest.expectedFailure
def test_contains_tf(self):
Open = self.Open
Color = self.Color
self.assertTrue(Color.GREEN in Color)
self.assertTrue(Open.RW in Open)
self.assertTrue(Color.GREEN in Open)
self.assertTrue(Open.RW in Color)
self.assertFalse('GREEN' in Color)
self.assertFalse('RW' in Open)
self.assertTrue(2 in Color)
self.assertTrue(2 in Open)
def test_member_contains(self):
Perm = self.Perm
R, W, X = Perm
RW = R | W
RX = R | X
WX = W | X
RWX = R | W | X
self.assertTrue(R in RW)
self.assertTrue(R in RX)
self.assertTrue(R in RWX)
self.assertTrue(W in RW)
self.assertTrue(W in WX)
self.assertTrue(W in RWX)
self.assertTrue(X in RX)
self.assertTrue(X in WX)
self.assertTrue(X in RWX)
self.assertFalse(R in WX)
self.assertFalse(W in RX)
self.assertFalse(X in RW)
with self.assertRaises(TypeError):
self.assertFalse('test' in RW)
def test_member_iter(self):
Color = self.Color
self.assertEqual(list(Color.BLACK), [])
self.assertEqual(list(Color.PURPLE), [Color.RED, Color.BLUE])
self.assertEqual(list(Color.BLUE), [Color.BLUE])
self.assertEqual(list(Color.GREEN), [Color.GREEN])
self.assertEqual(list(Color.WHITE), [Color.RED, Color.GREEN, Color.BLUE])
def test_member_length(self):
self.assertEqual(self.Color.__len__(self.Color.BLACK), 0)
self.assertEqual(self.Color.__len__(self.Color.GREEN), 1)
self.assertEqual(self.Color.__len__(self.Color.PURPLE), 2)
self.assertEqual(self.Color.__len__(self.Color.BLANCO), 3)
def test_aliases(self):
Color = self.Color
self.assertEqual(Color(1).name, 'RED')
self.assertEqual(Color['ROJO'].name, 'RED')
self.assertEqual(Color(7).name, 'WHITE')
self.assertEqual(Color['BLANCO'].name, 'WHITE')
self.assertIs(Color.BLANCO, Color.WHITE)
Open = self.Open
self.assertIs(Open['AC'], Open.AC)
def test_bool(self):
Perm = self.Perm
for f in Perm:
self.assertTrue(f)
Open = self.Open
for f in Open:
self.assertEqual(bool(f.value), bool(f))
def test_multiple_mixin(self):
class AllMixin:
@classproperty
def ALL(cls):
members = list(cls)
all_value = None
if members:
all_value = members[0]
for member in members[1:]:
all_value |= member
cls.ALL = all_value
return all_value
class StrMixin:
def __str__(self):
return self._name_.lower()
class Color(AllMixin, IntFlag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), '4')
class Color(AllMixin, StrMixin, IntFlag):
RED = auto()
GREEN = auto()
BLUE = auto()
__str__ = StrMixin.__str__
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
class Color(StrMixin, AllMixin, IntFlag):
RED = auto()
GREEN = auto()
BLUE = auto()
__str__ = StrMixin.__str__
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
@threading_helper.reap_threads
@threading_helper.requires_working_threading()
def test_unique_composite(self):
# override __eq__ to be identity only
class TestFlag(IntFlag):
one = auto()
two = auto()
three = auto()
four = auto()
five = auto()
six = auto()
seven = auto()
eight = auto()
def __eq__(self, other):
return self is other
def __hash__(self):
return hash(self._value_)
# have multiple threads competing to complete the composite members
seen = set()
failed = False
def cycle_enum():
nonlocal failed
try:
for i in range(256):
seen.add(TestFlag(i))
except Exception:
failed = True
threads = [
threading.Thread(target=cycle_enum)
for _ in range(8)
]
with threading_helper.start_threads(threads):
pass
# check that only 248 members were created
self.assertFalse(
failed,
'at least one thread failed while creating composite members')
self.assertEqual(256, len(seen), 'too many composite members created')
class TestEmptyAndNonLatinStrings(unittest.TestCase):
def test_empty_string(self):
with self.assertRaises(ValueError):
empty_abc = Enum('empty_abc', ('', 'B', 'C'))
def test_non_latin_character_string(self):
greek_abc = Enum('greek_abc', ('\u03B1', 'B', 'C'))
item = getattr(greek_abc, '\u03B1')
self.assertEqual(item.value, 1)
def test_non_latin_number_string(self):
hebrew_123 = Enum('hebrew_123', ('\u05D0', '2', '3'))
item = getattr(hebrew_123, '\u05D0')
self.assertEqual(item.value, 1)
class TestUnique(unittest.TestCase):
def test_unique_clean(self):
@unique
class Clean(Enum):
one = 1
two = 'dos'
tres = 4.0
#
@unique
class Cleaner(IntEnum):
single = 1
double = 2
triple = 3
def test_unique_dirty(self):
with self.assertRaisesRegex(ValueError, 'tres.*one'):
@unique
class Dirty(Enum):
one = 1
two = 'dos'
tres = 1
with self.assertRaisesRegex(
ValueError,
'double.*single.*turkey.*triple',
):
@unique
class Dirtier(IntEnum):
single = 1
double = 1
triple = 3
turkey = 3
def test_unique_with_name(self):
@verify(UNIQUE)
class Silly(Enum):
one = 1
two = 'dos'
name = 3
#
@verify(UNIQUE)
class Sillier(IntEnum):
single = 1
name = 2
triple = 3
value = 4
class TestVerify(unittest.TestCase):
def test_continuous(self):
@verify(CONTINUOUS)
class Auto(Enum):
FIRST = auto()
SECOND = auto()
THIRD = auto()
FORTH = auto()
#
@verify(CONTINUOUS)
class Manual(Enum):
FIRST = 3
SECOND = 4
THIRD = 5
FORTH = 6
#
with self.assertRaisesRegex(ValueError, 'invalid enum .Missing.: missing values 5, 6, 7, 8, 9, 10, 12'):
@verify(CONTINUOUS)
class Missing(Enum):
FIRST = 3
SECOND = 4
THIRD = 11
FORTH = 13
#
with self.assertRaisesRegex(ValueError, 'invalid flag .Incomplete.: missing values 32'):
@verify(CONTINUOUS)
class Incomplete(Flag):
FIRST = 4
SECOND = 8
THIRD = 16
FORTH = 64
#
with self.assertRaisesRegex(ValueError, 'invalid flag .StillIncomplete.: missing values 16'):
@verify(CONTINUOUS)
class StillIncomplete(Flag):
FIRST = 4
SECOND = 8
THIRD = 11
FORTH = 32
def test_composite(self):
class Bizarre(Flag):
b = 3
c = 4
d = 6
self.assertEqual(list(Bizarre), [Bizarre.c])
self.assertEqual(Bizarre.b.value, 3)
self.assertEqual(Bizarre.c.value, 4)
self.assertEqual(Bizarre.d.value, 6)
with self.assertRaisesRegex(
ValueError,
"invalid Flag 'Bizarre': aliases b and d are missing combined values of 0x3 .use enum.show_flag_values.value. for details.",
):
@verify(NAMED_FLAGS)
class Bizarre(Flag):
b = 3
c = 4
d = 6
#
self.assertEqual(enum.show_flag_values(3), [1, 2])
class Bizarre(IntFlag):
b = 3
c = 4
d = 6
self.assertEqual(list(Bizarre), [Bizarre.c])
self.assertEqual(Bizarre.b.value, 3)
self.assertEqual(Bizarre.c.value, 4)
self.assertEqual(Bizarre.d.value, 6)
with self.assertRaisesRegex(
ValueError,
"invalid Flag 'Bizarre': alias d is missing value 0x2 .use enum.show_flag_values.value. for details.",
):
@verify(NAMED_FLAGS)
class Bizarre(IntFlag):
c = 4
d = 6
self.assertEqual(enum.show_flag_values(2), [2])
def test_unique_clean(self):
@verify(UNIQUE)
class Clean(Enum):
one = 1
two = 'dos'
tres = 4.0
#
@verify(UNIQUE)
class Cleaner(IntEnum):
single = 1
double = 2
triple = 3
def test_unique_dirty(self):
with self.assertRaisesRegex(ValueError, 'tres.*one'):
@verify(UNIQUE)
class Dirty(Enum):
one = 1
two = 'dos'
tres = 1
with self.assertRaisesRegex(
ValueError,
'double.*single.*turkey.*triple',
):
@verify(UNIQUE)
class Dirtier(IntEnum):
single = 1
double = 1
triple = 3
turkey = 3
def test_unique_with_name(self):
@verify(UNIQUE)
class Silly(Enum):
one = 1
two = 'dos'
name = 3
#
@verify(UNIQUE)
class Sillier(IntEnum):
single = 1
name = 2
triple = 3
value = 4
class TestInternals(unittest.TestCase):
sunder_names = '_bad_', '_good_', '_what_ho_'
dunder_names = '__mal__', '__bien__', '__que_que__'
private_names = '_MyEnum__private', '_MyEnum__still_private'
private_and_sunder_names = '_MyEnum__private_', '_MyEnum__also_private_'
random_names = 'okay', '_semi_private', '_weird__', '_MyEnum__'
def test_sunder(self):
for name in self.sunder_names + self.private_and_sunder_names:
            self.assertTrue(enum._is_sunder(name), '%r is not a sunder name?' % name)
for name in self.dunder_names + self.private_names + self.random_names:
self.assertFalse(enum._is_sunder(name), '%r is a sunder name?' % name)
def test_dunder(self):
for name in self.dunder_names:
            self.assertTrue(enum._is_dunder(name), '%r is not a dunder name?' % name)
for name in self.sunder_names + self.private_names + self.private_and_sunder_names + self.random_names:
self.assertFalse(enum._is_dunder(name), '%r is a dunder name?' % name)
def test_is_private(self):
for name in self.private_names + self.private_and_sunder_names:
            self.assertTrue(enum._is_private('MyEnum', name), '%r is not a private name?' % name)
for name in self.sunder_names + self.dunder_names + self.random_names:
            self.assertFalse(enum._is_private('MyEnum', name), '%r is a private name?' % name)
def test_auto_number(self):
class Color(Enum):
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 1)
self.assertEqual(Color.blue.value, 2)
self.assertEqual(Color.green.value, 3)
def test_auto_name(self):
class Color(Enum):
def _generate_next_value_(name, start, count, last):
return name
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 'red')
self.assertEqual(Color.blue.value, 'blue')
self.assertEqual(Color.green.value, 'green')
def test_auto_name_inherit(self):
class AutoNameEnum(Enum):
def _generate_next_value_(name, start, count, last):
return name
class Color(AutoNameEnum):
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 'red')
self.assertEqual(Color.blue.value, 'blue')
self.assertEqual(Color.green.value, 'green')
def test_auto_garbage(self):
class Color(Enum):
red = 'red'
blue = auto()
self.assertEqual(Color.blue.value, 1)
def test_auto_garbage_corrected(self):
class Color(Enum):
red = 'red'
blue = 2
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 'red')
self.assertEqual(Color.blue.value, 2)
self.assertEqual(Color.green.value, 3)
def test_auto_order(self):
with self.assertRaises(TypeError):
class Color(Enum):
red = auto()
green = auto()
blue = auto()
def _generate_next_value_(name, start, count, last):
return name
def test_auto_order_wierd(self):
weird_auto = auto()
weird_auto.value = 'pathological case'
class Color(Enum):
red = weird_auto
def _generate_next_value_(name, start, count, last):
return name
blue = auto()
self.assertEqual(list(Color), [Color.red, Color.blue])
self.assertEqual(Color.red.value, 'pathological case')
self.assertEqual(Color.blue.value, 'blue')
def test_duplicate_auto(self):
class Dupes(Enum):
first = primero = auto()
second = auto()
third = auto()
self.assertEqual([Dupes.first, Dupes.second, Dupes.third], list(Dupes))
class TestEnumTypeSubclassing(unittest.TestCase):
pass
expected_help_output_with_docs = """\
Help on class Color in module %s:
class Color(enum.Enum)
| Color(value, names=None, *, module=None, qualname=None, type=None, start=1, boundary=None)
|\x20\x20
| A collection of name/value pairs.
|\x20\x20
| Access them by:
|\x20\x20
| - attribute access::
|\x20\x20
| >>> Color.CYAN
| <Color.CYAN: 1>
|\x20\x20
| - value lookup:
|\x20\x20
| >>> Color(1)
| <Color.CYAN: 1>
|\x20\x20
| - name lookup:
|\x20\x20
| >>> Color['CYAN']
| <Color.CYAN: 1>
|\x20\x20
| Enumerations can be iterated over, and know how many members they have:
|\x20\x20
| >>> len(Color)
| 3
|\x20\x20
| >>> list(Color)
| [<Color.CYAN: 1>, <Color.MAGENTA: 2>, <Color.YELLOW: 3>]
|\x20\x20
| Methods can be added to enumerations, and members can have their own
| attributes -- see the documentation for details.
|\x20\x20
| Method resolution order:
| Color
| enum.Enum
| builtins.object
|\x20\x20
| Data and other attributes defined here:
|\x20\x20
| CYAN = <Color.CYAN: 1>
|\x20\x20
| MAGENTA = <Color.MAGENTA: 2>
|\x20\x20
| YELLOW = <Color.YELLOW: 3>
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.Enum:
|\x20\x20
| name
| The name of the Enum member.
|\x20\x20
| value
| The value of the Enum member.
|\x20\x20
| ----------------------------------------------------------------------
| Methods inherited from enum.EnumType:
|\x20\x20
| __contains__(member) from enum.EnumType
| Return True if member is a member of this enum
| raises TypeError if member is not an enum member
|\x20\x20\x20\x20\x20\x20
| note: in 3.12 TypeError will no longer be raised, and True will also be
| returned if member is the value of a member in this enum
|\x20\x20
| __getitem__(name) from enum.EnumType
| Return the member matching `name`.
|\x20\x20
| __iter__() from enum.EnumType
| Return members in definition order.
|\x20\x20
| __len__() from enum.EnumType
| Return the number of members (no aliases)
|\x20\x20
| ----------------------------------------------------------------------
| Readonly properties inherited from enum.EnumType:
|\x20\x20
| __members__
| Returns a mapping of member name->value.
|\x20\x20\x20\x20\x20\x20
| This mapping lists all enum members, including aliases. Note that this
| is a read-only view of the internal mapping."""
expected_help_output_without_docs = """\
Help on class Color in module %s:
class Color(enum.Enum)
| Color(value, names=None, *, module=None, qualname=None, type=None, start=1)
|\x20\x20
| Method resolution order:
| Color
| enum.Enum
| builtins.object
|\x20\x20
| Data and other attributes defined here:
|\x20\x20
| YELLOW = <Color.YELLOW: 3>
|\x20\x20
| MAGENTA = <Color.MAGENTA: 2>
|\x20\x20
| CYAN = <Color.CYAN: 1>
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.Enum:
|\x20\x20
| name
|\x20\x20
| value
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.EnumType:
|\x20\x20
| __members__"""
class TestStdLib(unittest.TestCase):
maxDiff = None
class Color(Enum):
CYAN = 1
MAGENTA = 2
YELLOW = 3
def test_pydoc(self):
# indirectly test __objclass__
if StrEnum.__doc__ is None:
expected_text = expected_help_output_without_docs % __name__
else:
expected_text = expected_help_output_with_docs % __name__
output = StringIO()
helper = pydoc.Helper(output=output)
helper(self.Color)
result = output.getvalue().strip()
self.assertEqual(result, expected_text, result)
def test_inspect_getmembers(self):
values = dict((
('__class__', EnumType),
('__doc__', '...'),
('__members__', self.Color.__members__),
('__module__', __name__),
('YELLOW', self.Color.YELLOW),
('MAGENTA', self.Color.MAGENTA),
('CYAN', self.Color.CYAN),
('name', Enum.__dict__['name']),
('value', Enum.__dict__['value']),
('__len__', self.Color.__len__),
('__contains__', self.Color.__contains__),
('__name__', 'Color'),
('__getitem__', self.Color.__getitem__),
('__qualname__', 'TestStdLib.Color'),
('__init_subclass__', getattr(self.Color, '__init_subclass__')),
('__iter__', self.Color.__iter__),
))
result = dict(inspect.getmembers(self.Color))
self.assertEqual(set(values.keys()), set(result.keys()))
failed = False
for k in values.keys():
if k == '__doc__':
# __doc__ is huge, not comparing
continue
if result[k] != values[k]:
print()
print('\n%s\n key: %s\n result: %s\nexpected: %s\n%s\n' %
('=' * 75, k, result[k], values[k], '=' * 75), sep='')
failed = True
if failed:
self.fail("result does not equal expected, see print above")
def test_inspect_classify_class_attrs(self):
# indirectly test __objclass__
from inspect import Attribute
values = [
Attribute(name='__class__', kind='data',
defining_class=object, object=EnumType),
Attribute(name='__contains__', kind='method',
defining_class=EnumType, object=self.Color.__contains__),
Attribute(name='__doc__', kind='data',
defining_class=self.Color, object='...'),
Attribute(name='__getitem__', kind='method',
defining_class=EnumType, object=self.Color.__getitem__),
Attribute(name='__iter__', kind='method',
defining_class=EnumType, object=self.Color.__iter__),
Attribute(name='__init_subclass__', kind='class method',
defining_class=object, object=getattr(self.Color, '__init_subclass__')),
Attribute(name='__len__', kind='method',
defining_class=EnumType, object=self.Color.__len__),
Attribute(name='__members__', kind='property',
defining_class=EnumType, object=EnumType.__members__),
Attribute(name='__module__', kind='data',
defining_class=self.Color, object=__name__),
Attribute(name='__name__', kind='data',
defining_class=self.Color, object='Color'),
Attribute(name='__qualname__', kind='data',
defining_class=self.Color, object='TestStdLib.Color'),
Attribute(name='YELLOW', kind='data',
defining_class=self.Color, object=self.Color.YELLOW),
Attribute(name='MAGENTA', kind='data',
defining_class=self.Color, object=self.Color.MAGENTA),
Attribute(name='CYAN', kind='data',
defining_class=self.Color, object=self.Color.CYAN),
Attribute(name='name', kind='data',
defining_class=Enum, object=Enum.__dict__['name']),
Attribute(name='value', kind='data',
defining_class=Enum, object=Enum.__dict__['value']),
]
for v in values:
try:
v.name
except AttributeError:
print(v)
values.sort(key=lambda item: item.name)
result = list(inspect.classify_class_attrs(self.Color))
result.sort(key=lambda item: item.name)
self.assertEqual(
len(values), len(result),
"%s != %s" % ([a.name for a in values], [a.name for a in result])
)
failed = False
for v, r in zip(values, result):
if r.name in ('__init_subclass__', '__doc__'):
                # not sure how to make the __init_subclass__ Attributes match
# so as long as there is one, call it good
# __doc__ is too big to check exactly, so treat the same as __init_subclass__
for name in ('name','kind','defining_class'):
if getattr(v, name) != getattr(r, name):
print('\n%s\n%s\n%s\n%s\n' % ('=' * 75, r, v, '=' * 75), sep='')
failed = True
elif r != v:
print('\n%s\n%s\n%s\n%s\n' % ('=' * 75, r, v, '=' * 75), sep='')
failed = True
if failed:
self.fail("result does not equal expected, see print above")
def test_test_simple_enum(self):
@_simple_enum(Enum)
class SimpleColor:
CYAN = 1
MAGENTA = 2
YELLOW = 3
class CheckedColor(Enum):
CYAN = 1
MAGENTA = 2
YELLOW = 3
self.assertTrue(_test_simple_enum(CheckedColor, SimpleColor) is None)
SimpleColor.MAGENTA._value_ = 9
self.assertRaisesRegex(
TypeError, "enum mismatch",
_test_simple_enum, CheckedColor, SimpleColor,
)
class CheckedMissing(IntFlag, boundary=KEEP):
SIXTY_FOUR = 64
ONE_TWENTY_EIGHT = 128
TWENTY_FORTY_EIGHT = 2048
ALL = 2048 + 128 + 64 + 12
CM = CheckedMissing
self.assertEqual(list(CheckedMissing), [CM.SIXTY_FOUR, CM.ONE_TWENTY_EIGHT, CM.TWENTY_FORTY_EIGHT])
#
@_simple_enum(IntFlag, boundary=KEEP)
class Missing:
SIXTY_FOUR = 64
ONE_TWENTY_EIGHT = 128
TWENTY_FORTY_EIGHT = 2048
ALL = 2048 + 128 + 64 + 12
M = Missing
        self.assertEqual(list(Missing), [M.SIXTY_FOUR, M.ONE_TWENTY_EIGHT, M.TWENTY_FORTY_EIGHT])
#
_test_simple_enum(CheckedMissing, Missing)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
support.check__all__(self, enum, not_exported={'bin', 'show_flag_values'})
def test_doc_1(self):
class Single(Enum):
ONE = 1
self.assertEqual(
Single.__doc__,
dedent("""\
A collection of name/value pairs.
Access them by:
- attribute access::
>>> Single.ONE
<Single.ONE: 1>
- value lookup:
>>> Single(1)
<Single.ONE: 1>
- name lookup:
>>> Single['ONE']
<Single.ONE: 1>
Enumerations can be iterated over, and know how many members they have:
>>> len(Single)
1
>>> list(Single)
[<Single.ONE: 1>]
Methods can be added to enumerations, and members can have their own
attributes -- see the documentation for details.
"""))
def test_doc_2(self):
class Double(Enum):
ONE = 1
TWO = 2
self.assertEqual(
Double.__doc__,
dedent("""\
A collection of name/value pairs.
Access them by:
- attribute access::
>>> Double.ONE
<Double.ONE: 1>
- value lookup:
>>> Double(1)
<Double.ONE: 1>
- name lookup:
>>> Double['ONE']
<Double.ONE: 1>
Enumerations can be iterated over, and know how many members they have:
>>> len(Double)
2
>>> list(Double)
[<Double.ONE: 1>, <Double.TWO: 2>]
Methods can be added to enumerations, and members can have their own
attributes -- see the documentation for details.
"""))
    def test_doc_3(self):
class Triple(Enum):
ONE = 1
TWO = 2
THREE = 3
self.assertEqual(
Triple.__doc__,
dedent("""\
A collection of name/value pairs.
Access them by:
- attribute access::
>>> Triple.ONE
<Triple.ONE: 1>
- value lookup:
>>> Triple(1)
<Triple.ONE: 1>
- name lookup:
>>> Triple['ONE']
<Triple.ONE: 1>
Enumerations can be iterated over, and know how many members they have:
>>> len(Triple)
3
>>> list(Triple)
[<Triple.ONE: 1>, <Triple.TWO: 2>, <Triple.THREE: 3>]
Methods can be added to enumerations, and members can have their own
attributes -- see the documentation for details.
"""))
    def test_doc_4(self):
class Quadruple(Enum):
ONE = 1
TWO = 2
THREE = 3
FOUR = 4
self.assertEqual(
Quadruple.__doc__,
dedent("""\
A collection of name/value pairs.
Access them by:
- attribute access::
>>> Quadruple.ONE
<Quadruple.ONE: 1>
- value lookup:
>>> Quadruple(1)
<Quadruple.ONE: 1>
- name lookup:
>>> Quadruple['ONE']
<Quadruple.ONE: 1>
Enumerations can be iterated over, and know how many members they have:
>>> len(Quadruple)
4
>>> list(Quadruple)[:3]
[<Quadruple.ONE: 1>, <Quadruple.TWO: 2>, <Quadruple.THREE: 3>]
Methods can be added to enumerations, and members can have their own
attributes -- see the documentation for details.
"""))
# These are unordered here on purpose to ensure that declaration order
# makes no difference.
CONVERT_TEST_NAME_D = 5
CONVERT_TEST_NAME_C = 5
CONVERT_TEST_NAME_B = 5
CONVERT_TEST_NAME_A = 5 # This one should sort first.
CONVERT_TEST_NAME_E = 5
CONVERT_TEST_NAME_F = 5
CONVERT_STRING_TEST_NAME_D = 5
CONVERT_STRING_TEST_NAME_C = 5
CONVERT_STRING_TEST_NAME_B = 5
CONVERT_STRING_TEST_NAME_A = 5 # This one should sort first.
CONVERT_STRING_TEST_NAME_E = 5
CONVERT_STRING_TEST_NAME_F = 5
# global names for StrEnum._convert_ test
CONVERT_STR_TEST_2 = 'goodbye'
CONVERT_STR_TEST_1 = 'hello'
# We also need values that cannot be compared:
UNCOMPARABLE_A = 5
UNCOMPARABLE_C = (9, 1) # naming order is broken on purpose
UNCOMPARABLE_B = 'value'
COMPLEX_C = 1j
COMPLEX_A = 2j
COMPLEX_B = 3j
class _ModuleWrapper:
"""We use this class as a namespace for swapping modules."""
def __init__(self, module):
self.__dict__.update(module.__dict__)
class TestConvert(unittest.TestCase):
def tearDown(self):
# Reset the module-level test variables to their original integer
# values, otherwise the already created enum values get converted
# instead.
g = globals()
for suffix in ['A', 'B', 'C', 'D', 'E', 'F']:
g['CONVERT_TEST_NAME_%s' % suffix] = 5
g['CONVERT_STRING_TEST_NAME_%s' % suffix] = 5
for suffix, value in (('A', 5), ('B', (9, 1)), ('C', 'value')):
g['UNCOMPARABLE_%s' % suffix] = value
for suffix, value in (('A', 2j), ('B', 3j), ('C', 1j)):
g['COMPLEX_%s' % suffix] = value
for suffix, value in (('1', 'hello'), ('2', 'goodbye')):
g['CONVERT_STR_TEST_%s' % suffix] = value
def test_convert_value_lookup_priority(self):
test_type = enum.IntEnum._convert_(
'UnittestConvert',
MODULE,
filter=lambda x: x.startswith('CONVERT_TEST_'))
# We don't want the reverse lookup value to vary when there are
# multiple possible names for a given value. It should always
        # report the first lexicographical name in that case.
self.assertEqual(test_type(5).name, 'CONVERT_TEST_NAME_A')
def test_convert_int(self):
test_type = enum.IntEnum._convert_(
'UnittestConvert',
MODULE,
filter=lambda x: x.startswith('CONVERT_TEST_'))
# Ensure that test_type has all of the desired names and values.
self.assertEqual(test_type.CONVERT_TEST_NAME_F,
test_type.CONVERT_TEST_NAME_A)
self.assertEqual(test_type.CONVERT_TEST_NAME_B, 5)
self.assertEqual(test_type.CONVERT_TEST_NAME_C, 5)
self.assertEqual(test_type.CONVERT_TEST_NAME_D, 5)
self.assertEqual(test_type.CONVERT_TEST_NAME_E, 5)
# Ensure that test_type only picked up names matching the filter.
int_dir = dir(int) + [
'CONVERT_TEST_NAME_A', 'CONVERT_TEST_NAME_B', 'CONVERT_TEST_NAME_C',
'CONVERT_TEST_NAME_D', 'CONVERT_TEST_NAME_E', 'CONVERT_TEST_NAME_F',
'CONVERT_TEST_SIGABRT', 'CONVERT_TEST_SIGIOT',
'CONVERT_TEST_EIO', 'CONVERT_TEST_EBUS',
]
extra = [name for name in dir(test_type) if name not in enum_dir(test_type)]
missing = [name for name in enum_dir(test_type) if name not in dir(test_type)]
self.assertEqual(
extra + missing,
[],
msg='extra names: %r; missing names: %r' % (extra, missing),
)
def test_convert_uncomparable(self):
uncomp = enum.Enum._convert_(
'Uncomparable',
MODULE,
filter=lambda x: x.startswith('UNCOMPARABLE_'))
# Should be ordered by `name` only:
self.assertEqual(
list(uncomp),
[uncomp.UNCOMPARABLE_A, uncomp.UNCOMPARABLE_B, uncomp.UNCOMPARABLE_C],
)
def test_convert_complex(self):
uncomp = enum.Enum._convert_(
'Uncomparable',
MODULE,
filter=lambda x: x.startswith('COMPLEX_'))
# Should be ordered by `name` only:
self.assertEqual(
list(uncomp),
[uncomp.COMPLEX_A, uncomp.COMPLEX_B, uncomp.COMPLEX_C],
)
def test_convert_str(self):
test_type = enum.StrEnum._convert_(
'UnittestConvert',
MODULE,
filter=lambda x: x.startswith('CONVERT_STR_'),
as_global=True)
# Ensure that test_type has all of the desired names and values.
self.assertEqual(test_type.CONVERT_STR_TEST_1, 'hello')
self.assertEqual(test_type.CONVERT_STR_TEST_2, 'goodbye')
# Ensure that test_type only picked up names matching the filter.
str_dir = dir(str) + ['CONVERT_STR_TEST_1', 'CONVERT_STR_TEST_2']
extra = [name for name in dir(test_type) if name not in enum_dir(test_type)]
missing = [name for name in enum_dir(test_type) if name not in dir(test_type)]
self.assertEqual(
extra + missing,
[],
msg='extra names: %r; missing names: %r' % (extra, missing),
)
self.assertEqual(repr(test_type.CONVERT_STR_TEST_1), '%s.CONVERT_STR_TEST_1' % SHORT_MODULE)
self.assertEqual(str(test_type.CONVERT_STR_TEST_2), 'goodbye')
self.assertEqual(format(test_type.CONVERT_STR_TEST_1), 'hello')
def test_convert_raise(self):
with self.assertRaises(AttributeError):
enum.IntEnum._convert(
'UnittestConvert',
MODULE,
filter=lambda x: x.startswith('CONVERT_TEST_'))
def test_convert_repr_and_str(self):
test_type = enum.IntEnum._convert_(
'UnittestConvert',
MODULE,
filter=lambda x: x.startswith('CONVERT_STRING_TEST_'),
as_global=True)
self.assertEqual(repr(test_type.CONVERT_STRING_TEST_NAME_A), '%s.CONVERT_STRING_TEST_NAME_A' % SHORT_MODULE)
self.assertEqual(str(test_type.CONVERT_STRING_TEST_NAME_A), '5')
self.assertEqual(format(test_type.CONVERT_STRING_TEST_NAME_A), '5')
# helpers
def enum_dir(cls):
interesting = set([
'__class__', '__contains__', '__doc__', '__getitem__',
'__iter__', '__len__', '__members__', '__module__',
'__name__', '__qualname__',
]
+ cls._member_names_
)
if cls._new_member_ is not object.__new__:
interesting.add('__new__')
if cls.__init_subclass__ is not object.__init_subclass__:
interesting.add('__init_subclass__')
if cls._member_type_ is object:
return sorted(interesting)
else:
# return whatever mixed-in data type has
return sorted(set(dir(cls._member_type_)) | interesting)
def member_dir(member):
if member.__class__._member_type_ is object:
allowed = set(['__class__', '__doc__', '__eq__', '__hash__', '__module__', 'name', 'value'])
else:
allowed = set(dir(member))
for cls in member.__class__.mro():
for name, obj in cls.__dict__.items():
if name[0] == '_':
continue
if isinstance(obj, enum.property):
if obj.fget is not None or name not in member._member_map_:
allowed.add(name)
else:
allowed.discard(name)
else:
allowed.add(name)
return sorted(allowed)
missing = object()
if __name__ == '__main__':
unittest.main()
|
high_availability.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import time
import uuid
from typing import List, Dict
import grpc
import logging
import threading
from abc import ABC, abstractmethod
from collections import deque
from notification_service.base_notification import Member, BaseEvent
from notification_service.proto import notification_service_pb2_grpc
from notification_service.proto.notification_service_pb2 import NotifyRequest, Notify, NotifyNewMemberRequest
from notification_service.proto.notification_service_pb2_grpc import NotificationServiceStub
from notification_service.util import db
from notification_service.util.db import MemberModel
from notification_service.util.utils import sleep_and_detecting_running, member_to_proto
class NotificationServerHaManager(ABC):
@abstractmethod
def start(self, server_uri, storage, ttl_ms, min_notify_interval, member_updated_condition):
pass
@abstractmethod
def notify_others(self, event: BaseEvent):
pass
@abstractmethod
def get_living_members(self):
pass
@abstractmethod
def add_living_member(self, member: Member):
pass
@abstractmethod
def stop(self):
pass
class HighAvailabilityStorage(ABC):
@abstractmethod
def list_living_members(self, ttl_ms) -> List[Member]:
pass
@abstractmethod
def update_member(self, server_uri, server_uuid):
pass
@abstractmethod
def clear_dead_members(self, ttl_ms):
pass
class DbHighAvailabilityStorage(HighAvailabilityStorage):
def __init__(self, db_conn=None, create_table_if_not_exists=True):
if db_conn is not None:
db.SQL_ALCHEMY_CONN = db_conn
if create_table_if_not_exists:
MemberModel.create_table(db.SQL_ALCHEMY_CONN)
def list_living_members(self, ttl_ms):
return MemberModel.get_living_members(ttl_ms)
def list_dead_members(self, ttl_ms):
return MemberModel.get_dead_members(ttl_ms)
def update_member(self, server_uri, server_uuid):
MemberModel.update_member(server_uri, server_uuid)
def delete_member(self, server_uri=None, server_uuid=None):
MemberModel.delete_member(server_uri, server_uuid)
def clear_dead_members(self, ttl_ms):
MemberModel.clear_dead_members(ttl_ms)
class SimpleNotificationServerHaManager(NotificationServerHaManager):
def __init__(self):
self.server_uri = None
self.storage = None # type: HighAvailabilityStorage
self.ttl_ms = None
self.cached_notify = deque()
self.living_members = []
self.member_connections = {} # type: Dict[str, NotificationServiceStub]
self.running = False
self.heartbeat_thread = None
self.notify_thread = None
self.min_notify_interval_ms = None
self.notified_others_after_start = False
self.member_updated_condition = None
self.uuid = str(uuid.uuid4())
def start(self, server_uri, storage, ttl_ms, min_notify_interval_ms, member_updated_condition):
self.server_uri = server_uri
self.storage = storage
self.ttl_ms = ttl_ms
self.min_notify_interval_ms = min_notify_interval_ms
self.member_updated_condition = member_updated_condition
self.running = True
self.heartbeat_thread = threading.Thread(target=self.start_heartbeat, daemon=True)
self.heartbeat_thread.start()
self.notify_thread = threading.Thread(target=self.start_notify, daemon=True)
self.notify_thread.start()
def start_heartbeat(self):
while self.running:
try:
# do heartbeat
self.storage.clear_dead_members(self.ttl_ms)
self.storage.update_member(self.server_uri, self.uuid)
self.living_members = self.storage.list_living_members(self.ttl_ms)
if not self.notified_others_after_start:
for member in self.living_members:
if member.server_uri == self.server_uri:
continue
channel = grpc.insecure_channel(member.server_uri)
self.member_connections[member.server_uri] = \
notification_service_pb2_grpc.NotificationServiceStub(channel)
try:
self.member_connections[member.server_uri].notifyNewMember(
NotifyNewMemberRequest(member=member_to_proto(
Member(1, self.server_uri, int(time.time_ns() / 1000000))
)))
except grpc.RpcError:
logging.error("Notify new member to '%s' failed." % member.server_uri,
exc_info=True)
self.notified_others_after_start = True
            except Exception:
                logging.error("Exception thrown when sending heartbeat to the HA storage.",
exc_info=True)
sleep_and_detecting_running(self.ttl_ms / 2, lambda: self.running)
def start_notify(self):
while self.running:
# update connections
living_member_server_uris = set()
for member in self.living_members:
if member.server_uri == self.server_uri:
continue
if member.server_uri not in self.member_connections:
try:
channel = grpc.insecure_channel(member.server_uri)
self.member_connections[member.server_uri] = \
notification_service_pb2_grpc.NotificationServiceStub(channel)
except Exception:
logging.error("Exception thrown when connect to another member: %s." %
member.server_uri,
exc_info=True)
continue
living_member_server_uris.add(member.server_uri)
for server_uri in list(self.member_connections.keys()):
if server_uri not in living_member_server_uris:
del self.member_connections[server_uri]
notifies = []
while len(self.cached_notify) > 0:
event = self.cached_notify.popleft()
notify = Notify(key=event.key, namespace=event.namespace)
notifies.append(notify)
# notify others
if len(notifies) > 0:
for server_uri, stub in self.member_connections.items():
try:
stub.notify(NotifyRequest(notifies=notifies))
except Exception:
logging.error("Exception thrown when notify another member: %s." %
server_uri,
exc_info=True)
sleep_and_detecting_running(self.min_notify_interval_ms, lambda: self.running, 100)
def notify_others(self, event):
self.cached_notify.append(event)
def get_living_members(self):
if len(self.living_members) == 0:
self.living_members = self.storage.list_living_members(self.ttl_ms)
return self.living_members
def add_living_member(self, member: Member):
        if member.server_uri not in [m.server_uri for m in self.living_members]:
self.living_members.append(member)
def detach_member_from_cluster(self):
self.storage.delete_member(self.server_uri)
def stop(self):
self.running = False
self.detach_member_from_cluster()
self.heartbeat_thread.join()
self.notify_thread.join()
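# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hypothetical wiring of the classes above. The server URI, the
# SQLAlchemy connection string and the 10s TTL are placeholder values chosen
# for illustration only; a real deployment would take them from the
# notification server's configuration and assumes a database reachable
# through notification_service.util.db.
if __name__ == '__main__':
    ha_storage = DbHighAvailabilityStorage(db_conn='sqlite:///ha_demo.db')
    ha_manager = SimpleNotificationServerHaManager()
    member_updated = threading.Condition()
    # start() spawns the heartbeat and notify daemon threads; ttl_ms controls
    # how long a member may miss heartbeats before it is treated as dead.
    ha_manager.start('localhost:50051', ha_storage, ttl_ms=10000,
                     min_notify_interval_ms=100,
                     member_updated_condition=member_updated)
    try:
        time.sleep(5)
        print(ha_manager.get_living_members())
    finally:
        ha_manager.stop()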
|
util.py
|
"""Misc. utility functions"""
import threading
from typing import Any, Callable, cast, Generic, Optional, Set, TypeVar
T = TypeVar("T")
Callback = Callable[[T], None]
class Observable(Generic[T]):
"""Basic implementation of an observable. Used for passing state down a
(possibly) long chain of views and controllers. Initialize with some
initial value and use observable.subscribe(func) to call func whenever the
value changes.
Adapted from https://gist.github.com/ajfigueroa/c2af555630d1db3efb5178ece728b017
"""
def __init__(self, initial_value: T):
self.value = initial_value
self.callbacks: Set[Callback[T]] = set()
def subscribe(self, func: Callback[T], call: bool = True) -> None:
"""Add a callback that gets called whenever the value changes. Optionally
call the function immediately"""
self.callbacks.add(func)
if call:
func(self.value)
def unsubscribe(self, func: Callback[T]) -> None:
"""Remove a callback"""
if func in self.callbacks:
self.callbacks.remove(func)
def update(self) -> None:
"""Call all callbacks with the current value"""
# An attempt to make this (sorta) thread-safe. Python throws a
# RuntimeError if a set changes size while iterating over it, so we
# copy to a list first and then double-check membership each iteration
for func in list(self.callbacks):
if func in self.callbacks:
func(self.value)
def set(self, value: T) -> None:
"""Set a new value"""
self.value = value
self.update()
def get(self) -> T:
"""Get the current value"""
return self.value
class Observer(Generic[T]):
"""A callback that can change observables"""
def __init__(
self,
callback: Callback[T],
initial_observable: Optional[Observable[T]] = None,
call: bool = True,
):
self.observable: Optional[Observable[T]] = initial_observable
if self.observable:
self.observable.subscribe(callback, call=call)
self.callback = callback
def get(self) -> Optional[T]:
"""Gets the value of the current observable if it exists, else None"""
if not self.observable:
return None
return self.observable.get()
def stop(self) -> None:
"""Stop observing the current observable"""
if self.observable:
self.observable.unsubscribe(self.callback)
def set_observable(self, new_observable: Observable[T], call: bool = True) -> None:
"""Change the observable we're observing"""
if self.observable == new_observable:
return
if self.observable:
self.observable.unsubscribe(self.callback)
new_observable.subscribe(self.callback, call=call)
self.observable = new_observable
F = TypeVar("F", bound=Callable[..., Any])
def in_main_thread(func: F) -> F:
"""Decorate an instance method of a view such that it is always executed
on the main thread using tkthread."""
def wrapper(self, *args, **kwargs): # type: ignore
closure = lambda: func(self, *args, **kwargs)
self.controller.tkt(closure)
return cast(F, wrapper)
def run_thread(func: Callable[[], None]) -> None:
"""Run a function in another thread"""
threading.Thread(target=func).start()
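# --- Illustrative usage sketch (not part of the original module) ---
# A small, self-contained demo of Observable and Observer as described in
# the docstrings above; the counter name and values are made up for
# illustration.
if __name__ == "__main__":
    counter: Observable[int] = Observable(0)

    def report(value: int) -> None:
        # Called once on subscribe and again after every set()
        print(f"counter is now {value}")

    observer = Observer(report, initial_observable=counter)
    counter.set(1)
    counter.set(2)
    observer.stop()
    counter.set(3)  # no output: report has been unsubscribed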
|
rl_server_no_training_modified.py
|
#!/usr/bin/env python
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import SocketServer
import base64
import urllib
import sys
import os
import json
import multiprocessing
import matplotlib.pyplot as plt
os.environ['CUDA_VISIBLE_DEVICES']=''
import numpy as np
import tensorflow as tf
import time
import a3c
S_INFO = 6  # last bit rate, buffer size, throughput, download time, next chunk sizes, chunks remaining
S_LEN = 8  # how many past frames of state to keep
A_DIM = 6
VIDEO_BIT_RATE = [300,750,1200,1850,2850,4300] # Kbps
BITRATE_REWARD = [1, 2, 3, 12, 15, 20]
BITRATE_REWARD_MAP = {0: 0, 300: 1, 750: 2, 1200: 3, 1850: 12, 2850: 15, 4300: 20}
M_IN_K = 1000.0
BUFFER_NORM_FACTOR = 10.0
CHUNK_TIL_VIDEO_END_CAP = 48.0
TOTAL_VIDEO_CHUNKS = 48
DEFAULT_QUALITY = 0 # default video quality without agent
REBUF_PENALTY = 4.3 # 1 sec rebuffering -> this number of Mbps
SMOOTH_PENALTY = 1
ACTOR_LR_RATE = 0.0001
CRITIC_LR_RATE = 0.001
TRAIN_SEQ_LEN = 100 # take as a train batch
MODEL_SAVE_INTERVAL = 100
RANDOM_SEED = 42
RAND_RANGE = 1000
SUMMARY_DIR = './results'
LOG_FILE = './results/log'
# in format of time_stamp bit_rate buffer_size rebuffer_time video_chunk_size download_time reward
# NN_MODEL = None
NN_MODEL = '../rl_server/results/pretrain_linear_reward.ckpt'
################################
# multiprocessing share variables
manager = multiprocessing.Manager()
Que1 = manager.dict()
Que2 = manager.dict()
QueOnline = manager.list()
DictOnline = {}
current_time = {}
current_quality = {}
begin_time = time.time()
################################
# video chunk sizes
size_video1 = [2354772, 2123065, 2177073, 2160877, 2233056, 1941625, 2157535, 2290172, 2055469, 2169201, 2173522, 2102452, 2209463, 2275376, 2005399, 2152483, 2289689, 2059512, 2220726, 2156729, 2039773, 2176469, 2221506, 2044075, 2186790, 2105231, 2395588, 1972048, 2134614, 2164140, 2113193, 2147852, 2191074, 2286761, 2307787, 2143948, 1919781, 2147467, 2133870, 2146120, 2108491, 2184571, 2121928, 2219102, 2124950, 2246506, 1961140, 2155012, 1433658]
size_video2 = [1728879, 1431809, 1300868, 1520281, 1472558, 1224260, 1388403, 1638769, 1348011, 1429765, 1354548, 1519951, 1422919, 1578343, 1231445, 1471065, 1491626, 1358801, 1537156, 1336050, 1415116, 1468126, 1505760, 1323990, 1383735, 1480464, 1547572, 1141971, 1498470, 1561263, 1341201, 1497683, 1358081, 1587293, 1492672, 1439896, 1139291, 1499009, 1427478, 1402287, 1339500, 1527299, 1343002, 1587250, 1464921, 1483527, 1231456, 1364537, 889412]
size_video3 = [1034108, 957685, 877771, 933276, 996749, 801058, 905515, 1060487, 852833, 913888, 939819, 917428, 946851, 1036454, 821631, 923170, 966699, 885714, 987708, 923755, 891604, 955231, 968026, 874175, 897976, 905935, 1076599, 758197, 972798, 975811, 873429, 954453, 885062, 1035329, 1026056, 943942, 728962, 938587, 908665, 930577, 858450, 1025005, 886255, 973972, 958994, 982064, 830730, 846370, 598850]
size_video4 = [668286, 611087, 571051, 617681, 652874, 520315, 561791, 709534, 584846, 560821, 607410, 594078, 624282, 687371, 526950, 587876, 617242, 581493, 639204, 586839, 601738, 616206, 656471, 536667, 587236, 590335, 696376, 487160, 622896, 641447, 570392, 620283, 584349, 670129, 690253, 598727, 487812, 575591, 605884, 587506, 566904, 641452, 599477, 634861, 630203, 638661, 538612, 550906, 391450]
size_video5 = [450283, 398865, 350812, 382355, 411561, 318564, 352642, 437162, 374758, 362795, 353220, 405134, 386351, 434409, 337059, 366214, 360831, 372963, 405596, 350713, 386472, 399894, 401853, 343800, 359903, 379700, 425781, 277716, 400396, 400508, 358218, 400322, 369834, 412837, 401088, 365161, 321064, 361565, 378327, 390680, 345516, 384505, 372093, 438281, 398987, 393804, 331053, 314107, 255954]
size_video6 = [181801, 155580, 139857, 155432, 163442, 126289, 153295, 173849, 150710, 139105, 141840, 156148, 160746, 179801, 140051, 138313, 143509, 150616, 165384, 140881, 157671, 157812, 163927, 137654, 146754, 153938, 181901, 111155, 153605, 149029, 157421, 157488, 143881, 163444, 179328, 159914, 131610, 124011, 144254, 149991, 147968, 161857, 145210, 172312, 167025, 160064, 137507, 118421, 112270]
def get_chunk_size(quality, index):
if ( index < 0 or index > 48 ):
return 0
    # note that the quality and video labels are inverted (i.e., quality 5 is highest and this pertains to video1)
sizes = {5: size_video1[index], 4: size_video2[index], 3: size_video3[index], 2: size_video4[index], 1: size_video5[index], 0: size_video6[index]}
return sizes[quality]
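# For example (values taken from the size tables above): get_chunk_size(5, 0)
# returns 2354772 bytes, the first chunk of the highest-quality stream (video1),
# while get_chunk_size(0, 0) returns 181801 bytes, the lowest quality (video6).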
def make_request_handler(input_dict):
class Request_Handler(BaseHTTPRequestHandler):
def __init__(self, *args, **kwargs):
self.input_dict = input_dict
self.sess = input_dict['sess']
self.log_file = input_dict['log_file']
self.actor = input_dict['actor']
self.critic = input_dict['critic']
self.saver = input_dict['saver']
self.s_batch = input_dict['s_batch']
self.a_batch = input_dict['a_batch']
self.r_batch = input_dict['r_batch']
BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
def do_POST(self):
global Que1
global Que2
global begin_time
content_length = int(self.headers['Content-Length'])
post_data = json.loads(self.rfile.read(content_length))
print post_data
if ( 'pastThroughput' in post_data ):
# @Hongzi: this is just the summary of throughput/quality at the end of the load
# so we don't want to use this information to send back a new quality
print "Summary: ", post_data
else:
t = float(time.time() - begin_time)
q = int(post_data['lastquality'])
print(self.address_string())
if self.address_string() in Que1.keys():
temp1 = Que1[self.address_string()]
temp1.append(t)
temp2 = Que2[self.address_string()]
temp2.append(q)
Que1[self.address_string()] = temp1
Que2[self.address_string()] = temp2
else:
Que1[self.address_string()] = [t, ]
Que2[self.address_string()] = [q, ]
# option 1. reward for just quality
# reward = post_data['lastquality']
# option 2. combine reward for quality and rebuffer time
# tune up the knob on rebuf to prevent it more
# reward = post_data['lastquality'] - 0.1 * (post_data['RebufferTime'] - self.input_dict['last_total_rebuf'])
# option 3. give a fixed penalty if video is stalled
# this can reduce the variance in reward signal
# reward = post_data['lastquality'] - 10 * ((post_data['RebufferTime'] - self.input_dict['last_total_rebuf']) > 0)
# option 4. use the metric in SIGCOMM MPC paper
rebuffer_time = float(post_data['RebufferTime'] -self.input_dict['last_total_rebuf'])
# --linear reward--
reward = VIDEO_BIT_RATE[post_data['lastquality']] / M_IN_K \
- REBUF_PENALTY * rebuffer_time / M_IN_K \
- SMOOTH_PENALTY * np.abs(VIDEO_BIT_RATE[post_data['lastquality']] -
self.input_dict['last_bit_rate']) / M_IN_K
# --log reward--
# log_bit_rate = np.log(VIDEO_BIT_RATE[post_data['lastquality']] / float(VIDEO_BIT_RATE[0]))
# log_last_bit_rate = np.log(self.input_dict['last_bit_rate'] / float(VIDEO_BIT_RATE[0]))
# reward = log_bit_rate \
# - 4.3 * rebuffer_time / M_IN_K \
# - SMOOTH_PENALTY * np.abs(log_bit_rate - log_last_bit_rate)
# --hd reward--
# reward = BITRATE_REWARD[post_data['lastquality']] \
# - 8 * rebuffer_time / M_IN_K - np.abs(BITRATE_REWARD[post_data['lastquality']] - BITRATE_REWARD_MAP[self.input_dict['last_bit_rate']])
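                # Worked numeric example of the linear reward above (illustrative
                # numbers, not from a real trace): with lastquality == 4 (2850 Kbps),
                # a previous bitrate of 1200 Kbps and 500 ms of new rebuffering,
                # reward = 2850/1000 - 4.3*500/1000 - 1*|2850 - 1200|/1000
                #        = 2.85 - 2.15 - 1.65 = -0.95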
self.input_dict['last_bit_rate'] = VIDEO_BIT_RATE[post_data['lastquality']]
self.input_dict['last_total_rebuf'] = post_data['RebufferTime']
# retrieve previous state
if len(self.s_batch) == 0:
state = [np.zeros((S_INFO, S_LEN))]
else:
state = np.array(self.s_batch[-1], copy=True)
# compute bandwidth measurement
video_chunk_fetch_time = post_data['lastChunkFinishTime'] - post_data['lastChunkStartTime']
video_chunk_size = post_data['lastChunkSize']
# compute number of video chunks left
video_chunk_remain = TOTAL_VIDEO_CHUNKS - self.input_dict['video_chunk_coount']
self.input_dict['video_chunk_coount'] += 1
# dequeue history record
state = np.roll(state, -1, axis=1)
next_video_chunk_sizes = []
for i in xrange(A_DIM):
next_video_chunk_sizes.append(get_chunk_size(i, self.input_dict['video_chunk_coount']))
# this should be S_INFO number of terms
try:
state[0, -1] = VIDEO_BIT_RATE[post_data['lastquality']] / float(np.max(VIDEO_BIT_RATE))
state[1, -1] = post_data['buffer'] / BUFFER_NORM_FACTOR
state[2, -1] = float(video_chunk_size) / float(video_chunk_fetch_time) / M_IN_K # kilo byte / ms
state[3, -1] = float(video_chunk_fetch_time) / M_IN_K / BUFFER_NORM_FACTOR # 10 sec
state[4, :A_DIM] = np.array(next_video_chunk_sizes) / M_IN_K / M_IN_K # mega byte
state[5, -1] = np.minimum(video_chunk_remain, CHUNK_TIL_VIDEO_END_CAP) / float(CHUNK_TIL_VIDEO_END_CAP)
except ZeroDivisionError:
                    # this should occur VERY rarely (about 1 out of 3000 requests); likely a dash.js issue
                    # in this case we ignore the observation and roll back to an earlier one
if len(self.s_batch) == 0:
state = [np.zeros((S_INFO, S_LEN))]
else:
state = np.array(self.s_batch[-1], copy=True)
# log wall_time, bit_rate, buffer_size, rebuffer_time, video_chunk_size, download_time, reward
self.log_file.write(str(time.time()) + '\t' +
str(VIDEO_BIT_RATE[post_data['lastquality']]) + '\t' +
str(post_data['buffer']) + '\t' +
str(rebuffer_time / M_IN_K) + '\t' +
str(video_chunk_size) + '\t' +
str(video_chunk_fetch_time) + '\t' +
str(reward) + '\n')
self.log_file.flush()
action_prob = self.actor.predict(np.reshape(state, (1, S_INFO, S_LEN)))
action_cumsum = np.cumsum(action_prob)
bit_rate = (action_cumsum > np.random.randint(1, RAND_RANGE) / float(RAND_RANGE)).argmax()
# Note: we need to discretize the probability into 1/RAND_RANGE steps,
# because there is an intrinsic discrepancy in passing single state and batch states
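                # e.g. action_prob = [0.1, 0.2, 0.7] -> action_cumsum = [0.1, 0.3, 1.0]; a uniform draw
                # u in (0, 1) then selects index 0/1/2 with probability ~0.1/0.2/0.7 (inverse-CDF sampling)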
                # send the selected bitrate index back to the client (HTML/JS player) side
send_data = str(bit_rate)
end_of_video = False
if ( post_data['lastRequest'] == TOTAL_VIDEO_CHUNKS ):
send_data = "REFRESH"
end_of_video = True
self.input_dict['last_total_rebuf'] = 0
self.input_dict['last_bit_rate'] = DEFAULT_QUALITY
self.input_dict['video_chunk_coount'] = 0
self.log_file.write('\n') # so that in the log we know where video ends
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.send_header('Content-Length', len(send_data))
self.send_header('Access-Control-Allow-Origin', "*")
self.end_headers()
self.wfile.write(send_data)
# record [state, action, reward]
# put it here after training, notice there is a shift in reward storage
if end_of_video:
self.s_batch = [np.zeros((S_INFO, S_LEN))]
else:
self.s_batch.append(state)
def do_GET(self):
print >> sys.stderr, 'GOT REQ'
self.send_response(200)
#self.send_header('Cache-Control', 'Cache-Control: no-cache, no-store, must-revalidate max-age=0')
self.send_header('Cache-Control', 'max-age=3000')
self.send_header('Content-Length', 20)
self.end_headers()
self.wfile.write("console.log('here');")
def log_message(self, format, *args):
return
return Request_Handler
###### onlineCheck #######
def onlineCheck(Que1, Que2, QueOL):
color_ = ['black', 'red', 'blue', 'green', 'gold', 'm']
while True:
        j = 0
        for i in Que1.keys():
            c = color_[j % len(color_)]  # cycle through colors if there are more clients than colors
            plt.plot(Que1.get(i), Que2.get(i), color=c)
            plt.scatter(Que1.get(i), Que2.get(i), color=c)
            j += 1
plt.pause(1)
time.sleep(2)
##########################
def run(server_class=HTTPServer, port=8333, log_file_path=LOG_FILE):
np.random.seed(RANDOM_SEED)
assert len(VIDEO_BIT_RATE) == A_DIM
if not os.path.exists(SUMMARY_DIR):
os.makedirs(SUMMARY_DIR)
with tf.Session() as sess, open(log_file_path, 'wb') as log_file:
actor = a3c.ActorNetwork(sess,
state_dim=[S_INFO, S_LEN], action_dim=A_DIM,
learning_rate=ACTOR_LR_RATE)
critic = a3c.CriticNetwork(sess,
state_dim=[S_INFO, S_LEN],
learning_rate=CRITIC_LR_RATE)
sess.run(tf.initialize_all_variables())
saver = tf.train.Saver() # save neural net parameters
# restore neural net parameters
nn_model = NN_MODEL
if nn_model is not None: # nn_model is the path to file
saver.restore(sess, nn_model)
print("Model restored.")
init_action = np.zeros(A_DIM)
init_action[DEFAULT_QUALITY] = 1
s_batch = [np.zeros((S_INFO, S_LEN))]
a_batch = [init_action]
r_batch = []
train_counter = 0
last_bit_rate = DEFAULT_QUALITY
last_total_rebuf = 0
        # need this storage, because observation only contains total rebuffering time
        # we compute the difference to get the per-chunk rebuffering time
video_chunk_count = 0
input_dict = {'sess': sess, 'log_file': log_file,
'actor': actor, 'critic': critic,
'saver': saver, 'train_counter': train_counter,
'last_bit_rate': last_bit_rate,
'last_total_rebuf': last_total_rebuf,
'video_chunk_coount': video_chunk_count,
's_batch': s_batch, 'a_batch': a_batch, 'r_batch': r_batch}
# interface to abr_rl server
handler_class = make_request_handler(input_dict=input_dict)
server_address = ('', port)
httpd = server_class(server_address, handler_class)
print 'Listening on port ' + str(port)
####### onlineCheck ######
global Que1
global Que2
global QueOnline
        p = multiprocessing.Process(target=onlineCheck, args=(Que1, Que2, QueOnline))
        p.daemon = True  # must be set before start() for the flag to take effect
        p.start()
##########################
httpd.serve_forever()
def main():
if len(sys.argv) == 2:
trace_file = sys.argv[1]
run(log_file_path=LOG_FILE + '_RL_' + trace_file)
else:
run()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print "Keyboard interrupted."
try:
sys.exit(0)
except SystemExit:
os._exit(0)
|
transform_replay.py
|
#!/usr/bin/env python
from pysc2.lib import features, point
from absl import app, flags
from pysc2.env.environment import TimeStep, StepType
from pysc2 import run_configs
from s2clientprotocol import sc2api_pb2 as sc_pb
import importlib
import glob
from random import randint
import pickle
from multiprocessing import Process
from tqdm import tqdm
import math
import random
import numpy as np
import multiprocessing
cpus = multiprocessing.cpu_count()
FLAGS = flags.FLAGS
flags.DEFINE_string("replays", None, "Path to the replay files.")
flags.DEFINE_string("agent", None, "Path to an agent.")
flags.DEFINE_integer("procs", cpus, "Number of processes.", lower_bound=1)
flags.DEFINE_integer("frames", 10, "Frames per game.", lower_bound=1)
flags.DEFINE_integer("start", 0, "Start at replay no.", lower_bound=0)
flags.DEFINE_integer("batch", 16, "Size of replay batch for each process", lower_bound=1, upper_bound=512)
flags.mark_flag_as_required("replays")
flags.mark_flag_as_required("agent")
class Parser:
def __init__(self,
replay_file_path,
agent,
player_id=1,
screen_size_px=(60, 60),
minimap_size_px=(60, 60),
discount=1.,
frames_per_game=1):
print("Parsing " + replay_file_path)
self.replay_file_name = replay_file_path.split("/")[-1].split(".")[0]
self.agent = agent
self.discount = discount
self.frames_per_game = frames_per_game
self.run_config = run_configs.get()
self.sc2_proc = self.run_config.start()
self.controller = self.sc2_proc.controller
replay_data = self.run_config.replay_data(self.replay_file_name + '.SC2Replay')
ping = self.controller.ping()
self.info = self.controller.replay_info(replay_data)
if not self._valid_replay(self.info, ping):
raise Exception("{} is not a valid replay file!".format(self.replay_file_name + '.SC2Replay'))
screen_size_px = point.Point(*screen_size_px)
minimap_size_px = point.Point(*minimap_size_px)
interface = sc_pb.InterfaceOptions(
raw=False, score=True,
feature_layer=sc_pb.SpatialCameraSetup(width=24))
screen_size_px.assign_to(interface.feature_layer.resolution)
minimap_size_px.assign_to(interface.feature_layer.minimap_resolution)
map_data = None
if self.info.local_map_path:
map_data = self.run_config.map_data(self.info.local_map_path)
self._episode_length = self.info.game_duration_loops
self._episode_steps = 0
self.controller.start_replay(sc_pb.RequestStartReplay(
replay_data=replay_data,
map_data=map_data,
options=interface,
observed_player_id=player_id))
self._state = StepType.FIRST
@staticmethod
def _valid_replay(info, ping):
"""Make sure the replay isn't corrupt, and is worth looking at."""
if (info.HasField("error") or
info.base_build != ping.base_build or # different game version
info.game_duration_loops < 1000 or
len(info.player_info) != 2):
# Probably corrupt, or just not interesting.
return False
# for p in info.player_info:
# if p.player_apm < 10 or p.player_mmr < 1000:
# # Low APM = player just standing around.
# # Low MMR = corrupt replay or player who is weak.
# return False
return True
def start(self):
_features = features.Features(self.controller.game_info())
frames = random.sample(np.arange(self.info.game_duration_loops).tolist(), self.info.game_duration_loops)
frames = frames[0 : min(self.frames_per_game, self.info.game_duration_loops)]
frames.sort()
last_frame = 0
for frame in frames:
skips = frame - last_frame
last_frame = frame
self.controller.step(skips)
obs = self.controller.observe()
agent_obs = _features.transform_obs(obs.observation)
if obs.player_result: # Episode over.
self._state = StepType.LAST
discount = 0
else:
discount = self.discount
self._episode_steps += skips
step = TimeStep(step_type=self._state, reward=0,
discount=discount, observation=agent_obs)
self.agent.step(step, obs.actions, self.info)
if obs.player_result:
break
self._state = StepType.MID
print("Saving data")
pickle.dump({"info" : self.info, "state" : self.agent.states}, open("data/" + self.replay_file_name + ".p", "wb"))
print("Data successfully saved")
self.agent.states = []
print("Data flushed")
print("Done")
def parse_replay(replay_batch, agent_module, agent_cls, frames_per_game):
for replay in replay_batch:
try:
parser = Parser(replay, agent_cls(), frames_per_game=frames_per_game)
parser.start()
except Exception as e:
print(e)
def main(unused):
agent_module, agent_name = FLAGS.agent.rsplit(".", 1)
agent_cls = getattr(importlib.import_module(agent_module), agent_name)
processes = FLAGS.procs
replay_folder = FLAGS.replays
frames_per_game = FLAGS.frames
batch_size = FLAGS.batch
replays = glob.glob(replay_folder + '*.SC2Replay')
start = FLAGS.start
for i in tqdm(range(math.ceil(len(replays)/processes/batch_size))):
procs = []
x = i * processes * batch_size
if x < start:
continue
        for proc_num in range(processes):
            xp1 = x + proc_num * batch_size
            xp2 = xp1 + batch_size
            xp2 = min(xp2, len(replays))
            p = Process(target=parse_replay, args=(replays[xp1:xp2], agent_module, agent_cls, frames_per_game))
            p.start()
            procs.append(p)
if xp2 == len(replays):
break
for p in procs:
p.join()
if __name__ == "__main__":
app.run(main)
|
console.py
|
"""
@copyright: 2013 Single D Software - All Rights Reserved
@summary: Provides a console API for Light Maestro.
"""
# Standard library imports
import collections
import json
import logging
import os
import re
import threading
import time
# Application imports
import wavetrigger
# Named logger for this module
_logger = logging.getLogger(__name__)
# Maximum number of channels supported
maxchannels = 96
class SceneAlreadyChangedError(Exception):
"""Requested scene is changing or already changed."""
pass
class SceneNotFoundError(Exception):
"""Missing or corrupt scene file."""
pass
class NotSupportedError(Exception):
"""Console does not support this function."""
pass
class CommunicationError(Exception):
"""Communication with the console failed."""
pass
def _alphasort(items):
""" Sort the given list in the way that humans expect."""
convert = lambda t: int(t) if t.isdigit() else t
alphakey = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
return sorted(items, key=alphakey)
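# e.g. _alphasort(['Scene10', 'Scene2', 'Scene1']) -> ['Scene1', 'Scene2', 'Scene10'] (natural sort)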
class Console():
"""Abstract class from which all other console classes inherit."""
def _getscenefilename(self, sceneid):
return os.path.join(self._scenepath, sceneid + '.json')
def getstatus(self):
"""
Provide status information for the connection to the console.
@return: Dictionary containing status information
"""
status = {'condition': 'operational'}
status['interface'] = self.__class__.__name__
status['fading'] = self._target is not None
if self._sceneid is not None:
status['scene'] = self._sceneid
return status
def getchannels(self):
"""
Provide all DMX channel values.
@return: Dictionary containing all channel numbers and values
"""
return self._channels
def loadchannels(self, data, sceneid=None):
with self._lock:
fade = data.get('fade', 0)
self._target = data.get('channels', {})
self._fadetime = time.time() + fade
self._sceneid = sceneid
# Bypass the fading logic
if fade == 0:
self._setchannels(self._target)
def getpalette(self):
try:
with open('palettes/Palette.json') as f:
return json.load(f)
except (IOError, ValueError):
raise CommunicationError
def savepalette(self, palette):
try:
with open('palettes/Palette.json', 'w') as f:
json.dump(palette, f, indent=4, sort_keys=True)
except (IOError, TypeError):
raise CommunicationError
def getscenes(self):
try:
scenelist = _alphasort(os.listdir(self._scenepath))
scenelist = [os.path.splitext(s) for s in scenelist]
scenelist = [s[0] for s in scenelist if s[1] == '.json']
return scenelist
except OSError:
raise CommunicationError
def getscene(self, sceneid):
try:
with open(self._getscenefilename(sceneid)) as f:
return json.load(f)
except IOError:
raise SceneNotFoundError
except ValueError:
raise CommunicationError
def changescene(self, sceneid):
if self._sceneid == sceneid:
raise SceneAlreadyChangedError
scene = self.getscene(sceneid)
self.loadchannels(scene, sceneid)
def loadscene(self, sceneid):
scene = self.getscene(sceneid)
scene.pop('fade', None)
self.loadchannels(scene, sceneid)
def savescene(self, sceneid, scene=None):
if scene is None:
scene = self.getchannels()
try:
with open(self._getscenefilename(sceneid), 'w') as f:
json.dump(scene, f, indent=4, sort_keys=True)
wavetrigger.writewavefile(sceneid)
except (IOError, TypeError):
raise CommunicationError
self._sceneid = sceneid
def deletescene(self, sceneid):
try:
if self._sceneid == sceneid:
self._sceneid = None
os.remove(self._getscenefilename(sceneid))
os.remove(wavetrigger.getwavefilename(sceneid))
except FileNotFoundError:
return
except OSError:
raise CommunicationError
def _setchannels(self, channels):
self._channels.update(channels)
def _fader(self):
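        # Runs on a background thread: every `fadedelay` seconds, step each channel
        # linearly toward its target so that the fade completes at self._fadetime.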
fadedelay = 0.1
while True:
time.sleep(fadedelay)
if self._target:
with self._lock:
remainingfade = self._fadetime - time.time()
if remainingfade > fadedelay:
fadechannels = {}
for c, v in self._target.items():
delta = (self._target[c] - self._channels[c]) * fadedelay / remainingfade
fadechannels[c] = self._channels[c] + delta
self._setchannels(fadechannels)
else:
self._setchannels(self._target)
self._target = None
def __init__(self, scenepath='scenes'):
"""Initialize the console object."""
self._channels = collections.OrderedDict((str(c+1), 0.0) for c in range(maxchannels))
self._target = self._channels
self._fadetime = time.time()
self._sceneid = None
self._lock = threading.Lock()
self._scenepath = scenepath
# Start the scene transition task
threading.Thread(target=self._fader).start()
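# Minimal usage sketch (illustrative only; the channel numbers and values below are assumptions):
#
#   console = Console(scenepath='scenes')
#   console.loadchannels({'channels': {'1': 255, '2': 128}, 'fade': 2.0})  # fade to new levels over 2 s
#   print(console.getchannels()['1'])
#   console.savescene('MyScene')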
|
stripsim.py
|
"""
stripsim.py
Graphical NeoPixel LED strip emulator.
Connect target's Neopixel output to NeoPill's (STM32 Bluepill) inputs.
Connect PC via USB to NeoPill for Neopixel->USB serial bridge. Find the appropriate COM port to use.
This code reads a yaml file for configuration.
Each LED is a simple rectangle or circle, with an optional small gap in-between.
Using a COM port, we sync to the pixel bitstream. Once sync'd each frame of pixel data is gamma-corrected and displayed as fast as possible.
Display frame rate is dictated by the target device and the config item 'fps_limit'. However, many LCD monitors only go to 60Hz.
Simple runtime controls (window active):
mouseclick : shows runtime stats
Up/Dn arrow: adjust gamma
's': re-sync to the bitstream
COM port not connecting? Go into the Windows Device Manager and "disable/enable" the port.
This may be required after Bluepill reset.
Need to emulate with different bitstream timing?
timing_T1H, timing_RES config items.
For python 3.8.1
R. Elwin 5/2021
"""
import threading
import queue
from typing import List, Any
import yaml # 5.4.1
import pygame # 2.0!
# from pygame.locals import * # doing this floods debug with symbols so specify ones we actually use
from pygame.locals import QUIT, MOUSEBUTTONDOWN, KEYDOWN, K_UP, K_DOWN, K_s
import os
import time
import sys
import serial # 3.5
os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % (20, 20) # default window position
# Init USB serial port, send optional timing string
def serial_init(com_port, timing_mod=None):
try:
ser = serial.Serial(com_port, baudrate=12000000, timeout=None) # max USB speed, read timeout adjusted for debugging, timeout = None waits forever
if timing_mod is not None:
ser.write(bytearray(timing_mod.encode('utf-8'))) # send timing mod
# ser.write(bytearray("T 38 3601".encode('utf-8'))) # test timing mod
time.sleep(0.25) # can't use flushInput() so delay instead
return ser
except serial.SerialException:
print("SerialException: Can't Open", com_port)
return None
# sync with NeoPill, may fail occasionally...
def serial_sync(ser):
ser.write(bytearray("F".encode('utf-8'))) # send 'Flush' byte
ser.reset_input_buffer() # having issues with USB data not clearing (on Windows10) so loop a few times
for d in range(0, 3):
time.sleep(0.25) # wait for USB drain
ser.reset_input_buffer() # in case frame data in the pipe
ser.write(bytearray("S".encode('utf-8'))) # send sync start byte
class LEDStrip(object):
"""
reads YAML config
configures pygame window display
generates LED display
formats LED frames to window
"""
    # indices into each LED array entry: surf_obj[0], rect[1], color[2]
IDX_surf_obj = 0
IDX_rect = 1
IDX_color = 2
def __init__(self, config_path):
self.ledarray: List[Any] = [] # LED graphics elements
self.led_display_format = None
self.led_show = None
self.screen = None
self.ser = None
self.serial_port = None
self.timing_mod_T1H = None
self.timing_mod_RES = None
self.timing_mod = None
self.strip_type = {} # data from yaml file
self.matrix_type = {} # data from yaml file
self.led_count = 0
self.led_width = 1
self.led_height = 1
self.radius = 0
self.center = 0
self.gamma = 0.25 # LEDs are much brighter than LCD, smaller gamma=brighter. HW gamma not supported.
self.frame_size = 0
self.display_rate_limit = 1 / 120 # 120Hz display rate limit default, might be too fast still
self.led_create = None
self.generate_display = None
self.read_config(config_path)
self.format_timing_mod()
self.generate_display()
self.led_create(self.led_count) # only using 1 strip/matrix at a time so far
# load the yaml file into ledlist{}
def read_config(self, config_path):
with open(config_path) as f:
data = yaml.load(f, Loader=yaml.FullLoader)
# print("Key: Value")
for key, value in data.items():
if key == "strip":
self.strip_type = value
self.generate_display = self.generate_display_strip
# print(self.strip_type)
elif key == "matrix":
self.matrix_type = value
self.generate_display = self.generate_display_matrix
elif key == "timing_T1H":
self.timing_mod_T1H = value # modified timing string
print("Using Modified T1H timing", f"{value}")
elif key == "timing_RES":
self.timing_mod_RES = value # modified timing string
print("Using Modified RES timing", f"{value}")
elif key == "serial_port":
self.serial_port = value
elif key == "fps_limit":
self.display_rate_limit = 1 / (value * 1.1) # use time per frame, plus 10% slop
# format timing mod msg, specific to NeoPill timing. Calcs TIM2,4 delay, in clock counts (not ns).
def format_timing_mod(self):
cpu_clk = 13.88 # 13.88ns clk cycle, 1/72MHz
T1H = 0
if self.timing_mod_T1H is not None:
T1H = int((int(self.timing_mod_T1H) - 3 * cpu_clk) / cpu_clk) # TIM4 ARR has 3 clk delay
RES = 0
if self.timing_mod_RES is not None:
RES = int(int(self.timing_mod_RES) / cpu_clk) # TIM2 CCR1 count
# format a timing mod if using either
if self.timing_mod_T1H is not None or self.timing_mod_RES is not None:
self.timing_mod = "T " + str(T1H) + " " + str(RES)
# generate pygame display window filled with LED strip
def generate_display_strip(self):
self.led_create = self.led_create_strip
os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % (self.strip_type[0]['wposx'], self.strip_type[0]['wposy']) # window position
self.led_count = self.strip_type[0]['num']
self.led_width = int(self.strip_type[0]['length'] / self.strip_type[0]['num']) - self.strip_type[0]['gap']
strip_width = (self.led_width + self.strip_type[0]['gap']) * self.strip_type[0]['num']
size = (strip_width, self.strip_type[0]['height']) # create a strip
self.screen = pygame.display.set_mode(size, flags=0, display=0) # vsync=0 default
pygame.display.set_caption(self.strip_type[0]['wname'])
print(pygame.display.get_driver())
print(pygame.display.Info())
# populate LED array with LED surface objects, filled circles or plain squares
def led_create_strip(self, num):
for i in range(0, num):
surf = pygame.Surface((self.led_width, self.strip_type[0]['height'])) # surface object
rect = surf.get_rect()
rect.left = i * (self.led_width + self.strip_type[0]['gap'])
rect.top = 0
color = pygame.Color(0, (i * 3) % 256, 10, 255) # initial RGBA color, also fills in Alpha of RGBA
self.ledarray.append((surf, rect, color)) # surf_obj[0], rect[1], color[2]
self.led_format(self.strip_type[0]['style'], self.strip_type[0]['ledcolor'], num)
# generate pygame display window filled with LED matrix
def generate_display_matrix(self):
self.led_create = self.led_create_matrix
os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % (self.matrix_type[0]['wposx'], self.matrix_type[0]['wposy']) # window position
self.led_count = self.matrix_type[0]['matrix_w'] * self.matrix_type[0]['matrix_h']
self.led_width = int(self.matrix_type[0]['length'] / self.matrix_type[0]['matrix_w']) - self.matrix_type[0]['gap'] # gap is symmetric side & bottom
strip_width = (self.led_width + self.matrix_type[0]['gap']) * self.matrix_type[0]['matrix_w'] - self.matrix_type[0]['gap']
self.led_height = int(self.matrix_type[0]['height'] / self.matrix_type[0]['matrix_h']) - self.matrix_type[0]['gap']
strip_height = (self.led_height + self.matrix_type[0]['gap']) * self.matrix_type[0]['matrix_h'] - self.matrix_type[0]['gap']
size = (strip_width, strip_height) # display (w,h), adjusted width, height
self.screen = pygame.display.set_mode(size, flags=0, display=0) # vsync=0 default
pygame.display.set_caption(self.matrix_type[0]['wname'])
print(pygame.display.get_driver())
print(pygame.display.Info())
# matrix option, populate LED array with LED surface objects, filled circles or plain squares
def led_create_matrix(self, num):
for j in range(0, self.matrix_type[0]['matrix_h']):
for i in range(0, self.matrix_type[0]['matrix_w']):
surf = pygame.Surface((self.led_width, self.led_height)) # surface object
rect = surf.get_rect()
rect.left = i * (self.led_width + self.matrix_type[0]['gap'])
rect.top = j * (self.led_height + self.matrix_type[0]['gap'])
color = pygame.Color(j, (i * 3) % 256, 10, 255) # initial RGBA color, also fills in Alpha of RGBA
self.ledarray.append((surf, rect, color)) # surf_obj[0], rect[1], color[2]
self.led_format(self.matrix_type[0]['style'], self.matrix_type[0]['ledcolor'], num)
# assign LED style, color order, frame size
def led_format(self, style, ledcolor, num):
if style == 'circle':
self.led_show = self.led_show_circle
# use ledarray[0] to get rect attributes to calc circle radius and center
self.radius = min(self.ledarray[0][LEDStrip.IDX_rect].h, self.ledarray[0][LEDStrip.IDX_rect].w) / 2
self.center = self.ledarray[0][LEDStrip.IDX_rect].w / 2, self.ledarray[0][LEDStrip.IDX_rect].h / 2
else:
self.led_show = self.led_show_rect
if ledcolor == 'GRBW':
self.led_display_format = self.led_frame_copy_GRBW
self.frame_size = 4 * num
elif ledcolor == 'GRB':
self.led_display_format = self.led_frame_copy_GRB
self.frame_size = 3 * num
else:
print("Error! No LED format, ledcolor: not assigned")
# blit rectangular LEDs, then display
def led_show_rect(self):
for led in self.ledarray:
color = led[LEDStrip.IDX_color].correct_gamma(self.gamma) # adjust gamma for each LED
led[LEDStrip.IDX_surf_obj].fill(color) # color square
self.screen.blit(led[LEDStrip.IDX_surf_obj], led[LEDStrip.IDX_rect])
pygame.display.flip()
# blit circular LEDs, then display
def led_show_circle(self):
for led in self.ledarray:
color = led[LEDStrip.IDX_color].correct_gamma(self.gamma) # adjust gamma for each LED
pygame.draw.circle(led[LEDStrip.IDX_surf_obj], color, self.center, self.radius)
self.screen.blit(led[LEDStrip.IDX_surf_obj], led[LEDStrip.IDX_rect])
pygame.display.flip()
# copy a frame of GRB LED data to RGBA LED array
def led_frame_copy_GRB(self, led_data_frame):
i = 0
for k in range(0, self.led_count):
self.ledarray[k][LEDStrip.IDX_color][1] = led_data_frame[i] # G
i += 1
self.ledarray[k][LEDStrip.IDX_color][0] = led_data_frame[i] # R
i += 1
self.ledarray[k][LEDStrip.IDX_color][2] = led_data_frame[i] # B
i += 1
# copy a frame of GRBW LED data to RGBA LED array
def led_frame_copy_GRBW(self, led_data_frame):
i = 0
for k in range(0, self.led_count):
self.ledarray[k][LEDStrip.IDX_color][1] = led_data_frame[i] # G
i += 1
self.ledarray[k][LEDStrip.IDX_color][0] = led_data_frame[i] # R
i += 1
self.ledarray[k][LEDStrip.IDX_color][2] = led_data_frame[i] # B
i += 1
w = led_data_frame[i]
            # white LED component: saturating-add it to each RGB channel
if w > 0:
for j in range(0, 3):
c = self.ledarray[k][LEDStrip.IDX_color][j] + w
if c > 255:
c = 255
self.ledarray[k][LEDStrip.IDX_color][j] = c
# ledarray[k][5][3] = 255 # alpha already populated
i += 1
class SerialReader(object):
"""
    Based on the Miniterm terminal application.
    Copies frames of blksize bytes from the serial port into Queue q.
"""
def __init__(self, serial_instance, q, blksize):
self.serial = serial_instance
self.alive = None
self._reader_alive = None
self.receiver_thread = None
self.q = q
self.blksize = blksize
def _start_reader(self):
"""Start reader thread"""
self._reader_alive = True
self.receiver_thread = threading.Thread(target=self.reader, name='rx')
self.receiver_thread.daemon = True
self.receiver_thread.start()
def _stop_reader(self):
"""Stop reader thread only, wait for clean exit of thread"""
self._reader_alive = False
self.receiver_thread.join()
def start(self):
"""start worker threads"""
self.alive = True
self._start_reader()
def stop(self):
"""set flag to stop worker threads"""
self.alive = False
self._stop_reader()
def join(self, transmit_only=False):
"""wait for worker threads to terminate"""
self.receiver_thread.join()
def close(self):
self.serial.close()
def reader(self):
"""loop and queue serial data frames"""
try:
while self.alive and self._reader_alive:
data = self.serial.read(self.blksize)
self.q.put(data)
except serial.SerialException:
self.alive = False
raise # XXX handle instead of re-raise
##===============================================
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Missing config.yaml")
exit(-2)
print("pyserial ver:", serial.__version__)
print("pyYAML ver:", yaml.__version__)
pygame.init()
strip = LEDStrip(sys.argv[1])
strip.led_show()
ser = serial_init(strip.serial_port, strip.timing_mod)
if ser is None:
exit(-1) # many reasons to not connect
print("Using COM Port:", ser.name)
dataQueue = queue.Queue(1024) # input data Q, in frames. Rather large in case you're moving the window.
sr = SerialReader(ser, dataQueue, strip.frame_size)
    # initial sync; if this fails, press 's' to resync
sr.start()
serial_sync(ser) # resync and flush
maxqsz = 0
frames_displayed = 0
frames_read = 0
t = time.perf_counter()
fps_t = time.perf_counter()
while True:
if dataQueue.qsize() > 0:
frame_data = dataQueue.get()
frames_read += 1
# rate limit display
fps_et = time.perf_counter() - fps_t
if fps_et >= strip.display_rate_limit:
fps_t = time.perf_counter()
strip.led_display_format(frame_data)
strip.led_show()
frames_displayed += 1
if dataQueue.qsize() > maxqsz:
maxqsz = dataQueue.qsize()
else:
time.sleep(0.0001) # let input stream dictate FPS, but need a small sleep to keep CPU% lower
# Cycles through all occurring events
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
elif event.type == MOUSEBUTTONDOWN: # show some stats
et = time.perf_counter() - t
print("maxQ=", maxqsz, ", fps displayed=", int(frames_displayed / et), ", displayed rate b/s", int(frames_displayed / et) * strip.frame_size, ", input rate b/s=",
int(frames_read / et) * strip.frame_size)
t = time.perf_counter()
frames_displayed = 0
frames_read = 0
maxqsz = 0
elif event.type == KEYDOWN: # change gamma
if event.key == K_UP:
strip.gamma -= 0.01
elif event.key == K_DOWN:
strip.gamma += 0.01
elif event.key == K_s:
print("Sync")
sr.stop()
serial_sync(ser) # try to resync, flush
sr.start()
if strip.gamma > 1:
strip.gamma = 1
elif strip.gamma < 0:
strip.gamma = 0.1
print('{:.2f}'.format(strip.gamma))
|
HTTPControl.py
|
import logging
import mimetypes
import os
import pathlib
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
from datetime import datetime, timedelta
import jinja2
import json
import re
import threading
import time
import urllib.parse
import math
from ww import f
logger = logging.getLogger(__name__.rsplit(".")[-1])
class ThreadingSimpleServer(ThreadingMixIn, HTTPServer):
pass
class HTTPControl:
configConfig = {}
configHTTP = {}
httpPort = 8080
master = None
status = False
def __init__(self, master):
self.master = master
try:
self.configConfig = master.config["config"]
except KeyError:
self.configConfig = {}
try:
self.configHTTP = master.config["control"]["HTTP"]
except KeyError:
self.configHTTP = {}
self.httpPort = self.configHTTP.get("listenPort", 8080)
self.status = self.configHTTP.get("enabled", False)
# Unload if this module is disabled or misconfigured
if (not self.status) or (int(self.httpPort) < 1):
self.master.releaseModule("lib.TWCManager.Control", self.__class__.__name__)
return None
HTTPHandler = CreateHTTPHandlerClass(master)
httpd = None
try:
httpd = ThreadingSimpleServer(("", self.httpPort), HTTPHandler)
except OSError as e:
logger.error("Unable to start HTTP Server: " + str(e))
if httpd:
logger.info("Serving at port: " + str(self.httpPort))
threading.Thread(target=httpd.serve_forever, daemon=True).start()
else:
self.master.releaseModule("lib.TWCManager.Control", self.__class__.__name__)
def CreateHTTPHandlerClass(master):
class HTTPControlHandler(BaseHTTPRequestHandler):
ampsList = []
fields = {}
hoursDurationList = []
master = None
path = ""
post_data = ""
templateEnv = None
templateLoader = None
timeList = []
url = None
def __init__(self, *args, **kwargs):
# Populate ampsList so that any function which requires a list of supported
# TWC amps can easily access it
if not len(self.ampsList):
self.ampsList.append([0, "Disabled"])
for amp in range(
5, (master.config["config"].get("wiringMaxAmpsPerTWC", 5)) + 1
):
self.ampsList.append([amp, str(amp) + "A"])
# Populate list of hours
if not len(self.hoursDurationList):
for hour in range(1, 25):
self.hoursDurationList.append([(hour * 3600), str(hour) + "h"])
if not len(self.timeList):
for hour in range(0, 24):
for mins in [0, 15, 30, 45]:
strHour = str(hour)
strMins = str(mins)
if hour < 10:
strHour = "0" + str(hour)
if mins < 10:
strMins = "0" + str(mins)
self.timeList.append(
[strHour + ":" + strMins, strHour + ":" + strMins]
)
# Define jinja2 template environment
            # Note that we specify two paths to the template loader.
# The first is the user specified template. The second is the default.
# Jinja2 will try for the specified template first, however if any files
# are not found, it will fall back to the default theme.
self.templateLoader = jinja2.FileSystemLoader(
searchpath=[
pathlib.Path(__file__).resolve().parent.as_posix()
+ "/themes/"
+ master.settings.get("webControlTheme", "Modern")
+ "/",
pathlib.Path(__file__).resolve().parent.as_posix()
+ "/themes/Default/",
]
)
self.templateEnv = jinja2.Environment(
loader=self.templateLoader, autoescape=True
)
# Make certain functions available to jinja2
# Where we have helper functions that we've used in the fast to
# render HTML, we can keep using those even inside jinja2
self.templateEnv.globals.update(addButton=self.addButton)
self.templateEnv.globals.update(ampsList=self.ampsList)
self.templateEnv.globals.update(chargeScheduleDay=self.chargeScheduleDay)
self.templateEnv.globals.update(checkBox=self.checkBox)
self.templateEnv.globals.update(doChargeSchedule=self.do_chargeSchedule)
self.templateEnv.globals.update(getMFADevices=master.getModuleByName("TeslaAPI").getMFADevices)
self.templateEnv.globals.update(hoursDurationList=self.hoursDurationList)
self.templateEnv.globals.update(navbarItem=self.navbar_item)
self.templateEnv.globals.update(optionList=self.optionList)
self.templateEnv.globals.update(timeList=self.timeList)
# Set master object
self.master = master
# Call parent constructor last, this is where the request is served
BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
def checkBox(self, name, value):
cb = "<input type=checkbox name='" + name + "'"
if value:
cb += " checked"
cb += ">"
return cb
def do_chargeSchedule(self):
schedule = [
"Sunday",
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday",
]
settings = master.settings.get("Schedule", {})
page = """
<table class='table table-sm'>
<thead>
<th scope='col'> </th>
"""
for day in schedule:
page += "<th scope='col'>" + day[:3] + "</th>"
page += """
</thead>
<tbody>"""
for i in (x for y in (range(6, 24), range(0, 6)) for x in y):
page += "<tr><th scope='row'>%02d</th>" % (i)
for day in schedule:
today = settings.get(day, {})
curday = settings.get("Common", {})
if settings.get("schedulePerDay", 0):
curday = settings.get(day, {})
if (
today.get("enabled", None) == "on"
and (int(curday.get("start", 0)[:2]) <= int(i))
and (int(curday.get("end", 0)[:2]) >= int(i))
):
page += (
"<td bgcolor='#CFFAFF'>SC @ "
+ str(
settings.get("Settings", {}).get("scheduledAmpsMax", 0)
)
+ "A</td>"
)
else:
                        # TODO: mark green-energy tracking and non-scheduled charging slots
page += "<td bgcolor='#FFDDFF'> </td>"
page += "</tr>"
page += "</tbody>"
page += "</table>"
return page
def navbar_item(self, url, name, target="_self"):
active = ""
urlp = urllib.parse.urlparse(self.path)
if urlp.path == url:
active = "active"
page = "<li class='nav-item %s'>" % active
page += "<a class='nav-link' target='%s' href='%s'>%s</a>" % (target, url, name)
page += "</li>"
return page
def do_API_GET(self):
self.debugLogAPI("Starting API GET")
if self.url.path == "/api/getConfig":
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
json_data = json.dumps(master.config)
# Scrub output of passwords and API keys
json_datas = re.sub(r'"password": ".*?",', "", json_data)
json_data = re.sub(r'"apiKey": ".*?",', "", json_datas)
self.wfile.write(json_data.encode("utf-8"))
elif self.url.path == "/api/getConsumptionOffsets":
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
if not master.settings.get("consumptionOffset", None):
master.settings["consumptionOffset"] = {}
json_data = json.dumps(master.settings["consumptionOffset"])
self.wfile.write(json_data.encode("utf-8"))
elif self.url.path == "/api/getLastTWCResponse":
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.end_headers()
self.wfile.write(str(master.lastTWCResponseMsg).encode("utf-8"))
elif self.url.path == "/api/getPolicy":
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
json_data = json.dumps(master.getModuleByName("Policy").charge_policy)
self.wfile.write(json_data.encode("utf-8"))
elif self.url.path == "/api/getSlaveTWCs":
data = {}
totals = {
"lastAmpsOffered": 0,
"lifetimekWh": 0,
"maxAmps": 0,
"reportedAmpsActual": 0,
}
for slaveTWC in master.getSlaveTWCs():
TWCID = "%02X%02X" % (slaveTWC.TWCID[0], slaveTWC.TWCID[1])
data[TWCID] = {
"currentVIN": slaveTWC.currentVIN,
"lastAmpsOffered": round(slaveTWC.lastAmpsOffered, 2),
"lastHeartbeat": round(time.time() - slaveTWC.timeLastRx, 2),
"carsCharging": slaveTWC.isCharging,
"lastVIN": slaveTWC.lastVIN,
"lifetimekWh": slaveTWC.lifetimekWh,
"maxAmps": float(slaveTWC.maxAmps),
"reportedAmpsActual": float(slaveTWC.reportedAmpsActual),
"chargerLoadInW": round(slaveTWC.getCurrentChargerLoad()),
"state": slaveTWC.reportedState,
"version": slaveTWC.protocolVersion,
"voltsPhaseA": slaveTWC.voltsPhaseA,
"voltsPhaseB": slaveTWC.voltsPhaseB,
"voltsPhaseC": slaveTWC.voltsPhaseC,
"TWCID": "%s" % TWCID,
}
if slaveTWC.lastChargingStart > 0:
data[TWCID]["chargeTime"] = str(timedelta(seconds=(time.time() - slaveTWC.lastChargingStart))).split(".")[0]
else:
data[TWCID]["chargeTime"] = "--:--:--"
# Adding some vehicle data
vehicle = slaveTWC.getLastVehicle()
if vehicle != None:
data[TWCID]["lastBatterySOC"] = vehicle.batteryLevel
data[TWCID]["lastChargeLimit"] = vehicle.chargeLimit
data[TWCID]["lastAtHome"] = vehicle.atHome
data[TWCID]["lastTimeToFullCharge"] = vehicle.timeToFullCharge
totals["lastAmpsOffered"] += slaveTWC.lastAmpsOffered
totals["lifetimekWh"] += slaveTWC.lifetimekWh
totals["maxAmps"] += slaveTWC.maxAmps
totals["reportedAmpsActual"] += slaveTWC.reportedAmpsActual
data["total"] = {
"lastAmpsOffered": round(totals["lastAmpsOffered"], 2),
"lifetimekWh": totals["lifetimekWh"],
"maxAmps": totals["maxAmps"],
"reportedAmpsActual": round(totals["reportedAmpsActual"], 2),
"TWCID": "total",
}
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
json_data = json.dumps(data)
self.wfile.write(json_data.encode("utf-8"))
elif self.url.path == "/api/getStatus":
data = master.getStatus()
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
json_data = json.dumps(data)
try:
self.wfile.write(json_data.encode("utf-8"))
except BrokenPipeError:
self.debugLogAPI("Connection Error: Broken Pipe")
elif self.url.path == "/api/getHistory":
output = []
now = datetime.now().replace(second=0, microsecond=0).astimezone()
startTime = now - timedelta(days=2) + timedelta(minutes=5)
endTime = now.replace(minute=math.floor(now.minute / 5) * 5)
startTime = startTime.replace(
minute=math.floor(startTime.minute / 5) * 5
)
source = (
master.settings["history"] if "history" in master.settings else []
)
data = {
k: v for k, v in source if datetime.fromisoformat(k) >= startTime
}
avgCurrent = 0
for slave in master.getSlaveTWCs():
avgCurrent += slave.historyAvgAmps
data[endTime.isoformat(timespec="seconds")] = master.convertAmpsToWatts(
avgCurrent
)
output = [
{
"timestamp": timestamp,
"charger_power": data[timestamp] if timestamp in data else 0,
}
for timestamp in [
(startTime + timedelta(minutes=5 * i)).isoformat(
timespec="seconds"
)
for i in range(48 * 12)
]
]
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
json_data = json.dumps(output)
self.wfile.write(json_data.encode("utf-8"))
else:
# All other routes missed, return 404
self.send_response(404)
self.end_headers()
self.wfile.write("".encode("utf-8"))
self.debugLogAPI("Ending API GET")
def do_API_POST(self):
self.debugLogAPI("Starting API POST")
if self.url.path == "/api/addConsumptionOffset":
data = {}
try:
data = json.loads(self.post_data.decode("UTF-8"))
except (ValueError, UnicodeDecodeError):
self.send_response(400)
self.end_headers()
self.wfile.write("".encode("utf-8"))
except json.decoder.JSONDecodeError:
self.send_response(400)
self.end_headers()
self.wfile.write("".encode("utf-8"))
name = str(data.get("offsetName", None))
value = float(data.get("offsetValue", 0))
unit = str(data.get("offsetUnit", ""))
if (name and value and (unit == "A" or unit == "W")
and len(name) < 32 and not self.checkForUnsafeCharactters(name)):
if not master.settings.get("consumptionOffset", None):
master.settings["consumptionOffset"] = {}
master.settings["consumptionOffset"][name] = {}
master.settings["consumptionOffset"][name]["value"] = value
master.settings["consumptionOffset"][name]["unit"] = unit
master.queue_background_task({"cmd": "saveSettings"})
self.send_response(204)
self.end_headers()
self.wfile.write("".encode("utf-8"))
else:
self.send_response(400)
self.end_headers()
self.wfile.write("".encode("utf-8"))
elif self.url.path == "/api/chargeNow":
data = {}
try:
data = json.loads(self.post_data.decode("UTF-8"))
except (ValueError, UnicodeDecodeError):
self.send_response(400)
self.end_headers()
self.wfile.write("".encode("utf-8"))
except json.decoder.JSONDecodeError:
self.send_response(400)
self.end_headers()
self.wfile.write("".encode("utf-8"))
rate = int(data.get("chargeNowRate", 0))
durn = int(data.get("chargeNowDuration", 0))
if rate <= 0 or durn <= 0:
self.send_response(400)
self.end_headers()
self.wfile.write("".encode("utf-8"))
else:
master.setChargeNowAmps(rate)
master.setChargeNowTimeEnd(durn)
master.queue_background_task({"cmd": "saveSettings"})
master.getModuleByName("Policy").applyPolicyImmediately()
self.send_response(204)
self.end_headers()
self.wfile.write("".encode("utf-8"))
elif self.url.path == "/api/cancelChargeNow":
master.resetChargeNowAmps()
master.queue_background_task({"cmd": "saveSettings"})
master.getModuleByName("Policy").applyPolicyImmediately()
self.send_response(204)
self.end_headers()
self.wfile.write("".encode("utf-8"))
elif self.url.path == "/api/checkArrival":
master.queue_background_task({"cmd": "checkArrival"})
self.send_response(202)
self.end_headers()
self.wfile.write("".encode("utf-8"))
elif self.url.path == "/api/checkDeparture":
master.queue_background_task({"cmd": "checkDeparture"})
self.send_response(202)
self.end_headers()
self.wfile.write("".encode("utf-8"))
elif self.url.path == "/api/deleteConsumptionOffset":
data = json.loads(self.post_data.decode("UTF-8"))
name = str(data.get("offsetName", None))
if master.settings.get("consumptionOffset", None):
del master.settings["consumptionOffset"][name]
self.send_response(204)
self.end_headers()
self.wfile.write("".encode("utf-8"))
else:
self.send_response(400)
self.end_headers()
self.wfile.write("".encode("utf-8"))
elif self.url.path == "/api/saveSettings":
master.queue_background_task({"cmd": "saveSettings"})
self.send_response(204)
self.end_headers()
elif self.url.path == "/api/sendDebugCommand":
data = json.loads(self.post_data.decode("UTF-8"))
packet = {
"Command": data.get("commandName", "")
}
if data.get("commandName", "") == "Custom":
packet["CustomCommand"] = data.get("customCommand", "")
# Clear last TWC response, so we can grab the next response
master.lastTWCResponseMsg = bytearray()
# Send packet to network
master.getModuleByName("RS485").send(
master.getModuleByName("TWCProtocol").createMessage(packet)
)
self.send_response(204)
self.end_headers()
elif self.url.path == "/api/sendStartCommand":
master.sendStartCommand()
self.send_response(204)
self.end_headers()
elif self.url.path == "/api/sendStopCommand":
master.sendStopCommand()
self.send_response(204)
self.end_headers()
elif self.url.path == "/api/setSetting":
data = json.loads(self.post_data.decode("UTF-8"))
setting = str(data.get("setting", None))
value = str(data.get("value", None))
if (setting and value and
not self.checkForUnsafeCharactters(setting) and
not self.checkForUnsafeCharactters(value)):
master.settings[setting] = value
self.send_response(204)
self.end_headers()
elif self.url.path == "/api/setScheduledChargingSettings":
data = json.loads(self.post_data.decode("UTF-8"))
enabled = bool(data.get("enabled", False))
startingMinute = int(data.get("startingMinute", -1))
endingMinute = int(data.get("endingMinute", -1))
monday = bool(data.get("monday", False))
tuesday = bool(data.get("tuesday", False))
wednesday = bool(data.get("wednesday", False))
thursday = bool(data.get("thursday", False))
friday = bool(data.get("friday", False))
saturday = bool(data.get("saturday", False))
sunday = bool(data.get("sunday", False))
amps = int(data.get("amps", -1))
batterySize = int(
data.get("flexBatterySize", 100)
                ) # default to 100 so that every available car should be finished charging by the ending time
flexStart = int(data.get("flexStartEnabled", False))
weekDaysBitmap = (
(1 if monday else 0)
+ (2 if tuesday else 0)
+ (4 if wednesday else 0)
+ (8 if thursday else 0)
+ (16 if friday else 0)
+ (32 if saturday else 0)
+ (64 if sunday else 0)
)
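                # e.g. Monday + Wednesday selected -> 1 + 4 = 5 (one bit per weekday, Monday = bit 0)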
if (
not (enabled)
or startingMinute < 0
or endingMinute < 0
or amps <= 0
or weekDaysBitmap == 0
):
master.setScheduledAmpsMax(0)
master.setScheduledAmpsStartHour(-1)
master.setScheduledAmpsEndHour(-1)
master.setScheduledAmpsDaysBitmap(0)
else:
master.setScheduledAmpsMax(amps)
master.setScheduledAmpsStartHour(startingMinute / 60)
master.setScheduledAmpsEndHour(endingMinute / 60)
master.setScheduledAmpsDaysBitmap(weekDaysBitmap)
master.setScheduledAmpsBatterySize(batterySize)
master.setScheduledAmpsFlexStart(flexStart)
master.queue_background_task({"cmd": "saveSettings"})
self.send_response(202)
self.end_headers()
self.wfile.write("".encode("utf-8"))
else:
# All other routes missed, return 404
self.send_response(404)
self.end_headers()
self.wfile.write("".encode("utf-8"))
self.debugLogAPI("Ending API POST")
def do_get_policy(self):
page = """
<table>
"""
j = 0
mod_policy = master.getModuleByName("Policy")
insertion_points = {0: "Emergency", 1: "Before", 3: "After"}
replaced = all(
x not in mod_policy.default_policy for x in mod_policy.charge_policy
)
for policy in mod_policy.charge_policy:
if policy in mod_policy.default_policy:
cat = "Default"
ext = insertion_points.get(j, None)
if ext:
page += "<tr><th>Policy Extension Point</th></tr>"
page += "<tr><td>" + ext + "</td></tr>"
j += 1
else:
cat = "Custom" if replaced else insertion_points.get(j, "Unknown")
page += (
"<tr><td> </td><td>"
+ policy["name"]
+ " ("
+ cat
+ ")</td></tr>"
)
page += "<tr><th> </th><th> </th><th>Match Criteria</th><th>Condition</th><th>Value</th></tr>"
for match, condition, value in zip(
policy["match"], policy["condition"], policy["value"]
):
page += "<tr><td> </td><td> </td>"
page += "<td>" + str(match)
match_result = mod_policy.policyValue(match)
if match != match_result:
page += " (" + str(match_result) + ")"
page += "</td>"
page += "<td>" + str(condition) + "</td>"
page += "<td>" + str(value)
value_result = mod_policy.policyValue(value)
if value != value_result:
page += " (" + str(value_result) + ")"
page += "</td></tr>"
page += """
</table>
</div>
</body>
"""
return page
def do_GET(self):
self.url = urllib.parse.urlparse(self.path)
# serve local static content files (from './lib/TWCManager/Control/static/' dir)
if self.url.path.startswith("/static/"):
content_type = mimetypes.guess_type(self.url.path)[0]
                # only serve files whose content type we know
if content_type is not None:
filename = (
pathlib.Path(__file__).resolve().parent.as_posix()
+ self.url.path
)
# check if static file exists and is readable
if os.path.isfile(filename) and os.access(filename, os.R_OK):
self.send_response(200)
self.send_header("Content-type", content_type)
self.end_headers()
# send static content (e.g. images) to browser
with open(filename, "rb") as staticFile:
self.wfile.write(staticFile.read())
return
else:
                    # static file doesn't exist or isn't readable
                    self.send_response(404)
                    self.end_headers()
                    return
# Service API requests
if self.url.path.startswith("/api/"):
self.do_API_GET()
return
webroutes = [
{ "route": "/debug", "tmpl": "debug.html.j2" },
{ "route": "/schedule", "tmpl": "schedule.html.j2" },
{ "route": "/settings", "tmpl": "settings.html.j2" },
{ "route": "/teslaAccount/login", "error": "insecure" },
{ "route": "/teslaAccount/mfaCode", "error": "insecure" },
{ "route": "/teslaAccount/submitCaptcha", "error": "insecure" },
{ "rstart": "/teslaAccount", "tmpl": "main.html.j2" },
{ "rstart": "/vehicleDetail", "tmpl": "vehicleDetail.html.j2" },
{ "route": "/vehicles", "tmpl": "vehicles.html.j2" }
]
if self.url.path == "/teslaAccount/getCaptchaImage":
self.send_response(200)
self.send_header("Content-type", "image/svg+xml")
self.end_headers()
self.wfile.write(master.getModuleByName(
"TeslaAPI"
).getCaptchaImage())
return
if self.url.path == "/":
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
# Load "main" template and render
self.template = self.templateEnv.get_template("main.html.j2")
# Set some values that we use within the template
# Check if we're able to access the Tesla API
self.apiAvailable = master.getModuleByName(
"TeslaAPI"
).car_api_available()
self.scheduledAmpsMax = master.getScheduledAmpsMax()
# Send the html message
page = self.template.render(vars(self))
self.wfile.write(page.encode("utf-8"))
return
# Match web routes to defined webroutes routing
route = None
for webroute in webroutes:
if self.url.path == webroute.get("route", "INVALID"):
route = webroute
break
elif self.url.path.startswith(webroute.get("rstart", "INVALID")):
route = webroute
break
if route and route.get("error", None):
if route["error"] == "insecure":
# For security, these details should be submitted via a POST request
# Send a 405 Method Not Allowed in response.
                    self.send_response(405)
                    self.end_headers()
                    page = "This function may only be requested via the POST HTTP method."
                    self.wfile.write(page.encode("utf-8"))
return
else:
                    self.send_response(500)
                    self.end_headers()
                    self.wfile.write("".encode("utf-8"))
return
elif route:
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
                # Load the matched route's template and render
self.template = self.templateEnv.get_template(route["tmpl"])
page = self.template.render(self.__dict__)
self.wfile.write(page.encode("utf-8"))
return
if self.url.path == "/policy":
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
# Load policy template and render
self.template = self.templateEnv.get_template("policy.html.j2")
page = self.template.render(self.__dict__)
page += self.do_get_policy()
self.wfile.write(page.encode("utf-8"))
return
if self.url.path.startswith("/vehicles/deleteGroup"):
group = urllib.parse.unquote(self.url.path.rsplit("/", 1)[1])
if group and len(group) > 0 and group in master.settings["VehicleGroups"]:
del master.settings["VehicleGroups"][group]
master.queue_background_task({"cmd": "saveSettings"})
self.send_response(302)
self.send_header("Location", "/vehicles")
else:
self.send_response(400)
self.end_headers()
self.wfile.write("".encode("utf-8"))
return
if self.url.path == "/graphs" or self.url.path == "/graphsP":
# We query the last 24h by default
now = datetime.now().replace(second=0, microsecond=0)
initial = now - timedelta(hours=24)
end = now
            # If we came from a POST, the dates are already stored in settings
if self.url.path == "/graphs":
self.process_save_graphs(
str(initial.strftime("%Y-%m-%dT%H:%M")),
str(end.strftime("%Y-%m-%dT%H:%M")),
)
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
            # Load the graphs template and render
self.template = self.templateEnv.get_template("graphs.html.j2")
page = self.template.render(self.__dict__)
self.wfile.write(page.encode("utf-8"))
return
if self.url.path == "/graphs/date":
inicio = master.settings["Graphs"]["Initial"]
fin = master.settings["Graphs"]["End"]
self.process_graphs(inicio, fin)
return
        # All other routes missed, return 404
        self.send_response(404)
        self.end_headers()
def do_POST(self):
# Parse URL
self.url = urllib.parse.urlparse(self.path)
# Parse POST parameters
self.fields.clear()
length = int(self.headers.get("content-length"))
self.post_data = self.rfile.read(length)
if self.url.path.startswith("/api/"):
self.do_API_POST()
return
self.fields = urllib.parse.parse_qs(self.post_data.decode("utf-8"))
if self.url.path == "/debug/save":
self.process_save_settings("debug")
return
if self.url.path == "/schedule/save":
# User has submitted schedule.
self.process_save_schedule()
return
if self.url.path == "/settings/save":
# User has submitted settings.
# Call dedicated function
self.process_save_settings()
return
if self.url.path == "/teslaAccount/login":
# User has submitted Tesla login.
# Pass it to the dedicated process_teslalogin function
self.process_teslalogin()
return
if self.url.path == "/teslaAccount/mfaCode":
transactionID = self.getFieldValue("transactionID")
mfaDevice = self.getFieldValue("mfaDevice")
mfaCode = self.getFieldValue("mfaCode")
resp = master.getModuleByName(
"TeslaAPI").mfaLogin(transactionID, mfaDevice, mfaCode)
self.send_response(302)
self.send_header("Location", "/teslaAccount/" + str(resp))
self.end_headers()
self.wfile.write("".encode("utf-8"))
return
if self.url.path == "/teslaAccount/submitCaptcha":
captchaCode = self.getFieldValue("captchaCode")
resp = master.getModuleByName(
"TeslaAPI"
).submitCaptchaCode(captchaCode)
self.send_response(302)
self.send_header("Location", "/teslaAccount/" + str(resp))
self.end_headers()
self.wfile.write("".encode("utf-8"))
return
if self.url.path == "/graphs/dates":
# User has submitted dates to graph this period.
objIni = self.getFieldValue("dateIni")
objEnd = self.getFieldValue("dateEnd")
if not objIni or not objEnd:
# Redirect back to graphs page if no Start or End time supplied
self.send_response(302)
self.send_header("Location", "/graphs")
else:
self.process_save_graphs(objIni, objEnd)
self.send_response(302)
self.send_header("Location", "/graphsP")
self.end_headers()
self.wfile.write("".encode("utf-8"))
return
if self.url.path == "/vehicle/groupMgmt":
group = self.getFieldValue("group")
op = self.getFieldValue("operation")
vin = self.getFieldValue("vin")
if op == "add":
try:
master.settings["VehicleGroups"][group]["Members"].append(vin)
except ValueError:
logger.error("Error adding vehicle %s to group %s" % (vin, group))
elif op == "remove":
try:
master.settings["VehicleGroups"][group]["Members"].remove(vin)
except ValueError:
logger.error("Error removing vehicle %s from group %s" % (vin, group))
master.queue_background_task({"cmd": "saveSettings"})
master.queue_background_task(
{
"cmd": "checkVINEntitlement",
"vin": vin,
}
)
self.send_response(302)
self.send_header("Location", "/vehicleDetail/"+vin)
self.end_headers()
self.wfile.write("".encode("utf-8"))
return
# All other routes missed, return 404
self.send_response(404)
self.end_headers()
self.wfile.write("".encode("utf-8"))
return
def addButton(self, button_def, extrargs):
# This is a macro which can display differing buttons based on a
# condition. It's a useful way to switch the text on a button based
# on current state.
params = {}
if len(button_def) == 3:
params = button_def[2]
buttontype = "Submit"
if params.get("buttonType", False):
buttontype = params["buttonType"]
page = "<input type='%s' %s id='%s' value='%s'>" % (
buttontype,
extrargs,
button_def[0],
button_def[1],
)
return page
def chargeScheduleDay(self, day):
# Fetch current settings
sched = master.settings.get("Schedule", {})
today = sched.get(day, {})
suffix = day + "ChargeTime"
# Render daily schedule options
page = "<tr>"
page += (
"<td>"
+ self.checkBox("enabled" + suffix, today.get("enabled", 0))
+ "</td>"
)
page += "<td>" + str(day) + "</td>"
page += (
"<td>"
+ self.optionList(
self.timeList,
{"name": "start" + suffix, "value": today.get("start", "00:00")},
)
+ "</td>"
)
page += "<td> to </td>"
page += (
"<td>"
+ self.optionList(
self.timeList,
{"name": "end" + suffix, "value": today.get("end", "00:00")},
)
+ "</td>"
)
page += (
"<td>" + self.checkBox("flex" + suffix, today.get("flex", 0)) + "</td>"
)
page += "<td>Flex Charge</td>"
page += "</tr>"
return page
def checkForUnsafeCharactters(self, text):
# Detect some unsafe characters in user input
            # The intention is to minimize the risk of unsanitized user input reaching the settings
            # file or a database. We'll reject strings with these characters in them.
unsafe_characters = '@#$%^&*"+<>;/'
if any(c in unsafe_characters for c in text):
return True
else:
return False
def getFieldValue(self, key):
# Parse the form value represented by key, and return the
# value either as an integer or string
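            # e.g. '12' -> 12 (int), '00:30' -> '00:30' (str), missing key -> None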
keya = str(key)
try:
vala = self.fields[key][0].replace("'", "")
except KeyError:
return None
try:
if int(vala) or vala == "0":
return int(vala)
except ValueError:
return vala
def log_message(self, format, *args):
pass
def optionList(self, list, opts={}):
page = "<div class='form-group'>"
page += "<select class='form-control' id='%s' name='%s'>" % (
opts.get("name", ""),
opts.get("name", ""),
)
for option in list:
sel = ""
if str(opts.get("value", "-1")) == str(option[0]):
sel = "selected"
page += "<option value='%s' %s>%s</option>" % (
option[0],
sel,
option[1],
)
page += "</select>"
page += "</div>"
return page
def process_save_schedule(self):
# Check that schedule dict exists within settings.
# If not, this would indicate that this is the first time
# we have saved the new schedule settings
if master.settings.get("Schedule", None) == None:
master.settings["Schedule"] = {}
# Slight issue with checkboxes, you have to default them all to
# false, otherwise if one is unticked it is just not sent via form data
days = [
"Sunday",
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday",
]
for day in days:
if master.settings["Schedule"].get(day, None) == None:
master.settings["Schedule"][day] = {}
master.settings["Schedule"][day]["enabled"] = ""
master.settings["Schedule"][day]["flex"] = ""
# Detect schedule keys. Rather than saving them in a flat
# structure, we'll store them multi-dimensionally
fieldsout = self.fields.copy()
ct = re.compile(
r"(?P<trigger>enabled|end|flex|start)(?P<day>.*?)ChargeTime"
)
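            # e.g. "enabledMondayChargeTime" matches with trigger "enabled" and day "Monday",
            # and "startCommonChargeTime" with trigger "start" and day "Common"; keys that
            # do not match are treated as global schedule settings below.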
for key in self.fields:
match = ct.match(key)
if match:
# Detected a multi-dimensional (per-day) key
# Rewrite it into the settings array and delete it
# from the input
if master.settings["Schedule"].get(match.group(2), None) == None:
# Create dictionary key for this day
master.settings["Schedule"][match.group(2)] = {}
# Set per-day settings
master.settings["Schedule"][match.group(2)][
match.group(1)
] = self.getFieldValue(key)
else:
if master.settings["Schedule"].get("Settings", None) == None:
master.settings["Schedule"]["Settings"] = {}
master.settings["Schedule"]["Settings"][key] = self.getFieldValue(
key
)
            # During Phase 1 (backwards compatibility) for the new scheduling
            # UI, after writing the settings in the intended new format, we then
            # write back to the existing settings nodes so that it is backwards
            # compatible.
# Green Energy Tracking
master.settings["hourResumeTrackGreenEnergy"] = int(
master.settings["Schedule"]["Settings"]["resumeGreenEnergy"][:2]
)
# Scheduled amps
master.settings["scheduledAmpsStartHour"] = int(
master.settings["Schedule"]["Common"]["start"][:2]
)
master.settings["scheduledAmpsEndHour"] = int(
master.settings["Schedule"]["Common"]["end"][:2]
)
master.settings["scheduledAmpsMax"] = float(
master.settings["Schedule"]["Settings"]["scheduledAmpsMax"]
)
# Scheduled Days bitmap backward compatibility
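            # Bit weights: Monday=1, Tuesday=2, Wednesday=4, Thursday=8, Friday=16,
            # Saturday=32, Sunday=64 (e.g. Monday + Wednesday + Friday = 1 + 4 + 16 = 21)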
master.settings["scheduledAmpsDaysBitmap"] = (
(1 if master.settings["Schedule"]["Monday"]["enabled"] else 0)
+ (2 if master.settings["Schedule"]["Tuesday"]["enabled"] else 0)
+ (4 if master.settings["Schedule"]["Wednesday"]["enabled"] else 0)
+ (8 if master.settings["Schedule"]["Thursday"]["enabled"] else 0)
+ (16 if master.settings["Schedule"]["Friday"]["enabled"] else 0)
+ (32 if master.settings["Schedule"]["Saturday"]["enabled"] else 0)
+ (64 if master.settings["Schedule"]["Sunday"]["enabled"] else 0)
)
# Save Settings
master.queue_background_task({"cmd": "saveSettings"})
self.send_response(302)
self.send_header("Location", "/")
self.end_headers()
self.wfile.write("".encode("utf-8"))
return
def process_save_settings(self, page = "settings"):
# This function will write the settings submitted from the settings
# page to the settings dict, before triggering a write of the settings
# to file
for key in self.fields:
# If the key relates to the car API tokens, we need to pass these
# to the appropriate module, rather than directly updating the
# configuration file (as it would just be overwritten)
if (
key == "carApiBearerToken" or key == "carApiRefreshToken"
) and self.getFieldValue(key) != "":
carapi = master.getModuleByName("TeslaAPI")
if key == "carApiBearerToken":
carapi.setCarApiBearerToken(self.getFieldValue(key))
elif key == "carApiRefreshToken":
carapi.setCarApiRefreshToken(self.getFieldValue(key))
# Write setting to dictionary
master.settings[key] = self.getFieldValue(key)
# If Non-Scheduled power action is either Do not Charge or
# Track Green Energy, set Non-Scheduled power rate to 0
if int(master.settings.get("nonScheduledAction", 1)) > 1:
master.settings["nonScheduledAmpsMax"] = 0
master.queue_background_task({"cmd": "saveSettings"})
# If triggered from the Debug page (not settings page), we need to
# set certain settings to false if they were not seen in the
# request data - This is because Check Boxes don't have a value
# if they aren't set
if page == "debug":
checkboxes = ["enableDebugCommands",
"spikeAmpsProactively",
"spikeAmpsReactively" ]
for checkbox in checkboxes:
if checkbox not in self.fields:
master.settings[checkbox] = 0
# Redirect to the index page
self.send_response(302)
self.send_header("Location", "/")
self.end_headers()
self.wfile.write("".encode("utf-8"))
return
def process_teslalogin(self):
# Check if we are skipping Tesla Login submission
if not master.teslaLoginAskLater:
later = False
try:
later = len(self.fields["later"][0])
except KeyError:
later = False
if later:
master.teslaLoginAskLater = True
if not master.teslaLoginAskLater:
# Connect to Tesla API
carapi = master.getModuleByName("TeslaAPI")
carapi.resetCarApiLastErrorTime()
try:
ret = carapi.apiLogin(
self.fields["email"][0], self.fields["password"][0]
)
except KeyError:
self.send_response(302)
self.send_header("Location", "/teslaAccount/NotSpecified")
self.end_headers()
self.wfile.write("".encode("utf-8"))
return
# Redirect to an index page with output based on the return state of
# the function
self.send_response(302)
self.send_header("Location", "/teslaAccount/" + str(ret))
self.end_headers()
self.wfile.write("".encode("utf-8"))
return
else:
# User has asked to skip Tesla Account submission for this session
# Redirect back to /
self.send_response(302)
self.send_header("Location", "/")
self.end_headers()
self.wfile.write("".encode("utf-8"))
return
def process_save_graphs(self, initial, end):
# Check that Graphs dict exists within settings.
# If not, this would indicate that this is the first time
# we have saved it
if master.settings.get("Graphs", None) == None:
master.settings["Graphs"] = {}
master.settings["Graphs"]["Initial"] = initial
master.settings["Graphs"]["End"] = end
return
def process_graphs(self, init, end):
# This function will query the green_energy SQL table
result = {}
            # Use a loaded Logging module with queryGreenEnergy capability to build the
            # graphs (if several are loaded, the last matching module found is used).
module = None
for candidate_module in master.getModulesByType("Logging"):
if candidate_module["ref"].getCapabilities("queryGreenEnergy"):
logger.log(
logging.INFO6,
"Logging module %s supports queryGreenEnergy",
candidate_module["name"],
)
module = candidate_module["ref"]
else:
logger.log(
logging.INFO6,
"Logging module %s does not support queryGreenEnergy",
candidate_module["name"],
)
# If we were unable to find a loaded Logging module with the capability to query
# values for graphs, return a HTTP error code
if not module:
self.send_response(400)
self.end_headers()
return
try:
result = module.queryGreenEnergy(
{
"dateBegin": datetime.strptime(init, "%Y-%m-%dT%H:%M"),
"dateEnd": datetime.strptime(end, "%Y-%m-%dT%H:%M"),
}
)
            except Exception as e:
                logger.exception("Exception in queryGreenEnergy:")
data = {}
data[0] = {"initial": init, "end": end}
i = 1
while i < len(result):
data[i] = {
"time": result[i][0].strftime("%Y-%m-%dT%H:%M:%S"),
"genW": str(result[i][1]),
"conW": str(result[i][2]),
"chgW": str(result[i][3]),
}
i = i + 1
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
json_data = json.dumps(data)
try:
self.wfile.write(json_data.encode("utf-8"))
except BrokenPipeError:
logger.debug("Connection Error: Broken Pipe")
return
def debugLogAPI(self, message):
logger.debug(
message
+ " (Url: "
+ str(self.url.path)
+ " / IP: "
+ str(self.client_address[0])
+ ")"
)
return HTTPControlHandler
|
update_repository_manager.py
|
"""
Determine if installed tool shed repositories have updates available in their respective tool sheds.
"""
import logging
import threading
import tool_shed.util.shed_util_common as suc
from tool_shed.util import common_util
from tool_shed.util import encoding_util
log = logging.getLogger( __name__ )
class UpdateRepositoryManager( object ):
def __init__( self, app ):
self.app = app
self.context = self.app.install_model.context
# Ideally only one Galaxy server process should be able to check for repository updates.
self.running = True
        self.sleeper = Sleeper()
        # Set the sleep interval before starting the restarter thread so the thread
        # never reads the attribute before it has been assigned.
        self.seconds_to_sleep = int( app.config.hours_between_check * 3600 )
        self.restarter = threading.Thread( target=self.__restarter )
        self.restarter.daemon = True
        self.restarter.start()
def get_update_to_changeset_revision_and_ctx_rev( self, repository ):
"""Return the changeset revision hash to which the repository can be updated."""
changeset_revision_dict = {}
tool_shed_url = common_util.get_tool_shed_url_from_tool_shed_registry( self.app, str( repository.tool_shed ) )
params = '?name=%s&owner=%s&changeset_revision=%s' % ( str( repository.name ),
str( repository.owner ),
str( repository.installed_changeset_revision ) )
url = common_util.url_join( tool_shed_url, 'repository/get_changeset_revision_and_ctx_rev%s' % params )
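        # Resulting URL has the form:
        # <tool_shed_url>/repository/get_changeset_revision_and_ctx_rev?name=<name>&owner=<owner>&changeset_revision=<rev>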
try:
encoded_update_dict = common_util.tool_shed_get( self.app, tool_shed_url, url )
if encoded_update_dict:
update_dict = encoding_util.tool_shed_decode( encoded_update_dict )
includes_data_managers = update_dict.get( 'includes_data_managers', False )
includes_datatypes = update_dict.get( 'includes_datatypes', False )
includes_tools = update_dict.get( 'includes_tools', False )
includes_tools_for_display_in_tool_panel = update_dict.get( 'includes_tools_for_display_in_tool_panel', False )
includes_tool_dependencies = update_dict.get( 'includes_tool_dependencies', False )
includes_workflows = update_dict.get( 'includes_workflows', False )
has_repository_dependencies = update_dict.get( 'has_repository_dependencies', False )
has_repository_dependencies_only_if_compiling_contained_td = update_dict.get( 'has_repository_dependencies_only_if_compiling_contained_td', False )
changeset_revision = update_dict.get( 'changeset_revision', None )
ctx_rev = update_dict.get( 'ctx_rev', None )
changeset_revision_dict[ 'includes_data_managers' ] = includes_data_managers
changeset_revision_dict[ 'includes_datatypes' ] = includes_datatypes
changeset_revision_dict[ 'includes_tools' ] = includes_tools
changeset_revision_dict[ 'includes_tools_for_display_in_tool_panel' ] = includes_tools_for_display_in_tool_panel
changeset_revision_dict[ 'includes_tool_dependencies' ] = includes_tool_dependencies
changeset_revision_dict[ 'includes_workflows' ] = includes_workflows
changeset_revision_dict[ 'has_repository_dependencies' ] = has_repository_dependencies
changeset_revision_dict[ 'has_repository_dependencies_only_if_compiling_contained_td' ] = has_repository_dependencies_only_if_compiling_contained_td
changeset_revision_dict[ 'changeset_revision' ] = changeset_revision
changeset_revision_dict[ 'ctx_rev' ] = ctx_rev
except Exception, e:
log.debug( "Error getting change set revision for update from the tool shed for repository '%s': %s" % ( repository.name, str( e ) ) )
changeset_revision_dict[ 'includes_data_managers' ] = False
changeset_revision_dict[ 'includes_datatypes' ] = False
changeset_revision_dict[ 'includes_tools' ] = False
changeset_revision_dict[ 'includes_tools_for_display_in_tool_panel' ] = False
changeset_revision_dict[ 'includes_tool_dependencies' ] = False
changeset_revision_dict[ 'includes_workflows' ] = False
changeset_revision_dict[ 'has_repository_dependencies' ] = False
changeset_revision_dict[ 'has_repository_dependencies_only_if_compiling_contained_td' ] = False
changeset_revision_dict[ 'changeset_revision' ] = None
changeset_revision_dict[ 'ctx_rev' ] = None
return changeset_revision_dict
def __restarter( self ):
log.info( 'Update repository manager restarter starting up...' )
while self.running:
# Make a call to the Tool Shed for each installed repository to get the latest
# status information in the Tool Shed for the repository. This information includes
# items like newer installable repository revisions, current revision updates, whether
# the repository revision is the latest installable revision, and whether the repository
# has been deprecated in the Tool Shed.
for repository in self.context.query( self.app.install_model.ToolShedRepository ) \
.filter( self.app.install_model.ToolShedRepository.table.c.deleted == False ):
tool_shed_status_dict = suc.get_tool_shed_status_for_installed_repository( self.app, repository )
if tool_shed_status_dict:
if tool_shed_status_dict != repository.tool_shed_status:
repository.tool_shed_status = tool_shed_status_dict
self.context.flush()
else:
# The received tool_shed_status_dict is an empty dictionary, so coerce to None.
tool_shed_status_dict = None
if tool_shed_status_dict != repository.tool_shed_status:
repository.tool_shed_status = tool_shed_status_dict
self.context.flush()
self.sleeper.sleep( self.seconds_to_sleep )
log.info( 'Update repository manager restarter shutting down...' )
def shutdown( self ):
self.running = False
self.sleeper.wake()
def update_repository_record( self, repository, updated_metadata_dict, updated_changeset_revision, updated_ctx_rev ):
"""
Update a tool_shed_repository database record with new information retrieved from the
Tool Shed. This happens when updating an installed repository to a new changeset revision.
"""
repository.metadata = updated_metadata_dict
# Update the repository.changeset_revision column in the database.
repository.changeset_revision = updated_changeset_revision
repository.ctx_rev = updated_ctx_rev
# Update the repository.tool_shed_status column in the database.
tool_shed_status_dict = suc.get_tool_shed_status_for_installed_repository( self.app, repository )
if tool_shed_status_dict:
repository.tool_shed_status = tool_shed_status_dict
else:
repository.tool_shed_status = None
self.app.install_model.context.add( repository )
self.app.install_model.context.flush()
self.app.install_model.context.refresh( repository )
return repository
class Sleeper( object ):
"""
Provides a 'sleep' method that sleeps for a number of seconds *unless* the notify method
is called (from a different thread).
"""
def __init__( self ):
self.condition = threading.Condition()
def sleep( self, seconds ):
self.condition.acquire()
self.condition.wait( seconds )
self.condition.release()
def wake( self ):
self.condition.acquire()
self.condition.notify()
self.condition.release()
|
upgrade_test.py
|
#!/usr/bin/env python3
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import glob
import os
from pathlib import Path
import platform
import random
import shutil
import stat
import subprocess
import sys
from threading import Thread, Event
import traceback
import time
from urllib import request
import hashlib
from local_cluster import LocalCluster, random_secret_string
SUPPORTED_PLATFORMS = ["x86_64"]
SUPPORTED_VERSIONS = [
"7.2.0",
"7.1.1",
"7.1.0",
"7.0.0",
"6.3.24",
"6.3.23",
"6.3.22",
"6.3.18",
"6.3.17",
"6.3.16",
"6.3.15",
"6.3.13",
"6.3.12",
"6.3.9",
"6.2.30",
"6.2.29",
"6.2.28",
"6.2.27",
"6.2.26",
"6.2.25",
"6.2.24",
"6.2.23",
"6.2.22",
"6.2.21",
"6.2.20",
"6.2.19",
"6.2.18",
"6.2.17",
"6.2.16",
"6.2.15",
"6.2.10",
"6.1.13",
"6.1.12",
"6.1.11",
"6.1.10",
"6.0.18",
"6.0.17",
"6.0.16",
"6.0.15",
"6.0.14",
"5.2.8",
"5.2.7",
"5.1.7",
"5.1.6",
]
FDB_DOWNLOAD_ROOT = "https://github.com/apple/foundationdb/releases/download/"
LOCAL_OLD_BINARY_REPO = "/opt/foundationdb/old/"
CURRENT_VERSION = "7.2.0"
HEALTH_CHECK_TIMEOUT_SEC = 5
PROGRESS_CHECK_TIMEOUT_SEC = 30
TRANSACTION_RETRY_LIMIT = 100
MAX_DOWNLOAD_ATTEMPTS = 5
RUN_WITH_GDB = False
def make_executable_path(path):
st = os.stat(path)
os.chmod(path, st.st_mode | stat.S_IEXEC)
def remove_file_no_fail(filename):
try:
os.remove(filename)
except OSError:
pass
def version_from_str(ver_str):
ver = [int(s) for s in ver_str.split(".")]
assert len(ver) == 3, "Invalid version string {}".format(ver_str)
return ver
def api_version_from_str(ver_str):
ver_tuple = version_from_str(ver_str)
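    # e.g. "7.1.0" -> 7 * 100 + 1 * 10 = 710; the patch component is ignored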
return ver_tuple[0] * 100 + ver_tuple[1] * 10
def version_before(ver_str1, ver_str2):
return version_from_str(ver_str1) < version_from_str(ver_str2)
def random_sleep(min_sec, max_sec):
time_sec = random.uniform(min_sec, max_sec)
print("Sleeping for {0:.3f}s".format(time_sec))
time.sleep(time_sec)
def compute_sha256(filename):
hash_function = hashlib.sha256()
with open(filename, "rb") as f:
while True:
data = f.read(128 * 1024)
if not data:
break
hash_function.update(data)
return hash_function.hexdigest()
def read_to_str(filename):
with open(filename, "r") as f:
return f.read()
class UpgradeTest:
def __init__(
self,
build_dir: str,
upgrade_path: list,
process_number: int = 1,
port: str = None,
):
self.build_dir = Path(build_dir).resolve()
assert self.build_dir.exists(), "{} does not exist".format(build_dir)
assert self.build_dir.is_dir(), "{} is not a directory".format(build_dir)
self.upgrade_path = upgrade_path
for version in upgrade_path:
assert version in SUPPORTED_VERSIONS, "Unsupported version {}".format(
version
)
self.platform = platform.machine()
assert self.platform in SUPPORTED_PLATFORMS, "Unsupported platform {}".format(
self.platform
)
self.tmp_dir = self.build_dir.joinpath("tmp", random_secret_string(16))
self.tmp_dir.mkdir(parents=True)
self.download_dir = self.build_dir.joinpath("tmp", "old_binaries")
self.local_binary_repo = Path(LOCAL_OLD_BINARY_REPO)
if not self.local_binary_repo.exists():
self.local_binary_repo = None
self.download_old_binaries()
self.create_external_lib_dir()
init_version = upgrade_path[0]
self.cluster = LocalCluster(
self.tmp_dir,
self.binary_path(init_version, "fdbserver"),
self.binary_path(init_version, "fdbmonitor"),
self.binary_path(init_version, "fdbcli"),
process_number,
port=port,
create_config=False,
)
self.cluster.create_cluster_file()
self.configure_version(init_version)
self.log = self.cluster.log
self.etc = self.cluster.etc
self.data = self.cluster.data
self.input_pipe_path = self.tmp_dir.joinpath(
"input.{}".format(random_secret_string(8))
)
self.output_pipe_path = self.tmp_dir.joinpath(
"output.{}".format(random_secret_string(8))
)
os.mkfifo(self.input_pipe_path)
os.mkfifo(self.output_pipe_path)
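        # The tester is controlled via two named pipes: CHECK/STOP commands are written
        # to the input pipe and CHECK_OK notifications are read back from the output
        # pipe (see exec_upgrade_test and output_pipe_reader below).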
self.progress_event = Event()
self.api_version = None
self.tester_retcode = None
self.tester_proc = None
self.output_pipe = None
self.tester_bin = None
self.ctrl_pipe = None
# Check if the binaries for the given version are available in the local old binaries repository
def version_in_local_repo(self, version):
return (self.local_binary_repo is not None) and (self.local_binary_repo.joinpath(version).exists())
def binary_path(self, version, bin_name):
if version == CURRENT_VERSION:
return self.build_dir.joinpath("bin", bin_name)
elif self.version_in_local_repo(version):
return self.local_binary_repo.joinpath(version, "bin", "{}-{}".format(bin_name, version))
else:
return self.download_dir.joinpath(version, bin_name)
def lib_dir(self, version):
if version == CURRENT_VERSION:
return self.build_dir.joinpath("lib")
else:
return self.download_dir.joinpath(version)
# Download an old binary of a given version from a remote repository
def download_old_binary(
self, version, target_bin_name, remote_bin_name, make_executable
):
local_file = self.download_dir.joinpath(version, target_bin_name)
if local_file.exists():
return
        # Download to a temporary file and then replace the target file atomically
        # to avoid consistency errors in case multiple tests are downloading the
        # same file in parallel
local_file_tmp = Path("{}.{}".format(str(local_file), random_secret_string(8)))
self.download_dir.joinpath(version).mkdir(parents=True, exist_ok=True)
remote_file = "{}{}/{}".format(FDB_DOWNLOAD_ROOT, version, remote_bin_name)
remote_sha256 = "{}.sha256".format(remote_file)
local_sha256 = Path("{}.sha256".format(local_file_tmp))
for attempt_cnt in range(MAX_DOWNLOAD_ATTEMPTS + 1):
if attempt_cnt == MAX_DOWNLOAD_ATTEMPTS:
assert False, "Failed to download {} after {} attempts".format(
local_file_tmp, MAX_DOWNLOAD_ATTEMPTS
)
try:
print("Downloading '{}' to '{}'...".format(remote_file, local_file_tmp))
request.urlretrieve(remote_file, local_file_tmp)
print("Downloading '{}' to '{}'...".format(remote_sha256, local_sha256))
request.urlretrieve(remote_sha256, local_sha256)
print("Download complete")
except Exception as e:
print("Retrying on error:", e)
continue
assert local_file_tmp.exists(), "{} does not exist".format(local_file_tmp)
assert local_sha256.exists(), "{} does not exist".format(local_sha256)
            expected_checksum = read_to_str(local_sha256)
            actual_checksum = compute_sha256(local_file_tmp)
            if expected_checksum == actual_checksum:
                print("Checksum OK")
                break
            print(
                "Checksum mismatch. Expected: {} Actual: {}".format(
                    expected_checksum, actual_checksum
                )
            )
os.rename(local_file_tmp, local_file)
os.remove(local_sha256)
if make_executable:
make_executable_path(local_file)
# Copy a client library file from the local old binaries repository
# The file needs to be renamed to libfdb_c.so, because it is loaded with this name by fdbcli
def copy_clientlib_from_local_repo(self, version):
dest_lib_file = self.download_dir.joinpath(version, "libfdb_c.so")
if dest_lib_file.exists():
return
src_lib_file = self.local_binary_repo.joinpath(version, "lib", "libfdb_c-{}.so".format(version))
assert src_lib_file.exists(), "Missing file {} in the local old binaries repository".format(src_lib_file)
self.download_dir.joinpath(version).mkdir(parents=True, exist_ok=True)
shutil.copyfile(src_lib_file, dest_lib_file)
assert dest_lib_file.exists(), "{} does not exist".format(dest_lib_file)
# Download all old binaries required for testing the specified upgrade path
def download_old_binaries(self):
for version in self.upgrade_path:
if version == CURRENT_VERSION:
continue
if self.version_in_local_repo(version):
self.copy_clientlib_from_local_repo(version)
continue
self.download_old_binary(
version, "fdbserver", "fdbserver.{}".format(self.platform), True
)
self.download_old_binary(
version, "fdbmonitor", "fdbmonitor.{}".format(self.platform), True
)
self.download_old_binary(
version, "fdbcli", "fdbcli.{}".format(self.platform), True
)
self.download_old_binary(
version, "libfdb_c.so", "libfdb_c.{}.so".format(self.platform), False
)
# Create a directory for external client libraries for MVC and fill it
# with the libraries necessary for the specified upgrade path
def create_external_lib_dir(self):
self.external_lib_dir = self.tmp_dir.joinpath("client_libs")
self.external_lib_dir.mkdir(parents=True)
for version in self.upgrade_path:
src_file_path = self.lib_dir(version).joinpath("libfdb_c.so")
assert src_file_path.exists(), "{} does not exist".format(src_file_path)
target_file_path = self.external_lib_dir.joinpath(
"libfdb_c.{}.so".format(version)
)
shutil.copyfile(src_file_path, target_file_path)
# Perform a health check of the cluster: Use fdbcli status command to check if the number of
# server processes and their versions are as expected
def health_check(self, timeout_sec=HEALTH_CHECK_TIMEOUT_SEC):
retries = 0
while retries < timeout_sec:
retries += 1
status = self.cluster.get_status()
if "processes" not in status["cluster"]:
print("Health check: no processes found. Retrying")
time.sleep(1)
continue
num_proc = len(status["cluster"]["processes"])
if num_proc < self.cluster.process_number:
print(
"Health check: {} of {} processes found. Retrying".format(
num_proc, self.cluster.process_number
)
)
time.sleep(1)
continue
assert (
num_proc == self.cluster.process_number
), "Number of processes: expected: {}, actual: {}".format(
self.cluster.process_number, num_proc
)
for (_, proc_stat) in status["cluster"]["processes"].items():
proc_ver = proc_stat["version"]
assert (
proc_ver == self.cluster_version
), "Process version: expected: {}, actual: {}".format(
self.cluster_version, proc_ver
)
print("Health check: OK")
return
assert False, "Health check: Failed"
# Create and save a cluster configuration for the given version
def configure_version(self, version):
self.cluster.fdbmonitor_binary = self.binary_path(version, "fdbmonitor")
self.cluster.fdbserver_binary = self.binary_path(version, "fdbserver")
self.cluster.fdbcli_binary = self.binary_path(version, "fdbcli")
self.cluster.set_env_var = "LD_LIBRARY_PATH", self.lib_dir(version)
if version_before(version, "7.1.0"):
self.cluster.use_legacy_conf_syntax = True
self.cluster.save_config()
self.cluster_version = version
# Upgrade the cluster to the given version
def upgrade_to(self, version):
print("Upgrading to version {}".format(version))
self.cluster.stop_cluster()
self.configure_version(version)
self.cluster.ensure_ports_released()
self.cluster.start_cluster()
print("Upgraded to {}".format(version))
def __enter__(self):
print("Starting cluster version {}".format(self.cluster_version))
self.cluster.start_cluster()
self.cluster.create_database(enable_tenants=False)
return self
    def __exit__(self, exc_type, exc_value, exc_traceback):
self.cluster.stop_cluster()
shutil.rmtree(self.tmp_dir)
# Determine FDB API version matching the upgrade path
def determine_api_version(self):
self.api_version = api_version_from_str(CURRENT_VERSION)
for version in self.upgrade_path:
self.api_version = min(api_version_from_str(version), self.api_version)
# Start the tester to generate the workload specified by the test file
def exec_workload(self, test_file):
self.tester_retcode = 1
try:
self.determine_api_version()
cmd_args = [
self.tester_bin,
"--cluster-file",
self.cluster.cluster_file,
"--test-file",
test_file,
"--external-client-dir",
self.external_lib_dir,
"--disable-local-client",
"--input-pipe",
self.input_pipe_path,
"--output-pipe",
self.output_pipe_path,
"--api-version",
str(self.api_version),
"--log",
"--log-dir",
self.log,
"--tmp-dir",
self.tmp_dir,
"--transaction-retry-limit",
str(TRANSACTION_RETRY_LIMIT),
]
if RUN_WITH_GDB:
cmd_args = ["gdb", "-ex", "run", "--args"] + cmd_args
print(
"Executing test command: {}".format(
" ".join([str(c) for c in cmd_args])
)
)
self.tester_proc = subprocess.Popen(
cmd_args, stdout=sys.stdout, stderr=sys.stderr
)
self.tester_retcode = self.tester_proc.wait()
self.tester_proc = None
if self.tester_retcode != 0:
print("Tester failed with return code {}".format(self.tester_retcode))
except Exception:
print("Execution of test workload failed")
print(traceback.format_exc())
finally:
# If the tester failed to initialize, other threads of the test may stay
# blocked on trying to open the named pipes
if self.ctrl_pipe is None or self.output_pipe is None:
print("Tester failed before initializing named pipes. Aborting the test")
os._exit(1)
# Perform a progress check: Trigger it and wait until it is completed
def progress_check(self):
self.progress_event.clear()
os.write(self.ctrl_pipe, b"CHECK\n")
self.progress_event.wait(None if RUN_WITH_GDB else PROGRESS_CHECK_TIMEOUT_SEC)
if self.progress_event.is_set():
print("Progress check: OK")
else:
assert False, "Progress check failed after upgrade to version {}".format(
self.cluster_version
)
# The main function of a thread for reading and processing
# the notifications received from the tester
def output_pipe_reader(self):
try:
print("Opening pipe {} for reading".format(self.output_pipe_path))
self.output_pipe = open(self.output_pipe_path, "r")
for line in self.output_pipe:
msg = line.strip()
print("Received {}".format(msg))
if msg == "CHECK_OK":
self.progress_event.set()
self.output_pipe.close()
except Exception as e:
print("Error while reading output pipe", e)
print(traceback.format_exc())
# Execute the upgrade test workflow according to the specified
# upgrade path: perform the upgrade steps and check success after each step
def exec_upgrade_test(self):
print("Opening pipe {} for writing".format(self.input_pipe_path))
self.ctrl_pipe = os.open(self.input_pipe_path, os.O_WRONLY)
try:
self.health_check()
self.progress_check()
for version in self.upgrade_path[1:]:
random_sleep(0.0, 2.0)
self.upgrade_to(version)
self.health_check()
self.progress_check()
os.write(self.ctrl_pipe, b"STOP\n")
finally:
os.close(self.ctrl_pipe)
# Kill the tester process if it is still alive
def kill_tester_if_alive(self, workload_thread):
if not workload_thread.is_alive():
return
if self.tester_proc is not None:
try:
print("Killing the tester process")
self.tester_proc.kill()
workload_thread.join(5)
except Exception:
print("Failed to kill the tester process")
# The main method implementing the test:
# - Start a thread for generating the workload using a tester binary
# - Start a thread for reading notifications from the tester
# - Trigger the upgrade steps and checks in the main thread
def exec_test(self, args):
self.tester_bin = self.build_dir.joinpath("bin", "fdb_c_api_tester")
assert self.tester_bin.exists(), "{} does not exist".format(self.tester_bin)
self.tester_proc = None
test_retcode = 1
try:
workload_thread = Thread(target=self.exec_workload, args=(args.test_file,))
workload_thread.start()
reader_thread = Thread(target=self.output_pipe_reader)
reader_thread.start()
self.exec_upgrade_test()
test_retcode = 0
except Exception:
print("Upgrade test failed")
print(traceback.format_exc())
self.kill_tester_if_alive(workload_thread)
finally:
workload_thread.join(5)
reader_thread.join(5)
self.kill_tester_if_alive(workload_thread)
if test_retcode == 0:
test_retcode = self.tester_retcode
return test_retcode
def grep_logs_for_events(self, severity):
return (
subprocess.getoutput(
"grep -r 'Severity=\"{}\"' {}".format(
severity, self.cluster.log.as_posix()
)
)
.rstrip()
.splitlines()
)
# Check the cluster log for errors
def check_cluster_logs(self, error_limit=100):
        sev40s = self.grep_logs_for_events("40")
err_cnt = 0
for line in sev40s:
# When running ASAN we expect to see this message. Boost coroutine should be using the
# correct asan annotations so that it shouldn't produce any false positives.
if line.endswith(
"WARNING: ASan doesn't fully support makecontext/swapcontext functions and may produce false "
"positives in some cases! "
):
continue
if err_cnt < error_limit:
print(line)
err_cnt += 1
if err_cnt > 0:
            print(
                ">>>>>>>>>>>>>>>>>>>> Found {} severity 40 events - the test fails".format(
                    err_cnt
                )
            )
else:
print("No errors found in logs")
return err_cnt == 0
# Check the server and client logs for warnings and dump them
def dump_warnings_in_logs(self, limit=100):
        sev30s = self.grep_logs_for_events("30")
if len(sev30s) == 0:
print("No warnings found in logs")
else:
print(
">>>>>>>>>>>>>>>>>>>> Found {} severity 30 events (warnings):".format(
len(sev30s)
)
)
for line in sev30s[:limit]:
print(line)
# Dump the last cluster configuration and cluster logs
def dump_cluster_logs(self):
for etc_file in glob.glob(os.path.join(self.cluster.etc, "*")):
print(">>>>>>>>>>>>>>>>>>>> Contents of {}:".format(etc_file))
with open(etc_file, "r") as f:
print(f.read())
for log_file in glob.glob(os.path.join(self.cluster.log, "*")):
print(">>>>>>>>>>>>>>>>>>>> Contents of {}:".format(log_file))
with open(log_file, "r") as f:
print(f.read())
if __name__ == "__main__":
parser = ArgumentParser(
formatter_class=RawDescriptionHelpFormatter,
description="""
A script for testing FDB multi-version client in upgrade scenarios. Creates a local cluster,
generates a workload using fdb_c_api_tester with a specified test file, and performs
cluster upgrade according to the specified upgrade path. Checks if the workload successfully
progresses after each upgrade step.
""",
)
parser.add_argument(
"--build-dir",
"-b",
metavar="BUILD_DIRECTORY",
help="FDB build directory",
required=True,
)
parser.add_argument(
"--upgrade-path",
nargs="+",
help="Cluster upgrade path: a space separated list of versions",
default=[CURRENT_VERSION],
)
parser.add_argument(
"--test-file",
help="A .toml file describing a test workload to be generated with fdb_c_api_tester",
required=True,
)
parser.add_argument(
"--process-number",
"-p",
help="Number of fdb processes running (default: 0 - random)",
type=int,
default=0,
)
parser.add_argument(
"--disable-log-dump",
help="Do not dump cluster log on error",
action="store_true",
)
parser.add_argument(
"--run-with-gdb", help="Execute the tester binary from gdb", action="store_true"
)
args = parser.parse_args()
if args.process_number == 0:
args.process_number = random.randint(1, 5)
print("Testing with {} processes".format(args.process_number))
if args.run_with_gdb:
RUN_WITH_GDB = True
errcode = 1
with UpgradeTest(args.build_dir, args.upgrade_path, args.process_number) as test:
print("log-dir: {}".format(test.log))
print("etc-dir: {}".format(test.etc))
print("data-dir: {}".format(test.data))
print("cluster-file: {}".format(test.etc.joinpath("fdb.cluster")))
errcode = test.exec_test(args)
if not test.check_cluster_logs():
errcode = 1 if errcode == 0 else errcode
test.dump_warnings_in_logs()
if errcode != 0 and not args.disable_log_dump:
test.dump_cluster_logs()
sys.exit(errcode)
|
plugin.py
|
import base64
import re
import threading
from binascii import hexlify, unhexlify
from functools import partial
from electrum_zaap.bitcoin import (bc_address_to_hash_160, xpub_from_pubkey,
public_key_to_p2pkh, EncodeBase58Check,
TYPE_ADDRESS, TYPE_SCRIPT,
TESTNET, ADDRTYPE_P2PKH, ADDRTYPE_P2SH)
from electrum_zaap.i18n import _
from electrum_zaap.plugins import BasePlugin, hook
from electrum_zaap.transaction import deserialize, Transaction
from electrum_zaap.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from ..hw_wallet import HW_PluginBase
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
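# TIM_NEW: device generates a new seed; TIM_RECOVER: recover from a written-down seed;
# TIM_MNEMONIC: upload a BIP39 mnemonic; TIM_PRIVKEY: upload a master private key
# (see the choices offered in initialize_device below).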
class TrezorCompatibleKeyStore(Hardware_KeyStore):
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise RuntimeError(_('Encryption and decryption are not implemented by %s') % self.device)
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class TrezorCompatiblePlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
self.main_thread = threading.current_thread()
# FIXME: move to base class when Ledger is fixed
if self.libraries_available:
self.device_manager().register_devices(self.DEVICE_IDS)
def _try_hid(self, device):
self.print_error("Trying to connect over USB...")
if device.interface_number == 1:
pair = [None, device.path]
else:
pair = [device.path, None]
try:
return self.hid_transport(pair)
except BaseException as e:
self.print_error("cannot connect at", device.path, str(e))
return None
def _try_bridge(self, device):
self.print_error("Trying to connect over Trezor Bridge...")
try:
return self.bridge_transport({'path': hexlify(device.path)})
except BaseException as e:
self.print_error("cannot connect to bridge", str(e))
return None
def create_client(self, device, handler):
transport = self._try_hid(device)
if not transport:
self.print_error("cannot connect to device")
return
self.print_error("connected to device at", device.path)
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.print_error("ping failed", str(e))
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated %s firmware for device labelled %s. Please '
'download the updated firmware from %s') %
(self.device, client.label(), self.firmware_URL))
self.print_error(msg)
handler.show_error(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
# All client interaction should not be in the main GUI thread
assert self.main_thread != threading.current_thread()
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "zaap Testnet" if TESTNET else "zaap"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your %s.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your %s, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
) % (self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
        def f(method):
            settings = self.request_trezor_init_settings(wizard, method, self.device)
            t = threading.Thread(target = self._initialize_device, args=(settings, method, device_id, wizard, handler))
            t.setDaemon(True)
            t.start()
            wizard.loop.exec_()
wizard.loop.exec_()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
if method == TIM_RECOVER and self.device == 'TREZOR':
# Warn user about firmware lameness
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"))
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
u2f_counter = 0
skip_backup = False
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language,
u2f_counter, skip_backup)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
wizard.loop.exit(0)
def setup_device(self, device_info, wizard):
        '''Called when creating a new wallet. Select the device to use. If
        the device is uninitialized, go through the initialization
        process.'''
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m')
client.used()
def get_xpub(self, device_id, derivation, wizard):
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation)
client.used()
return xpub
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
signed_tx = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[1]
raw = signed_tx.encode('hex')
tx.update_signatures(raw)
def show_address(self, wallet, address):
client = self.get_client(wallet.keystore)
if not client.atleast_version(1, 3):
            wallet.keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = wallet.keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
client.get_address(self.get_coin_name(), address_n, True)
def tx_inputs(self, tx, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = "\0"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
else:
def f(x_pubkey):
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
else:
xpub = xpub_from_pubkey(0, x_pubkey.decode('hex'))
s = []
node = self.ckd_public.deserialize(xpub)
return self.types.HDNodePathType(node=node, address_n=s)
pubkeys = map(f, x_pubkeys)
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=map(lambda x: x.decode('hex')[:-1] if x else '', txin.get('signatures')),
m=txin.get('num_sig'),
)
txinputtype = self.types.TxInputType(
script_type=self.types.SPENDMULTISIG,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if 'scriptSig' in txin:
script_sig = txin['scriptSig'].decode('hex')
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
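            # Default to 0xfffffffe rather than 0xffffffff so that a non-zero
            # nLockTime on the transaction remains enforceable.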
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx):
outputs = []
has_change = False
for _type, address, amount in tx.outputs():
info = tx.output_info.get(address)
if info is not None and not has_change:
has_change = True # no more than one change address
addrtype, hash_160 = bc_address_to_hash_160(address)
index, xpubs, m = info
if addrtype == ADDRTYPE_P2PKH:
address_n = self.client_class.expand_path(derivation + "/%d/%d"%index)
txoutputtype = self.types.TxOutputType(
amount = amount,
script_type = self.types.PAYTOADDRESS,
address_n = address_n,
)
elif addrtype == ADDRTYPE_P2SH:
address_n = self.client_class.expand_path("/%d/%d"%index)
nodes = map(self.ckd_public.deserialize, xpubs)
pubkeys = [ self.types.HDNodePathType(node=node, address_n=address_n) for node in nodes]
multisig = self.types.MultisigRedeemScriptType(
pubkeys = pubkeys,
signatures = [b''] * len(pubkeys),
m = m)
txoutputtype = self.types.TxOutputType(
multisig = multisig,
amount = amount,
script_type = self.types.PAYTOMULTISIG)
else:
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.PAYTOOPRETURN
txoutputtype.op_return_data = address[2:]
elif _type == TYPE_ADDRESS:
addrtype, hash_160 = bc_address_to_hash_160(address)
if addrtype == ADDRTYPE_P2PKH:
txoutputtype.script_type = self.types.PAYTOADDRESS
elif addrtype == ADDRTYPE_P2SH:
txoutputtype.script_type = self.types.PAYTOSCRIPTHASH
else:
raise BaseException('addrtype')
txoutputtype.address = address
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t.inputs.extend(inputs)
for vout in d['outputs']:
o = t.bin_outputs.add()
o.amount = vout['value']
o.script_pubkey = vout['scriptPubKey'].decode('hex')
return t
# This function is called from the trezor libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
ScInit.py
|
import sys, getopt, struct, time, termios, fcntl, os, colorsys, threading, datetime, subprocess, json
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/fbtft')
from RenderManager import RenderManager
from WanemManager import WanemManager
from HttpUtil import HttpUtil
from LogReporter import LogReporter
from ScBase import ScBase
from gfx import Rect
from DataAsset import CTX
from pprint import pprint
class ScInit(ScBase):
def __init__(self, pCTX, pRender, pWanem):
super(ScInit, self).__init__(pCTX, pRender, pWanem)
self.tickCnt = 0
self.tickDuration = 3
self.prevTickCnt = -1
self.stepLabel = [".", "..", "...", "OK", "ERR"]
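        # stepLabel indices 0-2 animate the in-progress dots each tick; 3 = OK, 4 = ERR
        # (see UpdateProgress below)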
self.worker = None
self.workerRet = 0
self.STATE_CHK_NETWORK = 1
self.STATE_GET_INFO = 2
self.STATE_CHK_EMULATE_DAT = 3
self.STATE_CHK_WIFI_DONGLE = 4
self.STATE_SETUP_AP = 5
self.STATE_CHK_LAN_INTERFACE = 6
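        # Update() walks the states in order: CHK_NETWORK -> GET_INFO -> CHK_WIFI_DONGLE
        # -> SETUP_AP -> CHK_LAN_INTERFACE -> TERM; STATE_CHK_EMULATE_DAT is not part of
        # the default flow.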
def CheckHttpConnectivity(self):
print "-------------------------------"
while HttpUtil.CheckConnectivity(self.pCTX.connectivityCheckUrl, 1,
self.pCTX.httpsProxy) == False:
time.sleep(1)
self.workerRet = 3
print "-------------------------------"
return
def GetApiInfo(self):
apiUrl = self.pCTX.infoApiUrl
savePath = "/tmp/WanemApiInfo.json"
print "-------------------------------"
while HttpUtil.Get(apiUrl, savePath, 1, self.pCTX.httpsProxy) == False:
time.sleep(1)
file = open(savePath)
dat = json.load(file)
file.close()
#pprint(dat)
self.pCTX.apiStatus = dat["status"]["maintStatus"]
self.workerRet = 3
print "-------------------------------"
return
def CheckWanemDat(self):
print "-------------------------------"
cmd = "php /home/pi/EM-uNetPi/scripts/php/SyncDat.php"
print cmd
ret = False
try:
subprocess.check_call(cmd.strip().split(" "))
ret = True
self.workerRet = 3
except subprocess.CalledProcessError:
ret = False
self.workerRet = 4
print str(ret)
print "-------------------------------"
return
def SetupAP(self):
print "-------------------------------"
cmd = "php /home/pi/EM-uNetPi/scripts/php/UpdateHostapdConf.php wanem-" + self.GetSelfId(
)
print cmd
ret = False
try:
subprocess.check_call(cmd.strip().split(" "))
ret = True
self.workerRet = 3
except subprocess.CalledProcessError:
ret = False
self.workerRet = 4
print str(ret)
print "-------------------------------"
return
def CheckLanInterface(self):
print "-------------------------------"
cmd = "ifconfig eth2"
ret = False
try:
subprocess.check_call(cmd.strip().split(" "))
self.pCTX.lanMode = self.pCTX.LAN_MODE_HYBRID
ret = True
self.workerRet = 3
except subprocess.CalledProcessError:
self.pCTX.lanMode = self.pCTX.LAN_MODE_WLAN
ret = True
self.workerRet = 3
print str(ret)
print "-------------------------------"
return
def CheckWifiDongle(self):
print "-------------------------------"
cmd = "lsusb -d 0411:0242"
ret = False
try:
subprocess.check_call(cmd.strip().split(" "))
self.pCTX.wifiDongleExist = True
ret = True
self.workerRet = 3
except subprocess.CalledProcessError:
self.pCTX.wifiDongleExist = False
ret = True
self.workerRet = 3
cmd = "cat /etc/wanem/apmode.prop"
try:
currentApMode = int(
subprocess.check_output(cmd.strip().split(" ")).replace(
'\n', ''))
except subprocess.CalledProcessError:
currentApMode = 0
# OverWrite to 2.4GHz Mode
if currentApMode == 1 and self.pCTX.wifiDongleExist == False:
cmd = "cp /etc/wanem/tpl/0.prop /etc/wanem/apmode.prop"
try:
subprocess.check_call(cmd.strip().split(" "))
except subprocess.CalledProcessError:
print("Update Ap Mode Fail")
cmd = "cp /etc/wanem/tpl/2.prop /etc/wanem/apchannel.prop"
try:
subprocess.check_call(cmd.strip().split(" "))
except subprocess.CalledProcessError:
print("Update Ap Channel Fail")
cmd = "cp /etc/wanem/tpl/raspi-blacklist-5.conf /etc/modprobe.d/raspi-blacklist.conf"
try:
subprocess.check_call(cmd.strip().split(" "))
except subprocess.CalledProcessError:
print("Update Module Blacklist Fail")
print "WifiDongle Exist : " + str(self.pCTX.wifiDongleExist)
print "-------------------------------"
return
def Start(self):
super(ScInit, self).Start()
##[ INIT STATE ]################################################################
self.state = self.STATE_TERM
self.nextScene = "Menu"
#self.nextScene = "ManualEx"
self.state = self.STATE_CHK_NETWORK
#self.state = self.STATE_CHK_EMULATE_DAT
#self.workerRet = 0
self.worker = threading.Thread(target=self.CheckHttpConnectivity,
args=())
#self.worker = threading.Thread(target=self.CheckWanemDat, args=())
self.worker.start()
##[ RENDER ]################################################################
self.pRender.UpdateTitle("Boot - rev : " + self.pCTX.revision)
        c = self.pRender.fb.rgb(255, 255, 0)  # yellow
self.pRender.fb.draw.rect(c, Rect(0, 54, self.pRender.xres, 1), 0)
label = "%-18s [ ]" % "CHK NETWORK"
self.pRender.fb.putstr(20, 74 + 32 * 0, label, self.pRender.W, 2)
label = "%-18s [ ]" % "GET API INFO"
self.pRender.fb.putstr(20, 74 + 32 * 1, label, self.pRender.W, 2)
label = "%-18s [ ]" % "CHK WIFI DONGLE"
self.pRender.fb.putstr(20, 74 + 32 * 2, label, self.pRender.W, 2)
label = "%-18s [ ]" % "SETUP AP"
self.pRender.fb.putstr(20, 74 + 32 * 3, label, self.pRender.W, 2)
label = "%-18s [ ]" % "CHK LAN INTERFACE"
self.pRender.fb.putstr(20, 74 + 32 * 4, label, self.pRender.W, 2)
self.pRender.fb.putstr(273, 74 + 32 * 0, " - ", self.pRender.W, 2)
self.pRender.fb.putstr(273, 74 + 32 * 1, " - ", self.pRender.W, 2)
self.pRender.fb.putstr(273, 74 + 32 * 2, " - ", self.pRender.W, 2)
self.pRender.fb.putstr(273, 74 + 32 * 3, " - ", self.pRender.W, 2)
self.pRender.fb.putstr(273, 74 + 32 * 4, " - ", self.pRender.W, 2)
#self.pRender.fb.draw.rect(self.pRender.W, Rect(271, 74, 40, 16), 0)
return
def Update(self):
if self.pCTX.tick == 1:
self.tickCnt = (self.tickCnt + 1) % self.tickDuration
if self.state == self.STATE_CHK_NETWORK:
if self.worker.isAlive() == False:
self.worker.join()
self.UpdateProgress(0, self.workerRet)
self.state = self.STATE_GET_INFO
self.worker = None
self.workerRet = 0
self.worker = threading.Thread(target=self.GetApiInfo, args=())
self.worker.start()
else:
if self.tickCnt != self.prevTickCnt:
self.UpdateProgress(0, self.tickCnt)
elif self.state == self.STATE_GET_INFO:
if self.worker.isAlive() == False:
self.worker.join()
self.UpdateProgress(1, self.workerRet)
self.state = self.STATE_CHK_WIFI_DONGLE
self.worker = None
self.workerRet = 0
self.worker = threading.Thread(target=self.CheckWifiDongle,
args=())
self.worker.start()
else:
if self.tickCnt != self.prevTickCnt:
self.UpdateProgress(1, self.tickCnt)
elif self.state == self.STATE_CHK_WIFI_DONGLE:
if self.worker.isAlive() == False:
self.worker.join()
self.UpdateProgress(2, self.workerRet)
self.state = self.STATE_SETUP_AP
self.worker = None
self.workerRet = 0
self.worker = threading.Thread(target=self.SetupAP, args=())
self.worker.start()
else:
if self.tickCnt != self.prevTickCnt:
self.UpdateProgress(2, self.tickCnt)
elif self.state == self.STATE_SETUP_AP:
if self.worker.isAlive() == False:
self.worker.join()
self.UpdateProgress(3, self.workerRet)
self.state = self.STATE_CHK_LAN_INTERFACE
self.worker = None
self.workerRet = 0
self.worker = threading.Thread(target=self.CheckLanInterface,
args=())
self.worker.start()
else:
if self.tickCnt != self.prevTickCnt:
self.UpdateProgress(3, self.tickCnt)
elif self.state == self.STATE_CHK_LAN_INTERFACE:
if self.worker.isAlive() == False:
self.worker.join()
self.UpdateProgress(4, self.workerRet)
if self.pCTX.apiStatus == 0:
LogReporter.SendLog(self.pCTX, 1, "StartUp")
self.state = self.STATE_TERM
self.worker = None
print self.pCTX.lanMode
else:
if self.tickCnt != self.prevTickCnt:
self.UpdateProgress(4, self.tickCnt)
self.prevTickCnt = self.tickCnt
return
def UpdateProgress(self, target, step):
if step == 3:
c = self.pRender.G
elif step == 4:
c = self.pRender.R
else:
c = self.pRender.W
self.pRender.fb.draw.rect(self.pRender.N,
Rect(271, 74 + 32 * target, 40, 16), 0)
self.pRender.fb.putstr(273, 74 + 32 * target, self.stepLabel[step], c,
2)
|
mainSaberMgt.py
|
#===============================================================================
# Management application for Saber Lights
# Jon Durrant
# 14-Jan-2022
#===============================================================================
from flask import Flask,flash, redirect, request, send_from_directory
from flask import url_for, render_template
import json
import os
import sys
import threading
import logging
from mqttAgent import MQTTAgent
from mqttObserver import MQTTObserver
from mqttRouterPing import MQTTRouterPing
from mqttRouterTwinClient import MQTTRouterTwinClient
import datetime
# set the project root directory as the static folder, you can set others.
app = Flask(__name__, static_url_path='/static')
#===============================================================================
# Root page redirect
#===============================================================================
@app.route('/')
def route():
#return redirect("/static/html/index.html")
return redirect("/static/html/Saber.html")
#===============================================================================
# Get current saber configuration
#===============================================================================
@app.route('/api/getSabers', methods = ['GET','POST'])
def getSabers():
columns=[
"drgb",
"nrgb",
"days",
"daye",
"dseq",
"nseq",
"timer",
"temp",
"on",
"day",
"clock"
]
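    # Each column is read from the device twin's "reported" state and aliased back to
    # its bare name in the query result (see the select/asColumn lists built below).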
select= ["clientId"]
asColumn = ["clientId"]
for c in columns:
select.append("reported.%s"%c)
asColumn.append(c)
#Make sure we only pull back data on actual saber lights, as other things could be in group
where = {'column': "reported.temp", 'op': ">", 'value': 0}
d = twinClient.query(select, asColumn, where, orient="records")
if ("res" in d):
table = recordsToTable(d["res"], "clientId")
return table
return {}
#===============================================================================
# Set sabers to given values
#===============================================================================
@app.route('/api/setSabers', methods = ['GET','POST'])
def setSabers():
if (request.json != None):
targetId = request.json.get("target", None)
delta = request.json.get("delta", {})
if (targetId):
targets = [targetId]
else:
targets = []
logging.debug("Requesting %s, %s"%(json.dumps(delta), json.dumps(targets)))
twinClient.update(delta, targets)
return {}
#===============================================================================
# Turn all sabers on or off.
#===============================================================================
@app.route('/api/saberOn', methods = ['GET','POST'])
def saberOn():
if (request.json != None):
on = request.json.get("on", True)
delta = {"on": on}
twinClient.update(delta, [])
return {}
#===============================================================================
# Convert a Pandas record json format into Google charts table format
#===============================================================================
def recordsToTable(recs, indexCol):
    typeConv={ str: "string",
               int: "number",
               float: "number",
               bool: "boolean",
               datetime.datetime: "datetime"
             }
table = {"cols": [], "rows": []}
#print("rec=%s\n"%json.dumps(recs))
#print("empty=%s\n"%json.dumps(table))
row = recs[0]
for c in row:
cell = row[c]
t=type(cell)
nt = typeConv.get(t, "string")
#print("Col: id:%s orig: %s type:%s label:%s"%(c, t, nt, c))
table["cols"].append({"id": c, "type": nt, "label": c})
#print("cols=%s\n"%json.dumps(table))
    for r in recs:
        cells = []
        for ch in table["cols"]:
            c = ch["id"]
            cells.append({"v": r[c]})
        row = {}
        row["c"] = cells
        table["rows"].append(row)
#print("rows=%s\n"%json.dumps(table))
return table
#===============================================================================
# Start up the MQTT service
#===============================================================================
def startMQTT():
global twinClient
#MQTT Credentials and targets
mqttUser=os.environ.get("MQTT_USER")
mqttPwd=os.environ.get("MQTT_PASSWD")
mqttTarget= os.environ.get("MQTT_HOST")
mqttPort=int(os.environ.get("MQTT_PORT"))
mqttCert=os.environ.get("MQTT_CERT", None)
tls=""
if (mqttCert != None):
tls="TLS"
logging.info("MQTT %s:%d %s - %s\n"%(mqttTarget,mqttPort,tls,mqttUser))
#The MQTT Client Agent
mqttAgent = MQTTAgent(mqttUser)
mqttAgent.credentials(mqttUser, mqttPwd)
mqttAgent.mqttHub(mqttTarget, mqttPort, True, mqttCert)
    #Configure the observers and routers
mqttObs = MQTTObserver()
pingRouter = MQTTRouterPing(mqttUser)
twinClient = MQTTRouterTwinClient(mqttUser, "saber", mqttAgent)
    #Add observers and routers to the client agent
mqttAgent.addObserver(mqttObs)
mqttAgent.addRouter(pingRouter)
mqttAgent.addRouter(twinClient)
mqttAgent.start()
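#===============================================================================
# Environment assumed by startMQTT, shown as a sketch for local testing. Only
# the variable names are taken from the code above; the values are placeholders.
# MQTT_CERT may be left unset, in which case TLS is not reported in the log.
#===============================================================================
def exampleMQTTEnv():
    os.environ.setdefault("MQTT_USER", "lcars")
    os.environ.setdefault("MQTT_PASSWD", "secret")
    os.environ.setdefault("MQTT_HOST", "localhost")
    os.environ.setdefault("MQTT_PORT", "1883")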
def setupApp():
LOGLEVEL = os.environ.get('LOGLEVEL', 'INFO').upper()
logging.basicConfig(level=LOGLEVEL,
format= '[%(asctime)s] {%(name)s:%(lineno)d} %(levelname)s - %(message)s')
app.secret_key = 'LCARS'
app.config['SESSION_TYPE'] = 'filesystem'
#Run the MQTT agent in a thread
thread = threading.Thread(target = startMQTT)
thread.start()
setupApp()
if __name__ == "__main__":
app.run(host="0.0.0.0")
|
evaluate_dist.py
|
import sys
sys.path.append('.')
import os
import tqdm
import torch
import random
import shutil
import argparse
import numpy as np
import multiprocessing
from collections import defaultdict
from torch.utils.data import DataLoader
from data.aug import ops
from data.dataset import DOTA1_5
from data.aug.compose import Compose
from data.dataset.dota1_5 import NAMES
from model.net import Net
from model.backbone import resnet
from utils.utils import hyp_parse
from utils.box.bbox_np import xywha2xy4, xy42xywha
from utils.box.rbbox_np import rbbox_batched_nms
from utils.parallel import CustomDetDataParallel
from torch import distributed as dist
from torch.nn import SyncBatchNorm
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
@torch.no_grad()
def main(args, rank, world_size, res):
torch.cuda.set_device(rank)
dist.init_process_group("nccl", init_method='env://', rank=rank, world_size=world_size)
set_seed(0)
torch.backends.cudnn.benchmark = True
backbone = resnet.resnet101
batch_size = 8
num_workers = 4
image_size = 768
data_dir = 'data/DOTA1_5'
dir_save = 'weights/dota1_5_weight'
image_set = 'test'
checkpoint = os.path.join(dir_save, args.ckpt)
aug = Compose([ops.PadSquare(), ops.Resize(image_size)])
dataset = DOTA1_5(data_dir, image_set, aug)
test_sampler = torch.utils.data.distributed.DistributedSampler(dataset, world_size,
rank)
batch_sampler = torch.utils.data.BatchSampler(test_sampler, batch_size, drop_last=True)
loader = DataLoader(dataset, batch_sampler=batch_sampler,\
num_workers=num_workers, pin_memory=True, collate_fn=dataset.collate)
num_classes = len(dataset.names)
prior_box = {
'strides': [8, 16, 32, 64, 128],
'sizes': [3] * 5,
'aspects': [[1, 2, 4, 8]] * 5,
'scales': [[2 ** 0, 2 ** (1 / 3), 2 ** (2 / 3)]] * 5,
}
conf_thresh = 0.01
cfg = {
'prior_box': prior_box,
'num_classes': num_classes,
'extra': 2,
'conf_thresh': conf_thresh,
}
device = torch.device(f'cuda:{rank}')
model = Net(backbone(fetch_feature=True), cfg)
model.build_pipe(shape=[2, 3, image_size, image_size])
model = SyncBatchNorm.convert_sync_batchnorm(model)
model.to(device)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[rank])
model.module.load_state_dict(torch.load(checkpoint, map_location=device))
model.eval()
ret_raw = defaultdict(list)
for images, targets, infos in tqdm.tqdm(loader):
images = images.cuda() / 255
dets = model(images)
for (det, info) in zip(dets, infos):
if det:
bboxes, scores, labels = det
bboxes = bboxes.cpu().numpy()
scores = scores.cpu().numpy()
labels = labels.cpu().numpy()
fname, x, y, w, h = os.path.splitext(os.path.basename(info['img_path']))[0].split('-')[:5]
x, y, w, h = int(x), int(y), int(w), int(h)
long_edge = max(w, h)
pad_x, pad_y = (long_edge - w) // 2, (long_edge - h) // 2
bboxes = np.stack([xywha2xy4(bbox) for bbox in bboxes])
bboxes *= long_edge / image_size
bboxes -= [pad_x, pad_y]
bboxes += [x, y]
bboxes = np.stack([xy42xywha(bbox) for bbox in bboxes])
ret_raw[fname].append([bboxes, scores, labels])
res.update(ret_raw)
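# Illustrative sketch of the crop-to-source mapping performed in the loop above,
# assuming a crop whose filename encodes "name-x-y-w-h". The numbers are made up;
# only the padding/resize arithmetic mirrors main().
def example_remap_box():
    image_size = 768
    x, y, w, h = 1024, 512, 600, 400              # crop origin and size (assumed)
    long_edge = max(w, h)                         # PadSquare pads the crop to a square
    pad_x, pad_y = (long_edge - w) // 2, (long_edge - h) // 2
    corners = np.array([[100., 100.], [200., 100.], [200., 150.], [100., 150.]])
    corners *= long_edge / image_size             # undo Resize(image_size)
    corners -= [pad_x, pad_y]                     # undo the square padding
    corners += [x, y]                             # shift back into the source image
    return corners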
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Distributed evaluation for DOTA1_5 dataset...')
parser.add_argument('--gpus', help='num of gpus')
parser.add_argument('--ckpt', help='checkpoint')
parser.add_argument('--use_voc07', help='voc07 or voc10 metric')
parser.add_argument('--local_rank', default=-1, type=int, help='node rank for distributed training')
args = parser.parse_args()
nms_thresh = 0.45
multiprocessing.set_start_method('spawn')
if ',' in args.gpus:
device_ids = [int(x) for x in args.gpus.split(',') if len(x) != 0]
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join([str(id) for id in device_ids])
device_ids = list(range(len(device_ids)))
else:
device_ids = list(range(int(args.gpus)))
res = multiprocessing.Manager().dict()
processes = []
for device_id in device_ids:
p = multiprocessing.Process(target=main, args=(args, device_id, len(device_ids), res))
p.start()
processes.append(p)
for p in processes:
p.join()
print('merging results...')
ret = []
for fname, dets in res.items():
bboxes, scores, labels = zip(*dets)
bboxes = np.concatenate(list(bboxes))
scores = np.concatenate(list(scores))
labels = np.concatenate(list(labels))
keeps = rbbox_batched_nms(bboxes, scores, labels, nms_thresh)
ret.append([fname, [bboxes[keeps], scores[keeps], labels[keeps]]])
print('converting to submission format...')
ret_save = defaultdict(list)
for fname, (bboxes, scores, labels) in ret:
for bbox, score, label in zip(bboxes, scores, labels):
bbox = xywha2xy4(bbox).ravel()
line = '%s %.12f %.1f %.1f %.1f %.1f %.1f %.1f %.1f %.1f' % (fname, score, *bbox)
ret_save[NAMES[label]].append(line)
print('saving...')
os.makedirs('submission', exist_ok=True)
for name, dets in ret_save.items():
with open(os.path.join('submission', 'Task%d_%s.txt' % (1, name)), 'wt') as f:
f.write('\n'.join(dets))
print('creating submission...')
if os.path.exists('Task1.zip'):
os.remove('Task1.zip')
os.system('zip -j Task1.zip {}'.format('submission/*'))
shutil.rmtree('submission')
print('finished')
|
train_pg_f18.py
|
"""
Original code from John Schulman for CS294 Deep Reinforcement Learning Spring 2017
Adapted for CS294-112 Fall 2017 by Abhishek Gupta and Joshua Achiam
Adapted for CS294-112 Fall 2018 by Michael Chang and Soroush Nasiriany
Finished by Haoxiong Liu
"""
import numpy as np
import tensorflow as tf
import gym
import logz
import os
import time
import inspect
from multiprocessing import Process
#============================================================================================#
# Utilities
#============================================================================================#
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def build_mlp(input_placeholder, output_size, scope, n_layers, size,
activation=tf.tanh, output_activation=None):
"""
Builds a feedforward neural network
arguments:
input_placeholder: placeholder variable for the state (batch_size, input_size)
output_size: size of the output layer
scope: variable scope of the network
n_layers: number of hidden layers
size: dimension of the hidden layer
activation: activation of the hidden layers
output_activation: activation of the output layer
returns:
output placeholder of the network (the result of a forward pass)
Hint: use tf.layers.dense
"""
with tf.variable_scope(scope):
# hidden layers
h_placeholder = tf.layers.dense(input_placeholder, size, activation)
for _ in range(n_layers-1):
h_placeholder = tf.layers.dense(h_placeholder, size, activation)
# output layer
output_placeholder = tf.layers.dense(h_placeholder, output_size, output_activation)
return output_placeholder
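#========================================================================================#
# Illustrative usage (not called in this file): a 2-hidden-layer tanh network mapping a
# 4-dim observation to 2 logits could be built as
#     ob_ph = tf.placeholder(tf.float32, [None, 4])
#     logits = build_mlp(ob_ph, output_size=2, scope="example_pi", n_layers=2, size=64)
#========================================================================================#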
def pathlength(path):
return len(path["reward"])
def setup_logger(logdir, locals_):
# Configure output directory for logging
logz.configure_output_dir(logdir)
# Log experimental parameters
args = inspect.getargspec(train_PG)[0]
params = {k: locals_[k] if k in locals_ else None for k in args}
logz.save_params(params)
#============================================================================================#
# Policy Gradient
#============================================================================================#
class Agent(object):
def __init__(self, computation_graph_args, sample_trajectory_args, estimate_return_args):
super(Agent, self).__init__()
self.ob_dim = computation_graph_args['ob_dim']
self.ac_dim = computation_graph_args['ac_dim']
self.discrete = computation_graph_args['discrete']
self.size = computation_graph_args['size']
self.n_layers = computation_graph_args['n_layers']
self.learning_rate = computation_graph_args['learning_rate']
self.animate = sample_trajectory_args['animate']
self.max_path_length = sample_trajectory_args['max_path_length']
self.min_timesteps_per_batch = sample_trajectory_args['min_timesteps_per_batch']
self.gamma = estimate_return_args['gamma']
self.reward_to_go = estimate_return_args['reward_to_go']
self.nn_baseline = estimate_return_args['nn_baseline']
self.normalize_advantages = estimate_return_args['normalize_advantages']
def init_tf_sess(self):
tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
self.sess = tf.Session(config=tf_config)
self.sess.__enter__() # equivalent to `with self.sess:`
tf.global_variables_initializer().run() #pylint: disable=E1101
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def define_placeholders(self):
"""
Placeholders for batch observations / actions / advantages in the policy gradient
loss function.
See Agent.build_computation_graph for notation
returns:
sy_ob_no: placeholder for observations
sy_ac_na: placeholder for actions
sy_adv_n: placeholder for advantages
"""
sy_ob_no = tf.placeholder(shape=[None, self.ob_dim], name="ob", dtype=tf.float32)
if self.discrete:
sy_ac_na = tf.placeholder(shape=[None], name="ac", dtype=tf.int32)
else:
sy_ac_na = tf.placeholder(shape=[None, self.ac_dim], name="ac", dtype=tf.float32)
# define placeholder for advantages
sy_adv_n = tf.placeholder(shape=[None], name='adv', dtype=tf.float32)
return sy_ob_no, sy_ac_na, sy_adv_n
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def policy_forward_pass(self, sy_ob_no):
""" Constructs the symbolic operation for the policy network outputs,
which are the parameters of the policy distribution p(a|s)
arguments:
sy_ob_no: (batch_size, self.ob_dim)
returns:
the parameters of the policy.
if discrete, the parameters are the logits of a categorical distribution
over the actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous, the parameters are a tuple (mean, log_std) of a Gaussian
distribution over actions. log_std should just be a trainable
variable, not a network output.
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
Hint: use the 'build_mlp' function to output the logits (in the discrete case)
and the mean (in the continuous case).
Pass in self.n_layers for the 'n_layers' argument, and
pass in self.size for the 'size' argument.
"""
if self.discrete:
# outputs: logits of categorical distribution (mlp size: ac_dim)
sy_logits_na = build_mlp(sy_ob_no, self.ac_dim, "parameter", self.n_layers, self.size)
return sy_logits_na
else:
# outputs: by network sy_mean, trainable variable sy_logstd
sy_mean = build_mlp(sy_ob_no, self.ac_dim, "parameter", self.n_layers, self.size)
sy_logstd = tf.get_variable("logstd", shape=[self.ac_dim], dtype=tf.float32, initializer=tf.zeros_initializer())
return (sy_mean, sy_logstd)
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def sample_action(self, policy_parameters):
""" Constructs a symbolic operation for stochastically sampling from the policy
distribution
arguments:
policy_parameters
if discrete: logits of a categorical distribution over actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous: (mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
returns:
sy_sampled_ac:
if discrete: (batch_size,)
if continuous: (batch_size, self.ac_dim)
Hint: for the continuous case, use the reparameterization trick:
The output from a Gaussian distribution with mean 'mu' and std 'sigma' is
mu + sigma * z, z ~ N(0, I)
This reduces the problem to just sampling z. (Hint: use tf.random_normal!)
"""
if self.discrete:
sy_logits_na = policy_parameters
sy_sampled_ac = tf.squeeze(tf.multinomial(sy_logits_na, 1))
else:
sy_mean, sy_logstd = policy_parameters
sy_sampled_ac = tf.add(sy_mean, tf.multiply(tf.exp(sy_logstd), tf.random_normal(tf.shape(sy_mean))))
return sy_sampled_ac
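#========================================================================================#
# Numeric sketch of the reparameterization trick above (illustrative, NumPy):
#     z = np.random.normal(size=mean.shape)       # z ~ N(0, I)
#     action = mean + np.exp(logstd) * z          # a ~ N(mean, diag(exp(logstd))^2)
# where `mean` and `logstd` stand for evaluated values of sy_mean and sy_logstd.
#========================================================================================#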
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def get_log_prob(self, policy_parameters, sy_ac_na):
""" Constructs a symbolic operation for computing the log probability of a set of actions
that were actually taken according to the policy
arguments:
policy_parameters
if discrete: logits of a categorical distribution over actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous: (mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
sy_ac_na:
if discrete: (batch_size,)
if continuous: (batch_size, self.ac_dim)
returns:
sy_logprob_n: (batch_size)
Hint:
For the discrete case, use the log probability under a categorical distribution.
For the continuous case, use the log probability under a multivariate gaussian.
"""
if self.discrete:
sy_logits_na = policy_parameters
dist = tf.distributions.Categorical(sy_logits_na)
sy_logprob_n = dist.log_prob(sy_ac_na)
else:
sy_mean, sy_logstd = policy_parameters
dist = tf.contrib.distributions.MultivariateNormalDiag(sy_mean, tf.exp(sy_logstd))  # scale_diag expects the std, not the log-std
sy_logprob_n = dist.log_prob(sy_ac_na)
return sy_logprob_n
def build_computation_graph(self):
"""
Notes on notation:
Symbolic variables have the prefix sy_, to distinguish them from the numerical values
that are computed later in the function
Prefixes and suffixes:
ob - observation
ac - action
_no - this tensor should have shape (batch size /n/, observation dim)
_na - this tensor should have shape (batch size /n/, action dim)
_n - this tensor should have shape (batch size /n/)
Note: batch size /n/ is defined at runtime, and until then, the shape for that axis
is None
----------------------------------------------------------------------------------
loss: a function of self.sy_logprob_n and self.sy_adv_n that we will differentiate
to get the policy gradient.
"""
self.sy_ob_no, self.sy_ac_na, self.sy_adv_n = self.define_placeholders()
# The policy takes in an observation and produces a distribution over the action space
self.policy_parameters = self.policy_forward_pass(self.sy_ob_no)
# We can sample actions from this action distribution.
# This will be called in Agent.sample_trajectory() where we generate a rollout.
self.sy_sampled_ac = self.sample_action(self.policy_parameters)
# We can also compute the logprob of the actions that were actually taken by the policy
# This is used in the loss function.
self.sy_logprob_n = self.get_log_prob(self.policy_parameters, self.sy_ac_na)
#========================================================================================#
# ----------PROBLEM 2----------
# Loss Function and Training Operation
#========================================================================================#
loss = -tf.reduce_mean(tf.multiply(self.sy_logprob_n, self.sy_adv_n))  # negated: the optimizer minimizes, but the PG objective is maximized
self.update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(loss)
#========================================================================================#
# ----------PROBLEM 6----------
# Optional Baseline
#
# Define placeholders for targets, a loss function and an update op for fitting a
# neural network baseline. These will be used to fit the neural network baseline.
#========================================================================================#
if self.nn_baseline:
raise NotImplementedError
self.baseline_prediction = tf.squeeze(build_mlp(
self.sy_ob_no,
1,
"nn_baseline",
n_layers=self.n_layers,
size=self.size))
# YOUR_CODE_HERE
self.sy_target_n = None
baseline_loss = None
self.baseline_update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(baseline_loss)
def sample_trajectories(self, itr, env):
# Collect paths until we have enough timesteps
timesteps_this_batch = 0
paths = []
while True:
animate_this_episode=(len(paths)==0 and (itr % 10 == 0) and self.animate)
path = self.sample_trajectory(env, animate_this_episode)
paths.append(path)
timesteps_this_batch += pathlength(path)
if timesteps_this_batch > self.min_timesteps_per_batch:
break
return paths, timesteps_this_batch
def sample_trajectory(self, env, animate_this_episode):
ob = env.reset()
obs, acs, rewards = [], [], []
steps = 0
while True:
if animate_this_episode:
env.render()
time.sleep(0.1)
obs.append(ob)
#====================================================================================#
# ----------PROBLEM 3----------
#====================================================================================#
ac = self.sess.run(self.sy_sampled_ac, feed_dict={self.sy_ob_no: np.array([ob])})
ac = ac[0]
acs.append(ac)
ob, rew, done, _ = env.step(ac)
rewards.append(rew)
steps += 1
if done or steps > self.max_path_length:
break
path = {"observation": np.array(obs, dtype=np.float32),
"reward": np.array(rewards, dtype=np.float32),
"action": np.array(acs, dtype=np.float32)}
return path
#====================================================================================#
# ----------PROBLEM 3----------
#====================================================================================#
def sum_of_rewards(self, re_n):
"""
Monte Carlo estimation of the Q function.
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
re_n: length: num_paths. Each element in re_n is a numpy array
containing the rewards for the particular path
returns:
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
----------------------------------------------------------------------------------
Your code should construct numpy arrays for Q-values which will be used to compute
advantages (which will in turn be fed to the placeholder you defined in
Agent.define_placeholders).
Recall that the expression for the policy gradient PG is
PG = E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * (Q_t - b_t )]
where
tau=(s_0, a_0, ...) is a trajectory,
Q_t is the Q-value at time t, Q^{pi}(s_t, a_t),
and b_t is a baseline which may depend on s_t.
You will write code for two cases, controlled by the flag 'reward_to_go':
Case 1: trajectory-based PG
(reward_to_go = False)
Instead of Q^{pi}(s_t, a_t), we use the total discounted reward summed over
entire trajectory (regardless of which time step the Q-value should be for).
For this case, the policy gradient estimator is
E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * Ret(tau)]
where
Ret(tau) = sum_{t'=0}^T gamma^t' r_{t'}.
Thus, you should compute
Q_t = Ret(tau)
Case 2: reward-to-go PG
(reward_to_go = True)
Here, you estimate Q^{pi}(s_t, a_t) by the discounted sum of rewards starting
from time step t. Thus, you should compute
Q_t = sum_{t'=t}^T gamma^(t'-t) * r_{t'}
Store the Q-values for all timesteps and all trajectories in a variable 'q_n',
like the 'ob_no' and 'ac_na' above.
"""
if self.reward_to_go:
# understanding: a path is a trajectory, path_length is number of timesteps
# reward-to-go: Q_t is the discounted sum of rewards from timestep t to the end of the path
q_n = []
for re in re_n:
q = np.zeros(len(re))
q[-1] = re[-1]
for i in range(len(q)-2, -1, -1):
q[i] = re[i] + self.gamma * q[i+1]
q_n.extend(q)
q_n = np.array(q_n)
else:
q_n = []
for re in re_n:
q_value = 0
for i in range(len(re)):
q_value += (self.gamma ** i) * re[i]
q_n.extend(list(q_value * np.ones(len(re))))
q_n = np.array(q_n)
return q_n
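#========================================================================================#
# Worked example (illustrative): with rewards re = [1, 1, 1] and gamma = 0.5,
#     reward_to_go=True  -> q = [1 + 0.5 + 0.25, 1 + 0.5, 1] = [1.75, 1.5, 1.0]
#     reward_to_go=False -> Ret(tau) = 1 + 0.5 + 0.25 = 1.75, repeated at every timestep.
#========================================================================================#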
def compute_advantage(self, ob_no, q_n):
"""
Computes advantages by (possibly) subtracting a baseline from the estimated Q values
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
returns:
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
"""
#====================================================================================#
# ----------PROBLEM 6----------
# Computing Baselines
#====================================================================================#
if self.nn_baseline:
# If nn_baseline is True, use your neural network to predict reward-to-go
# at each timestep for each trajectory, and save the result in a variable 'b_n'
# like 'ob_no', 'ac_na', and 'q_n'.
#
# Hint #bl1: rescale the output from the nn_baseline to match the statistics
# (mean and std) of the current batch of Q-values. (Goes with Hint
# #bl2 in Agent.update_parameters.)
raise NotImplementedError
b_n = None # YOUR CODE HERE
adv_n = q_n - b_n
else:
adv_n = q_n.copy()
return adv_n
def estimate_return(self, ob_no, re_n):
"""
Estimates the returns over a set of trajectories.
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
re_n: length: num_paths. Each element in re_n is a numpy array
containing the rewards for the particular path
returns:
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
"""
q_n = self.sum_of_rewards(re_n)
adv_n = self.compute_advantage(ob_no, q_n)
#====================================================================================#
# ----------PROBLEM 3----------
# Advantage Normalization
#====================================================================================#
if self.normalize_advantages:
# On the next line, implement a trick which is known empirically to reduce variance
# in policy gradient methods: normalize adv_n to have mean zero and std=1.
adv_n = (adv_n - np.mean(adv_n)) / (np.std(adv_n) + 1e-8)  # epsilon guards against a zero-std batch
return q_n, adv_n
def update_parameters(self, ob_no, ac_na, q_n, adv_n):
"""
Update the parameters of the policy and (possibly) the neural network baseline,
which is trained to approximate the value function.
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
ac_na: shape: (sum_of_path_lengths).
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
returns:
nothing
"""
#====================================================================================#
# ----------PROBLEM 6----------
# Optimizing Neural Network Baseline
#====================================================================================#
if self.nn_baseline:
# If a neural network baseline is used, set up the targets and the inputs for the
# baseline.
#
# Fit it to the current batch in order to use for the next iteration. Use the
# baseline_update_op you defined earlier.
#
# Hint #bl2: Instead of trying to target raw Q-values directly, rescale the
# targets to have mean zero and std=1. (Goes with Hint #bl1 in
# Agent.compute_advantage.)
# YOUR_CODE_HERE
raise NotImplementedError
target_n = None
#====================================================================================#
# ----------PROBLEM 3----------
# Performing the Policy Update
#====================================================================================#
# Call the update operation necessary to perform the policy gradient update based on
# the current batch of rollouts.
#
# For debug purposes, you may wish to save the value of the loss function before
# and after an update, and then log them below.
self.sess.run(self.update_op, feed_dict={self.sy_ob_no: ob_no, self.sy_ac_na: ac_na,
self.sy_adv_n: adv_n})
def train_PG(
exp_name,
env_name,
n_iter,
gamma,
min_timesteps_per_batch,
max_path_length,
learning_rate,
reward_to_go,
animate,
logdir,
normalize_advantages,
nn_baseline,
seed,
n_layers,
size):
start = time.time()
#========================================================================================#
# Set Up Logger
#========================================================================================#
setup_logger(logdir, locals())
#========================================================================================#
# Set Up Env
#========================================================================================#
# Make the gym environment
env = gym.make(env_name)
# Set random seeds
tf.set_random_seed(seed)
np.random.seed(seed)
env.seed(seed)
# Maximum length for episodes
max_path_length = max_path_length or env.spec.max_episode_steps
# Is this env continuous, or discrete?
discrete = isinstance(env.action_space, gym.spaces.Discrete)
# Observation and action sizes
ob_dim = env.observation_space.shape[0]
ac_dim = env.action_space.n if discrete else env.action_space.shape[0]
#========================================================================================#
# Initialize Agent
#========================================================================================#
computation_graph_args = {
'n_layers': n_layers,
'ob_dim': ob_dim,
'ac_dim': ac_dim,
'discrete': discrete,
'size': size,
'learning_rate': learning_rate,
}
sample_trajectory_args = {
'animate': animate,
'max_path_length': max_path_length,
'min_timesteps_per_batch': min_timesteps_per_batch,
}
estimate_return_args = {
'gamma': gamma,
'reward_to_go': reward_to_go,
'nn_baseline': nn_baseline,
'normalize_advantages': normalize_advantages,
}
agent = Agent(computation_graph_args, sample_trajectory_args, estimate_return_args)
# build computation graph
agent.build_computation_graph()
# tensorflow: config, session, variable initialization
agent.init_tf_sess()
#========================================================================================#
# Training Loop
#========================================================================================#
total_timesteps = 0
for itr in range(n_iter):
print("********** Iteration %i ************"%itr)
paths, timesteps_this_batch = agent.sample_trajectories(itr, env)
total_timesteps += timesteps_this_batch
# Build arrays for observation, action for the policy gradient update by concatenating
# across paths
ob_no = np.concatenate([path["observation"] for path in paths])
ac_na = np.concatenate([path["action"] for path in paths])
re_n = [path["reward"] for path in paths]
q_n, adv_n = agent.estimate_return(ob_no, re_n)
agent.update_parameters(ob_no, ac_na, q_n, adv_n)
# Log diagnostics
returns = [path["reward"].sum() for path in paths]
ep_lengths = [pathlength(path) for path in paths]
logz.log_tabular("Time", time.time() - start)
logz.log_tabular("Iteration", itr)
logz.log_tabular("AverageReturn", np.mean(returns))
logz.log_tabular("StdReturn", np.std(returns))
logz.log_tabular("MaxReturn", np.max(returns))
logz.log_tabular("MinReturn", np.min(returns))
logz.log_tabular("EpLenMean", np.mean(ep_lengths))
logz.log_tabular("EpLenStd", np.std(ep_lengths))
logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
logz.log_tabular("TimestepsSoFar", total_timesteps)
logz.dump_tabular()
logz.pickle_tf_vars()
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('env_name', type=str)
parser.add_argument('--exp_name', type=str, default='vpg')
parser.add_argument('--render', action='store_true')
parser.add_argument('--discount', type=float, default=1.0)
parser.add_argument('--n_iter', '-n', type=int, default=100)
parser.add_argument('--batch_size', '-b', type=int, default=1000)
parser.add_argument('--ep_len', '-ep', type=float, default=-1.)
parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)
parser.add_argument('--reward_to_go', '-rtg', action='store_true')
parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
parser.add_argument('--nn_baseline', '-bl', action='store_true')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--n_experiments', '-e', type=int, default=1)
parser.add_argument('--n_layers', '-l', type=int, default=2)
parser.add_argument('--size', '-s', type=int, default=64)
args = parser.parse_args()
if not(os.path.exists('data')):
os.makedirs('data')
logdir = args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
logdir = os.path.join('data', logdir)
if not(os.path.exists(logdir)):
os.makedirs(logdir)
max_path_length = args.ep_len if args.ep_len > 0 else None
processes = []
for e in range(args.n_experiments):
seed = args.seed + 10*e
print('Running experiment with seed %d'%seed)
def train_func():
train_PG(
exp_name=args.exp_name,
env_name=args.env_name,
n_iter=args.n_iter,
gamma=args.discount,
min_timesteps_per_batch=args.batch_size,
max_path_length=max_path_length,
learning_rate=args.learning_rate,
reward_to_go=args.reward_to_go,
animate=args.render,
logdir=os.path.join(logdir,'%d'%seed),
normalize_advantages=not(args.dont_normalize_advantages),
nn_baseline=args.nn_baseline,
seed=seed,
n_layers=args.n_layers,
size=args.size
)
# Awkward hacky process runs, because Tensorflow does not like
# repeatedly calling train_PG in the same thread.
p = Process(target=train_func, args=tuple())
p.start()
processes.append(p)
# if you comment in the line below, then the loop will block
# until this process finishes
# p.join()
for p in processes:
p.join()
if __name__ == "__main__":
main()
|
concurrency.py
|
from invoke.vendor.six.moves.queue import Queue
from invoke.util import ExceptionWrapper, ExceptionHandlingThread as EHThread
from spec import Spec, ok_, eq_
# TODO: rename
class ExceptionHandlingThread_(Spec):
class via_target:
def setup(self):
def worker(q):
q.put(7)
self.worker = worker
def base_case(self):
queue = Queue()
t = EHThread(target=self.worker, args=[queue])
t.start()
t.join()
eq_(queue.get(block=False), 7)
ok_(queue.empty())
def catches_exceptions(self):
# Induce exception by submitting a bad queue obj
t = EHThread(target=self.worker, args=[None])
t.start()
t.join()
wrapper = t.exception()
ok_(isinstance(wrapper, ExceptionWrapper))
eq_(wrapper.kwargs, {'args': [None], 'target': self.worker})
eq_(wrapper.type, AttributeError)
ok_(isinstance(wrapper.value, AttributeError))
def exhibits_is_dead_flag(self):
t = EHThread(target=self.worker, args=[None])
t.start()
t.join()
ok_(t.is_dead)
t = EHThread(target=self.worker, args=[Queue()])
t.start()
t.join()
ok_(not t.is_dead)
class via_subclassing:
def setup(self):
class MyThread(EHThread):
def __init__(self, *args, **kwargs):
self.queue = kwargs.pop('queue')
super(MyThread, self).__init__(*args, **kwargs)
def _run(self):
self.queue.put(7)
self.klass = MyThread
def base_case(self):
queue = Queue()
t = self.klass(queue=queue)
t.start()
t.join()
eq_(queue.get(block=False), 7)
ok_(queue.empty())
def catches_exceptions(self):
# Induce exception by submitting a bad queue obj
t = self.klass(queue=None)
t.start()
t.join()
wrapper = t.exception()
ok_(isinstance(wrapper, ExceptionWrapper))
eq_(wrapper.kwargs, {})
eq_(wrapper.type, AttributeError)
ok_(isinstance(wrapper.value, AttributeError))
def exhibits_is_dead_flag(self):
t = self.klass(queue=None)
t.start()
t.join()
ok_(t.is_dead)
t = self.klass(queue=Queue())
t.start()
t.join()
ok_(not t.is_dead)
|
manager.py
|
#!/usr/bin/env python3
import os
import time
import sys
import fcntl
import errno
import signal
import shutil
import subprocess
import datetime
import textwrap
from typing import Dict, List
from selfdrive.swaglog import cloudlog, add_logentries_handler
from common.basedir import BASEDIR, PARAMS
from common.hardware import HARDWARE, ANDROID, PC
WEBCAM = os.getenv("WEBCAM") is not None
sys.path.append(os.path.join(BASEDIR, "pyextra"))
os.environ['BASEDIR'] = BASEDIR
TOTAL_SCONS_NODES = 1005
prebuilt = os.path.exists(os.path.join(BASEDIR, 'prebuilt'))
# Create folders needed for msgq
try:
os.mkdir("/dev/shm")
except FileExistsError:
pass
except PermissionError:
print("WARNING: failed to make /dev/shm")
if ANDROID:
os.chmod("/dev/shm", 0o777)
def unblock_stdout():
# get a non-blocking stdout
child_pid, child_pty = os.forkpty()
if child_pid != 0: # parent
# child is in its own process group, manually pass kill signals
signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT))
signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM))
fcntl.fcntl(sys.stdout, fcntl.F_SETFL,
fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
while True:
try:
dat = os.read(child_pty, 4096)
except OSError as e:
if e.errno == errno.EIO:
break
continue
if not dat:
break
try:
sys.stdout.write(dat.decode('utf8'))
except (OSError, IOError, UnicodeDecodeError):
pass
# os.wait() returns a tuple with the pid and a 16 bit value
# whose low byte is the signal number and whose high byte is the exit status
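# (illustrative: a child that called sys.exit(3) and was not signalled yields a
#  status of 0x0300, so shifting right by 8 recovers the exit code 3)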
exit_status = os.wait()[1] >> 8
os._exit(exit_status)
if __name__ == "__main__":
unblock_stdout()
from common.spinner import Spinner
from common.text_window import TextWindow
import importlib
import traceback
from multiprocessing import Process
# Run scons
spinner = Spinner(noop=(__name__ != "__main__" or not ANDROID))
spinner.update("0")
if not prebuilt:
for retry in [True, False]:
# run scons
env = os.environ.copy()
env['SCONS_PROGRESS'] = "1"
env['SCONS_CACHE'] = "1"
nproc = os.cpu_count()
j_flag = "" if nproc is None else "-j%d" % (nproc - 1)
scons = subprocess.Popen(["scons", j_flag], cwd=BASEDIR, env=env, stderr=subprocess.PIPE)
compile_output = []
# Read progress from stderr and update spinner
while scons.poll() is None:
try:
line = scons.stderr.readline() # type: ignore
if line is None:
continue
line = line.rstrip()
prefix = b'progress: '
if line.startswith(prefix):
i = int(line[len(prefix):])
if spinner is not None:
spinner.update("%d" % (70.0 * (i / TOTAL_SCONS_NODES)))
elif len(line):
compile_output.append(line)
print(line.decode('utf8', 'replace'))
except Exception:
pass
if scons.returncode != 0:
# Read remaining output
r = scons.stderr.read().split(b'\n') # type: ignore
compile_output += r
if retry:
if not os.getenv("CI"):
print("scons build failed, cleaning in")
for i in range(3, -1, -1):
print("....%d" % i)
time.sleep(1)
subprocess.check_call(["scons", "-c"], cwd=BASEDIR, env=env)
shutil.rmtree("/tmp/scons_cache")
else:
print("scons build failed after retry")
sys.exit(1)
else:
# Build failed log errors
errors = [line.decode('utf8', 'replace') for line in compile_output
if any([err in line for err in [b'error: ', b'not found, needed by target']])]
error_s = "\n".join(errors)
add_logentries_handler(cloudlog)
cloudlog.error("scons build failed\n" + error_s)
# Show TextWindow
no_ui = __name__ != "__main__" or not ANDROID
error_s = "\n \n".join(["\n".join(textwrap.wrap(e, 65)) for e in errors])
with TextWindow("openpilot failed to build\n \n" + error_s, noop=no_ui) as t:
t.wait_for_exit()
exit(1)
else:
break
import cereal
import cereal.messaging as messaging
from common.params import Params
import selfdrive.crash as crash
from selfdrive.registration import register
from selfdrive.version import version, dirty
from selfdrive.loggerd.config import ROOT
from selfdrive.launcher import launcher
from common.apk import update_apks, pm_apply_packages, start_offroad
ThermalStatus = cereal.log.ThermalData.ThermalStatus
# comment out anything you don't want to run
managed_processes = {
"thermald": "selfdrive.thermald.thermald",
"uploader": "selfdrive.loggerd.uploader",
"deleter": "selfdrive.loggerd.deleter",
"controlsd": "selfdrive.controls.controlsd",
"plannerd": "selfdrive.controls.plannerd",
"radard": "selfdrive.controls.radard",
"dmonitoringd": "selfdrive.monitoring.dmonitoringd",
"ubloxd": ("selfdrive/locationd", ["./ubloxd"]),
"loggerd": ("selfdrive/loggerd", ["./loggerd"]),
"logmessaged": "selfdrive.logmessaged",
"locationd": "selfdrive.locationd.locationd",
"tombstoned": "selfdrive.tombstoned",
"logcatd": ("selfdrive/logcatd", ["./logcatd"]),
"proclogd": ("selfdrive/proclogd", ["./proclogd"]),
"boardd": ("selfdrive/boardd", ["./boardd"]), # not used directly
"pandad": "selfdrive.pandad",
"ui": ("selfdrive/ui", ["./ui"]),
"calibrationd": "selfdrive.locationd.calibrationd",
"paramsd": "selfdrive.locationd.paramsd",
"camerad": ("selfdrive/camerad", ["./camerad"]),
"sensord": ("selfdrive/sensord", ["./sensord"]),
"clocksd": ("selfdrive/clocksd", ["./clocksd"]),
"gpsd": ("selfdrive/sensord", ["./gpsd"]),
"updated": "selfdrive.updated",
"dmonitoringmodeld": ("selfdrive/modeld", ["./dmonitoringmodeld"]),
"modeld": ("selfdrive/modeld", ["./modeld"]),
"rtshield": "selfdrive.rtshield",
}
daemon_processes = {
"manage_athenad": ("selfdrive.athena.manage_athenad", "AthenadPid"),
}
running: Dict[str, Process] = {}
def get_running():
return running
# due to qualcomm kernel bugs SIGKILLing camerad sometimes causes page table corruption
unkillable_processes = ['camerad']
# processes to end with SIGINT instead of SIGTERM
interrupt_processes: List[str] = []
# processes to end with SIGKILL instead of SIGTERM
kill_processes = ['sensord']
# processes to end if thermal conditions exceed Green parameters
green_temp_processes = ['uploader']
persistent_processes = [
'thermald',
'logmessaged',
'ui',
'uploader',
'deleter',
]
if not PC:
persistent_processes += [
'updated',
'logcatd',
'tombstoned',
'sensord',
]
car_started_processes = [
'controlsd',
'plannerd',
'loggerd',
'radard',
'calibrationd',
'paramsd',
'camerad',
'proclogd',
'locationd',
'clocksd',
]
driver_view_processes = [
'camerad',
'dmonitoringd',
'dmonitoringmodeld'
]
if WEBCAM:
car_started_processes += [
'dmonitoringd',
'dmonitoringmodeld',
]
if not PC:
car_started_processes += [
'ubloxd',
'dmonitoringd',
'dmonitoringmodeld',
]
if ANDROID:
car_started_processes += [
'gpsd',
'rtshield',
]
# starting dmonitoringmodeld when modeld is initializing can sometimes
# result in a weird snpe state where dmon constantly uses more cpu than normal.
car_started_processes += ['modeld']
def register_managed_process(name, desc, car_started=False):
global managed_processes, car_started_processes, persistent_processes
print("registering %s" % name)
managed_processes[name] = desc
if car_started:
car_started_processes.append(name)
else:
persistent_processes.append(name)
# ****************** process management functions ******************
def nativelauncher(pargs, cwd):
# exec the process
os.chdir(cwd)
# because when extracted from pex zips permissions get lost -_-
os.chmod(pargs[0], 0o700)
os.execvp(pargs[0], pargs)
def start_managed_process(name):
if name in running or name not in managed_processes:
return
proc = managed_processes[name]
if isinstance(proc, str):
cloudlog.info("starting python %s" % proc)
running[name] = Process(name=name, target=launcher, args=(proc,))
else:
pdir, pargs = proc
cwd = os.path.join(BASEDIR, pdir)
cloudlog.info("starting process %s" % name)
running[name] = Process(name=name, target=nativelauncher, args=(pargs, cwd))
running[name].start()
def start_daemon_process(name):
params = Params()
proc, pid_param = daemon_processes[name]
pid = params.get(pid_param, encoding='utf-8')
if pid is not None:
try:
os.kill(int(pid), 0)
with open(f'/proc/{pid}/cmdline') as f:
if proc in f.read():
# daemon is running
return
except (OSError, FileNotFoundError):
# process is dead
pass
cloudlog.info("starting daemon %s" % name)
proc = subprocess.Popen(['python', '-m', proc], # pylint: disable=subprocess-popen-preexec-fn
stdin=open('/dev/null', 'r'),
stdout=open('/dev/null', 'w'),
stderr=open('/dev/null', 'w'),
preexec_fn=os.setpgrp)
params.put(pid_param, str(proc.pid))
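# Sketch (illustrative, not called by the manager) of the liveness check above:
# signal 0 probes the pid without killing it, and /proc/<pid>/cmdline confirms
# the pid still belongs to the expected module rather than a recycled process.
def example_pid_runs_module(pid, module):
    try:
        os.kill(int(pid), 0)
        with open(f'/proc/{pid}/cmdline') as f:
            return module in f.read()
    except (OSError, FileNotFoundError):
        return False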
def prepare_managed_process(p):
proc = managed_processes[p]
if isinstance(proc, str):
# import this python
cloudlog.info("preimporting %s" % proc)
importlib.import_module(proc)
elif os.path.isfile(os.path.join(BASEDIR, proc[0], "Makefile")):
# build this process
cloudlog.info("building %s" % (proc,))
try:
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
except subprocess.CalledProcessError:
# make clean if the build failed
cloudlog.warning("building %s failed, make clean" % (proc, ))
subprocess.check_call(["make", "clean"], cwd=os.path.join(BASEDIR, proc[0]))
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
def join_process(process, timeout):
# Process().join(timeout) will hang due to a python 3 bug: https://bugs.python.org/issue28382
# We have to poll the exitcode instead
t = time.time()
while time.time() - t < timeout and process.exitcode is None:
time.sleep(0.001)
def kill_managed_process(name):
if name not in running or name not in managed_processes:
return
cloudlog.info("killing %s" % name)
if running[name].exitcode is None:
if name in interrupt_processes:
os.kill(running[name].pid, signal.SIGINT)
elif name in kill_processes:
os.kill(running[name].pid, signal.SIGKILL)
else:
running[name].terminate()
join_process(running[name], 5)
if running[name].exitcode is None:
if name in unkillable_processes:
cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 if it doesn't die" % name)
join_process(running[name], 15)
if running[name].exitcode is None:
cloudlog.critical("unkillable process %s failed to die!" % name)
# TODO: Use method from HARDWARE
if ANDROID:
cloudlog.critical("FORCE REBOOTING PHONE!")
os.system("date >> /sdcard/unkillable_reboot")
os.system("reboot")
raise RuntimeError
else:
cloudlog.info("killing %s with SIGKILL" % name)
os.kill(running[name].pid, signal.SIGKILL)
running[name].join()
cloudlog.info("%s is dead with %d" % (name, running[name].exitcode))
del running[name]
def cleanup_all_processes(signal, frame):
cloudlog.info("caught ctrl-c %s %s" % (signal, frame))
if ANDROID:
pm_apply_packages('disable')
for name in list(running.keys()):
kill_managed_process(name)
cloudlog.info("everything is dead")
def send_managed_process_signal(name, sig):
if name not in running or name not in managed_processes:
return
cloudlog.info(f"sending signal {sig} to {name}")
os.kill(running[name].pid, sig)
# ****************** run loop ******************
def manager_init(should_register=True):
if should_register:
reg_res = register()
if reg_res:
dongle_id = reg_res
else:
raise Exception("server registration failed")
else:
dongle_id = "c"*16
# set dongle id
cloudlog.info("dongle id is " + dongle_id)
os.environ['DONGLE_ID'] = dongle_id
cloudlog.info("dirty is %d" % dirty)
if not dirty:
os.environ['CLEAN'] = '1'
cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty, is_eon=True)
crash.bind_user(id=dongle_id)
crash.bind_extra(version=version, dirty=dirty, is_eon=True)
os.umask(0)
try:
os.mkdir(ROOT, 0o777)
except OSError:
pass
# ensure shared libraries are readable by apks
if ANDROID:
os.chmod(BASEDIR, 0o755)
os.chmod(os.path.join(BASEDIR, "cereal"), 0o755)
os.chmod(os.path.join(BASEDIR, "cereal", "libmessaging_shared.so"), 0o755)
def manager_thread():
# now loop
thermal_sock = messaging.sub_sock('thermal')
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
# save boot log
subprocess.call(["./loggerd", "--bootlog"], cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
params = Params()
# start daemon processes
for p in daemon_processes:
start_daemon_process(p)
# start persistent processes
for p in persistent_processes:
start_managed_process(p)
# start offroad
if ANDROID:
pm_apply_packages('enable')
start_offroad()
if os.getenv("NOBOARD") is None:
start_managed_process("pandad")
if os.getenv("BLOCK") is not None:
for k in os.getenv("BLOCK").split(","):
del managed_processes[k]
started_prev = False
logger_dead = False
while 1:
msg = messaging.recv_sock(thermal_sock, wait=True)
# heavyweight batch processes are gated on favorable thermal conditions
if msg.thermal.thermalStatus >= ThermalStatus.yellow:
for p in green_temp_processes:
if p in persistent_processes:
kill_managed_process(p)
else:
for p in green_temp_processes:
if p in persistent_processes:
start_managed_process(p)
if msg.thermal.freeSpace < 0.05:
logger_dead = True
if msg.thermal.started:
for p in car_started_processes:
if p == "loggerd" and logger_dead:
kill_managed_process(p)
else:
start_managed_process(p)
else:
logger_dead = False
driver_view = params.get("IsDriverViewEnabled") == b"1"
# TODO: refactor how manager manages processes
for p in reversed(car_started_processes):
if p not in driver_view_processes or not driver_view:
kill_managed_process(p)
for p in driver_view_processes:
if driver_view:
start_managed_process(p)
else:
kill_managed_process(p)
# trigger an update after going offroad
if started_prev:
send_managed_process_signal("updated", signal.SIGHUP)
started_prev = msg.thermal.started
# check the status of all processes, did any of them die?
running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if running[p].is_alive() else "\u001b[31m", p) for p in running]
cloudlog.debug(' '.join(running_list))
# Exit main loop when uninstall is needed
if params.get("DoUninstall", encoding='utf8') == "1":
break
def manager_prepare(spinner=None):
# build all processes
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# Spinner has to start from 70 here
total = 100.0 if prebuilt else 30.0
for i, p in enumerate(managed_processes):
if spinner is not None:
spinner.update("%d" % ((100.0 - total) + total * (i + 1) / len(managed_processes),))
prepare_managed_process(p)
def uninstall():
cloudlog.warning("uninstalling")
with open('/cache/recovery/command', 'w') as f:
f.write('--wipe_data\n')
# IPowerManager.reboot(confirm=false, reason="recovery", wait=true)
HARDWARE.reboot(reason="recovery")
def main():
os.environ['PARAMS_PATH'] = PARAMS
if ANDROID:
# the flippening!
os.system('LD_LIBRARY_PATH="" content insert --uri content://settings/system --bind name:s:user_rotation --bind value:i:1')
# disable bluetooth
os.system('service call bluetooth_manager 8')
params = Params()
params.manager_start()
default_params = [
("CommunityFeaturesToggle", "0"),
("CompletedTrainingVersion", "0"),
("IsRHD", "0"),
("IsMetric", "0"),
("RecordFront", "0"),
("HasAcceptedTerms", "0"),
("HasCompletedSetup", "0"),
("IsUploadRawEnabled", "1"),
("IsLdwEnabled", "1"),
("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')),
("OpenpilotEnabledToggle", "1"),
("LaneChangeEnabled", "1"),
("IsDriverViewEnabled", "0"),
]
# set unset params
for k, v in default_params:
if params.get(k) is None:
params.put(k, v)
# is this chffrplus?
if os.getenv("PASSIVE") is not None:
params.put("Passive", str(int(os.getenv("PASSIVE"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
if ANDROID:
update_apks()
manager_init()
manager_prepare(spinner)
spinner.close()
if os.getenv("PREPAREONLY") is not None:
return
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
cleanup_all_processes(None, None)
if params.get("DoUninstall", encoding='utf8') == "1":
uninstall()
if __name__ == "__main__":
try:
main()
except Exception:
add_logentries_handler(cloudlog)
cloudlog.exception("Manager failed to start")
# Show last 3 lines of traceback
error = traceback.format_exc(-3)
error = "Manager failed to start\n \n" + error
with TextWindow(error) as t:
t.wait_for_exit()
raise
# manual exit because we are forked
sys.exit(0)
|
test_cursor.py
|
# Copyright 2009-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the cursor module."""
import copy
import gc
import itertools
import random
import re
import sys
import time
import threading
import warnings
sys.path[0:0] = [""]
from bson import decode_all
from bson.code import Code
from bson.py3compat import PY3
from bson.son import SON
from pymongo import (ASCENDING,
DESCENDING,
ALL,
OFF)
from pymongo.collation import Collation
from pymongo.cursor import Cursor, CursorType
from pymongo.errors import (ConfigurationError,
ExecutionTimeout,
InvalidOperation,
OperationFailure)
from pymongo.read_concern import ReadConcern
from pymongo.read_preferences import ReadPreference
from test import (client_context,
unittest,
IntegrationTest)
from test.utils import (EventListener,
ignore_deprecations,
rs_or_single_client,
WhiteListEventListener)
if PY3:
long = int
class TestCursor(IntegrationTest):
def test_deepcopy_cursor_littered_with_regexes(self):
cursor = self.db.test.find({
"x": re.compile("^hmmm.*"),
"y": [re.compile("^hmm.*")],
"z": {"a": [re.compile("^hm.*")]},
re.compile("^key.*"): {"a": [re.compile("^hm.*")]}})
cursor2 = copy.deepcopy(cursor)
self.assertEqual(cursor._Cursor__spec, cursor2._Cursor__spec)
def test_add_remove_option(self):
cursor = self.db.test.find()
self.assertEqual(0, cursor._Cursor__query_flags)
cursor.add_option(2)
cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE)
self.assertEqual(2, cursor2._Cursor__query_flags)
self.assertEqual(cursor._Cursor__query_flags,
cursor2._Cursor__query_flags)
cursor.add_option(32)
cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE_AWAIT)
self.assertEqual(34, cursor2._Cursor__query_flags)
self.assertEqual(cursor._Cursor__query_flags,
cursor2._Cursor__query_flags)
cursor.add_option(128)
cursor2 = self.db.test.find(
cursor_type=CursorType.TAILABLE_AWAIT).add_option(128)
self.assertEqual(162, cursor2._Cursor__query_flags)
self.assertEqual(cursor._Cursor__query_flags,
cursor2._Cursor__query_flags)
self.assertEqual(162, cursor._Cursor__query_flags)
cursor.add_option(128)
self.assertEqual(162, cursor._Cursor__query_flags)
cursor.remove_option(128)
cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE_AWAIT)
self.assertEqual(34, cursor2._Cursor__query_flags)
self.assertEqual(cursor._Cursor__query_flags,
cursor2._Cursor__query_flags)
cursor.remove_option(32)
cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE)
self.assertEqual(2, cursor2._Cursor__query_flags)
self.assertEqual(cursor._Cursor__query_flags,
cursor2._Cursor__query_flags)
self.assertEqual(2, cursor._Cursor__query_flags)
cursor.remove_option(32)
self.assertEqual(2, cursor._Cursor__query_flags)
# Timeout
cursor = self.db.test.find(no_cursor_timeout=True)
self.assertEqual(16, cursor._Cursor__query_flags)
cursor2 = self.db.test.find().add_option(16)
self.assertEqual(cursor._Cursor__query_flags,
cursor2._Cursor__query_flags)
cursor.remove_option(16)
self.assertEqual(0, cursor._Cursor__query_flags)
# Tailable / Await data
cursor = self.db.test.find(cursor_type=CursorType.TAILABLE_AWAIT)
self.assertEqual(34, cursor._Cursor__query_flags)
cursor2 = self.db.test.find().add_option(34)
self.assertEqual(cursor._Cursor__query_flags,
cursor2._Cursor__query_flags)
cursor.remove_option(32)
self.assertEqual(2, cursor._Cursor__query_flags)
# Partial
cursor = self.db.test.find(allow_partial_results=True)
self.assertEqual(128, cursor._Cursor__query_flags)
cursor2 = self.db.test.find().add_option(128)
self.assertEqual(cursor._Cursor__query_flags,
cursor2._Cursor__query_flags)
cursor.remove_option(128)
self.assertEqual(0, cursor._Cursor__query_flags)
def test_add_remove_option_exhaust(self):
# Exhaust - which mongos doesn't support
if client_context.is_mongos:
with self.assertRaises(InvalidOperation):
self.db.test.find(cursor_type=CursorType.EXHAUST)
else:
cursor = self.db.test.find(cursor_type=CursorType.EXHAUST)
self.assertEqual(64, cursor._Cursor__query_flags)
cursor2 = self.db.test.find().add_option(64)
self.assertEqual(cursor._Cursor__query_flags,
cursor2._Cursor__query_flags)
self.assertTrue(cursor._Cursor__exhaust)
cursor.remove_option(64)
self.assertEqual(0, cursor._Cursor__query_flags)
self.assertFalse(cursor._Cursor__exhaust)
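# For reference (the values asserted above are the OP_QUERY wire-protocol flag
# bits): Tailable=2, NoCursorTimeout=16, AwaitData=32, Exhaust=64, Partial=128,
# so CursorType.TAILABLE_AWAIT is 2 | 32 = 34 and adding Partial gives 162.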
def test_allow_disk_use(self):
db = self.db
db.pymongo_test.drop()
coll = db.pymongo_test
self.assertRaises(TypeError, coll.find().allow_disk_use, 'baz')
cursor = coll.find().allow_disk_use(True)
self.assertEqual(True, cursor._Cursor__allow_disk_use)
cursor = coll.find().allow_disk_use(False)
self.assertEqual(False, cursor._Cursor__allow_disk_use)
def test_max_time_ms(self):
db = self.db
db.pymongo_test.drop()
coll = db.pymongo_test
self.assertRaises(TypeError, coll.find().max_time_ms, 'foo')
coll.insert_one({"amalia": 1})
coll.insert_one({"amalia": 2})
coll.find().max_time_ms(None)
coll.find().max_time_ms(long(1))
cursor = coll.find().max_time_ms(999)
self.assertEqual(999, cursor._Cursor__max_time_ms)
cursor = coll.find().max_time_ms(10).max_time_ms(1000)
self.assertEqual(1000, cursor._Cursor__max_time_ms)
cursor = coll.find().max_time_ms(999)
c2 = cursor.clone()
self.assertEqual(999, c2._Cursor__max_time_ms)
self.assertTrue("$maxTimeMS" in cursor._Cursor__query_spec())
self.assertTrue("$maxTimeMS" in c2._Cursor__query_spec())
self.assertTrue(coll.find_one(max_time_ms=1000))
client = self.client
if (not client_context.is_mongos
and client_context.test_commands_enabled):
# Cursor parses server timeout error in response to initial query.
client.admin.command("configureFailPoint",
"maxTimeAlwaysTimeOut",
mode="alwaysOn")
try:
cursor = coll.find().max_time_ms(1)
try:
next(cursor)
except ExecutionTimeout:
pass
else:
self.fail("ExecutionTimeout not raised")
self.assertRaises(ExecutionTimeout,
coll.find_one, max_time_ms=1)
finally:
client.admin.command("configureFailPoint",
"maxTimeAlwaysTimeOut",
mode="off")
@client_context.require_version_min(3, 1, 9, -1)
def test_max_await_time_ms(self):
db = self.db
db.pymongo_test.drop()
coll = db.create_collection("pymongo_test", capped=True, size=4096)
self.assertRaises(TypeError, coll.find().max_await_time_ms, 'foo')
coll.insert_one({"amalia": 1})
coll.insert_one({"amalia": 2})
coll.find().max_await_time_ms(None)
coll.find().max_await_time_ms(long(1))
# When cursor is not tailable_await
cursor = coll.find()
self.assertEqual(None, cursor._Cursor__max_await_time_ms)
cursor = coll.find().max_await_time_ms(99)
self.assertEqual(None, cursor._Cursor__max_await_time_ms)
# If cursor is tailable_await and timeout is unset
cursor = coll.find(cursor_type=CursorType.TAILABLE_AWAIT)
self.assertEqual(None, cursor._Cursor__max_await_time_ms)
# If cursor is tailable_await and timeout is set
cursor = coll.find(
cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms(99)
self.assertEqual(99, cursor._Cursor__max_await_time_ms)
cursor = coll.find(
cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms(
10).max_await_time_ms(90)
self.assertEqual(90, cursor._Cursor__max_await_time_ms)
listener = WhiteListEventListener('find', 'getMore')
coll = rs_or_single_client(
event_listeners=[listener])[self.db.name].pymongo_test
results = listener.results
# Tailable_await defaults.
list(coll.find(cursor_type=CursorType.TAILABLE_AWAIT))
# find
self.assertFalse('maxTimeMS' in results['started'][0].command)
# getMore
self.assertFalse('maxTimeMS' in results['started'][1].command)
results.clear()
# Tailable_await with max_await_time_ms set.
list(coll.find(
cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms(99))
# find
self.assertEqual('find', results['started'][0].command_name)
self.assertFalse('maxTimeMS' in results['started'][0].command)
# getMore
self.assertEqual('getMore', results['started'][1].command_name)
self.assertTrue('maxTimeMS' in results['started'][1].command)
self.assertEqual(99, results['started'][1].command['maxTimeMS'])
results.clear()
# Tailable_await with max_time_ms
list(coll.find(
cursor_type=CursorType.TAILABLE_AWAIT).max_time_ms(99))
# find
self.assertEqual('find', results['started'][0].command_name)
self.assertTrue('maxTimeMS' in results['started'][0].command)
self.assertEqual(99, results['started'][0].command['maxTimeMS'])
# getMore
self.assertEqual('getMore', results['started'][1].command_name)
self.assertFalse('maxTimeMS' in results['started'][1].command)
results.clear()
# Tailable_await with both max_time_ms and max_await_time_ms
list(coll.find(
cursor_type=CursorType.TAILABLE_AWAIT).max_time_ms(
99).max_await_time_ms(99))
# find
self.assertEqual('find', results['started'][0].command_name)
self.assertTrue('maxTimeMS' in results['started'][0].command)
self.assertEqual(99, results['started'][0].command['maxTimeMS'])
# getMore
self.assertEqual('getMore', results['started'][1].command_name)
self.assertTrue('maxTimeMS' in results['started'][1].command)
self.assertEqual(99, results['started'][1].command['maxTimeMS'])
results.clear()
# Non tailable_await with max_await_time_ms
list(coll.find(batch_size=1).max_await_time_ms(99))
# find
self.assertEqual('find', results['started'][0].command_name)
self.assertFalse('maxTimeMS' in results['started'][0].command)
# getMore
self.assertEqual('getMore', results['started'][1].command_name)
self.assertFalse('maxTimeMS' in results['started'][1].command)
results.clear()
# Non tailable_await with max_time_ms
list(coll.find(batch_size=1).max_time_ms(99))
# find
self.assertEqual('find', results['started'][0].command_name)
self.assertTrue('maxTimeMS' in results['started'][0].command)
self.assertEqual(99, results['started'][0].command['maxTimeMS'])
# getMore
self.assertEqual('getMore', results['started'][1].command_name)
self.assertFalse('maxTimeMS' in results['started'][1].command)
# Non tailable_await with both max_time_ms and max_await_time_ms
list(coll.find(batch_size=1).max_time_ms(99).max_await_time_ms(88))
# find
self.assertEqual('find', results['started'][0].command_name)
self.assertTrue('maxTimeMS' in results['started'][0].command)
self.assertEqual(99, results['started'][0].command['maxTimeMS'])
# getMore
self.assertEqual('getMore', results['started'][1].command_name)
self.assertFalse('maxTimeMS' in results['started'][1].command)
@client_context.require_test_commands
@client_context.require_no_mongos
def test_max_time_ms_getmore(self):
# Test that Cursor handles server timeout error in response to getmore.
coll = self.db.pymongo_test
coll.insert_many([{} for _ in range(200)])
cursor = coll.find().max_time_ms(100)
# Send initial query before turning on failpoint.
next(cursor)
self.client.admin.command("configureFailPoint",
"maxTimeAlwaysTimeOut",
mode="alwaysOn")
try:
try:
# Iterate up to first getmore.
list(cursor)
except ExecutionTimeout:
pass
else:
self.fail("ExecutionTimeout not raised")
finally:
self.client.admin.command("configureFailPoint",
"maxTimeAlwaysTimeOut",
mode="off")
def test_explain(self):
a = self.db.test.find()
a.explain()
for _ in a:
break
b = a.explain()
# "cursor" pre MongoDB 2.7.6, "executionStats" post
self.assertTrue("cursor" in b or "executionStats" in b)
def test_explain_with_read_concern(self):
# Do not add readConcern level to explain.
listener = WhiteListEventListener("explain")
client = rs_or_single_client(event_listeners=[listener])
self.addCleanup(client.close)
coll = client.pymongo_test.test.with_options(
read_concern=ReadConcern(level="local"))
self.assertTrue(coll.find().explain())
started = listener.results['started']
self.assertEqual(len(started), 1)
self.assertNotIn("readConcern", started[0].command)
def test_hint(self):
db = self.db
self.assertRaises(TypeError, db.test.find().hint, 5.5)
db.test.drop()
db.test.insert_many([{"num": i, "foo": i} for i in range(100)])
self.assertRaises(OperationFailure,
db.test.find({"num": 17, "foo": 17})
.hint([("num", ASCENDING)]).explain)
self.assertRaises(OperationFailure,
db.test.find({"num": 17, "foo": 17})
.hint([("foo", ASCENDING)]).explain)
spec = [("num", DESCENDING)]
index = db.test.create_index(spec)
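# Without a hint the unfiltered find returns documents in natural (insertion)
# order, so num == 0 comes first; hinting the descending index flips that.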
first = next(db.test.find())
self.assertEqual(0, first.get('num'))
first = next(db.test.find().hint(spec))
self.assertEqual(99, first.get('num'))
self.assertRaises(OperationFailure,
db.test.find({"num": 17, "foo": 17})
.hint([("foo", ASCENDING)]).explain)
a = db.test.find({"num": 17})
a.hint(spec)
for _ in a:
break
self.assertRaises(InvalidOperation, a.hint, spec)
def test_hint_by_name(self):
db = self.db
db.test.drop()
db.test.insert_many([{"i": i} for i in range(100)])
db.test.create_index([('i', DESCENDING)], name='fooindex')
first = next(db.test.find())
self.assertEqual(0, first.get('i'))
first = next(db.test.find().hint('fooindex'))
self.assertEqual(99, first.get('i'))
def test_limit(self):
db = self.db
self.assertRaises(TypeError, db.test.find().limit, None)
self.assertRaises(TypeError, db.test.find().limit, "hello")
self.assertRaises(TypeError, db.test.find().limit, 5.5)
self.assertTrue(db.test.find().limit(long(5)))
db.test.drop()
db.test.insert_many([{"x": i} for i in range(100)])
count = 0
for _ in db.test.find():
count += 1
self.assertEqual(count, 100)
count = 0
for _ in db.test.find().limit(20):
count += 1
self.assertEqual(count, 20)
count = 0
for _ in db.test.find().limit(99):
count += 1
self.assertEqual(count, 99)
count = 0
for _ in db.test.find().limit(1):
count += 1
self.assertEqual(count, 1)
count = 0
for _ in db.test.find().limit(0):
count += 1
self.assertEqual(count, 100)
count = 0
for _ in db.test.find().limit(0).limit(50).limit(10):
count += 1
self.assertEqual(count, 10)
a = db.test.find()
a.limit(10)
for _ in a:
break
self.assertRaises(InvalidOperation, a.limit, 5)
@ignore_deprecations # Ignore max without hint.
def test_max(self):
db = self.db
db.test.drop()
j_index = [("j", ASCENDING)]
db.test.create_index(j_index)
db.test.insert_many([{"j": j, "k": j} for j in range(10)])
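# max() sets an exclusive upper bound on the hinted index keys, so a bound of
# j=3 matches only j in {0, 1, 2}. Newer servers require a hint with min/max.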
def find(max_spec, expected_index):
cursor = db.test.find().max(max_spec)
if client_context.requires_hint_with_min_max_queries:
cursor = cursor.hint(expected_index)
return cursor
cursor = find([("j", 3)], j_index)
self.assertEqual(len(list(cursor)), 3)
# Tuple.
cursor = find((("j", 3),), j_index)
self.assertEqual(len(list(cursor)), 3)
# Compound index.
index_keys = [("j", ASCENDING), ("k", ASCENDING)]
db.test.create_index(index_keys)
cursor = find([("j", 3), ("k", 3)], index_keys)
self.assertEqual(len(list(cursor)), 3)
# Wrong order.
cursor = find([("k", 3), ("j", 3)], index_keys)
self.assertRaises(OperationFailure, list, cursor)
# No such index.
cursor = find([("k", 3)], "k")
self.assertRaises(OperationFailure, list, cursor)
self.assertRaises(TypeError, db.test.find().max, 10)
self.assertRaises(TypeError, db.test.find().max, {"j": 10})
@ignore_deprecations # Ignore min without hint.
def test_min(self):
db = self.db
db.test.drop()
j_index = [("j", ASCENDING)]
db.test.create_index(j_index)
db.test.insert_many([{"j": j, "k": j} for j in range(10)])
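# min() sets an inclusive lower bound on the hinted index keys, so a bound of
# j=3 matches j in {3, ..., 9}, i.e. 7 documents.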
def find(min_spec, expected_index):
cursor = db.test.find().min(min_spec)
if client_context.requires_hint_with_min_max_queries:
cursor = cursor.hint(expected_index)
return cursor
cursor = find([("j", 3)], j_index)
self.assertEqual(len(list(cursor)), 7)
# Tuple.
cursor = find((("j", 3),), j_index)
self.assertEqual(len(list(cursor)), 7)
# Compound index.
index_keys = [("j", ASCENDING), ("k", ASCENDING)]
db.test.create_index(index_keys)
cursor = find([("j", 3), ("k", 3)], index_keys)
self.assertEqual(len(list(cursor)), 7)
# Wrong order.
cursor = find([("k", 3), ("j", 3)], index_keys)
self.assertRaises(OperationFailure, list, cursor)
# No such index.
cursor = find([("k", 3)], "k")
self.assertRaises(OperationFailure, list, cursor)
self.assertRaises(TypeError, db.test.find().min, 10)
self.assertRaises(TypeError, db.test.find().min, {"j": 10})
@client_context.require_version_max(4, 1, -1)
def test_min_max_without_hint(self):
coll = self.db.test
j_index = [("j", ASCENDING)]
coll.create_index(j_index)
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter("default", DeprecationWarning)
list(coll.find().min([("j", 3)]))
self.assertIn('using a min/max query operator', str(warns[0]))
# Ensure the warning is raised with the proper stack level.
del warns[:]
list(coll.find().min([("j", 3)]))
self.assertIn('using a min/max query operator', str(warns[0]))
del warns[:]
list(coll.find().max([("j", 3)]))
self.assertIn('using a min/max query operator', str(warns[0]))
def test_batch_size(self):
db = self.db
db.test.drop()
db.test.insert_many([{"x": x} for x in range(200)])
self.assertRaises(TypeError, db.test.find().batch_size, None)
self.assertRaises(TypeError, db.test.find().batch_size, "hello")
self.assertRaises(TypeError, db.test.find().batch_size, 5.5)
self.assertRaises(ValueError, db.test.find().batch_size, -1)
self.assertTrue(db.test.find().batch_size(long(5)))
a = db.test.find()
for _ in a:
break
self.assertRaises(InvalidOperation, a.batch_size, 5)
def cursor_count(cursor, expected_count):
count = 0
for _ in cursor:
count += 1
self.assertEqual(expected_count, count)
cursor_count(db.test.find().batch_size(0), 200)
cursor_count(db.test.find().batch_size(1), 200)
cursor_count(db.test.find().batch_size(2), 200)
cursor_count(db.test.find().batch_size(5), 200)
cursor_count(db.test.find().batch_size(100), 200)
cursor_count(db.test.find().batch_size(500), 200)
cursor_count(db.test.find().batch_size(0).limit(1), 1)
cursor_count(db.test.find().batch_size(1).limit(1), 1)
cursor_count(db.test.find().batch_size(2).limit(1), 1)
cursor_count(db.test.find().batch_size(5).limit(1), 1)
cursor_count(db.test.find().batch_size(100).limit(1), 1)
cursor_count(db.test.find().batch_size(500).limit(1), 1)
cursor_count(db.test.find().batch_size(0).limit(10), 10)
cursor_count(db.test.find().batch_size(1).limit(10), 10)
cursor_count(db.test.find().batch_size(2).limit(10), 10)
cursor_count(db.test.find().batch_size(5).limit(10), 10)
cursor_count(db.test.find().batch_size(100).limit(10), 10)
cursor_count(db.test.find().batch_size(500).limit(10), 10)
cur = db.test.find().batch_size(1)
next(cur)
if client_context.version.at_least(3, 1, 9):
# find command batchSize should be 1
self.assertEqual(0, len(cur._Cursor__data))
else:
# OP_QUERY ntoreturn should be 2
self.assertEqual(1, len(cur._Cursor__data))
next(cur)
self.assertEqual(0, len(cur._Cursor__data))
next(cur)
self.assertEqual(0, len(cur._Cursor__data))
next(cur)
self.assertEqual(0, len(cur._Cursor__data))
def test_limit_and_batch_size(self):
db = self.db
db.test.drop()
db.test.insert_many([{"x": x} for x in range(500)])
curs = db.test.find().limit(0).batch_size(10)
next(curs)
self.assertEqual(10, curs._Cursor__retrieved)
curs = db.test.find(limit=0, batch_size=10)
next(curs)
self.assertEqual(10, curs._Cursor__retrieved)
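# A negative limit is a "hard" limit: the server returns at most abs(limit)
# documents in a single batch and closes the cursor, overriding batch_size.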
curs = db.test.find().limit(-2).batch_size(0)
next(curs)
self.assertEqual(2, curs._Cursor__retrieved)
curs = db.test.find(limit=-2, batch_size=0)
next(curs)
self.assertEqual(2, curs._Cursor__retrieved)
curs = db.test.find().limit(-4).batch_size(5)
next(curs)
self.assertEqual(4, curs._Cursor__retrieved)
curs = db.test.find(limit=-4, batch_size=5)
next(curs)
self.assertEqual(4, curs._Cursor__retrieved)
curs = db.test.find().limit(50).batch_size(500)
next(curs)
self.assertEqual(50, curs._Cursor__retrieved)
curs = db.test.find(limit=50, batch_size=500)
next(curs)
self.assertEqual(50, curs._Cursor__retrieved)
curs = db.test.find().batch_size(500)
next(curs)
self.assertEqual(500, curs._Cursor__retrieved)
curs = db.test.find(batch_size=500)
next(curs)
self.assertEqual(500, curs._Cursor__retrieved)
curs = db.test.find().limit(50)
next(curs)
self.assertEqual(50, curs._Cursor__retrieved)
curs = db.test.find(limit=50)
next(curs)
self.assertEqual(50, curs._Cursor__retrieved)
# These two might be shaky, as the default is set by the server.
# As of 2.0.0-rc0, 101 documents or 1MB (whichever is smaller) is the
# default for queries without ntoreturn.
curs = db.test.find()
next(curs)
self.assertEqual(101, curs._Cursor__retrieved)
curs = db.test.find().limit(0).batch_size(0)
next(curs)
self.assertEqual(101, curs._Cursor__retrieved)
curs = db.test.find(limit=0, batch_size=0)
next(curs)
self.assertEqual(101, curs._Cursor__retrieved)
def test_skip(self):
db = self.db
self.assertRaises(TypeError, db.test.find().skip, None)
self.assertRaises(TypeError, db.test.find().skip, "hello")
self.assertRaises(TypeError, db.test.find().skip, 5.5)
self.assertRaises(ValueError, db.test.find().skip, -5)
self.assertTrue(db.test.find().skip(long(5)))
db.drop_collection("test")
db.test.insert_many([{"x": i} for i in range(100)])
for i in db.test.find():
self.assertEqual(i["x"], 0)
break
for i in db.test.find().skip(20):
self.assertEqual(i["x"], 20)
break
for i in db.test.find().skip(99):
self.assertEqual(i["x"], 99)
break
for i in db.test.find().skip(1):
self.assertEqual(i["x"], 1)
break
for i in db.test.find().skip(0):
self.assertEqual(i["x"], 0)
break
for i in db.test.find().skip(0).skip(50).skip(10):
self.assertEqual(i["x"], 10)
break
for i in db.test.find().skip(1000):
self.fail()
a = db.test.find()
a.skip(10)
for _ in a:
break
self.assertRaises(InvalidOperation, a.skip, 5)
def test_sort(self):
db = self.db
self.assertRaises(TypeError, db.test.find().sort, 5)
self.assertRaises(ValueError, db.test.find().sort, [])
self.assertRaises(TypeError, db.test.find().sort, [], ASCENDING)
self.assertRaises(TypeError, db.test.find().sort,
[("hello", DESCENDING)], DESCENDING)
db.test.drop()
unsort = list(range(10))
random.shuffle(unsort)
db.test.insert_many([{"x": i} for i in unsort])
asc = [i["x"] for i in db.test.find().sort("x", ASCENDING)]
self.assertEqual(asc, list(range(10)))
asc = [i["x"] for i in db.test.find().sort("x")]
self.assertEqual(asc, list(range(10)))
asc = [i["x"] for i in db.test.find().sort([("x", ASCENDING)])]
self.assertEqual(asc, list(range(10)))
expect = list(reversed(range(10)))
desc = [i["x"] for i in db.test.find().sort("x", DESCENDING)]
self.assertEqual(desc, expect)
desc = [i["x"] for i in db.test.find().sort([("x", DESCENDING)])]
self.assertEqual(desc, expect)
desc = [i["x"] for i in
db.test.find().sort("x", ASCENDING).sort("x", DESCENDING)]
self.assertEqual(desc, expect)
expected = [(1, 5), (2, 5), (0, 3), (7, 3), (9, 2), (2, 1), (3, 1)]
shuffled = list(expected)
random.shuffle(shuffled)
db.test.drop()
for (a, b) in shuffled:
db.test.insert_one({"a": a, "b": b})
result = [(i["a"], i["b"]) for i in
db.test.find().sort([("b", DESCENDING),
("a", ASCENDING)])]
self.assertEqual(result, expected)
a = db.test.find()
a.sort("x", ASCENDING)
for _ in a:
break
self.assertRaises(InvalidOperation, a.sort, "x", ASCENDING)
@ignore_deprecations
def test_count(self):
db = self.db
db.test.drop()
self.assertEqual(0, db.test.find().count())
db.test.insert_many([{"x": i} for i in range(10)])
self.assertEqual(10, db.test.find().count())
self.assertTrue(isinstance(db.test.find().count(), int))
self.assertEqual(10, db.test.find().limit(5).count())
self.assertEqual(10, db.test.find().skip(5).count())
self.assertEqual(1, db.test.find({"x": 1}).count())
self.assertEqual(5, db.test.find({"x": {"$lt": 5}}).count())
a = db.test.find()
b = a.count()
for _ in a:
break
self.assertEqual(b, a.count())
self.assertEqual(0, db.test.acollectionthatdoesntexist.find().count())
@ignore_deprecations
def test_count_with_hint(self):
collection = self.db.test
collection.drop()
collection.insert_many([{'i': 1}, {'i': 2}])
self.assertEqual(2, collection.find().count())
collection.create_index([('i', 1)])
self.assertEqual(1, collection.find({'i': 1}).hint("_id_").count())
self.assertEqual(2, collection.find().hint("_id_").count())
self.assertRaises(OperationFailure,
collection.find({'i': 1}).hint("BAD HINT").count)
# Create a sparse index which should have no entries.
collection.create_index([('x', 1)], sparse=True)
self.assertEqual(0, collection.find({'i': 1}).hint("x_1").count())
self.assertEqual(
0, collection.find({'i': 1}).hint([("x", 1)]).count())
if client_context.version.at_least(3, 3, 2):
self.assertEqual(0, collection.find().hint("x_1").count())
self.assertEqual(0, collection.find().hint([("x", 1)]).count())
else:
self.assertEqual(2, collection.find().hint("x_1").count())
self.assertEqual(2, collection.find().hint([("x", 1)]).count())
@ignore_deprecations
def test_where(self):
db = self.db
db.test.drop()
a = db.test.find()
self.assertRaises(TypeError, a.where, 5)
self.assertRaises(TypeError, a.where, None)
self.assertRaises(TypeError, a.where, {})
db.test.insert_many([{"x": i} for i in range(10)])
self.assertEqual(3, len(list(db.test.find().where('this.x < 3'))))
self.assertEqual(3,
len(list(db.test.find().where(Code('this.x < 3')))))
code_with_scope = Code('this.x < i', {"i": 3})
if client_context.version.at_least(4, 3, 3):
# MongoDB 4.4 removed support for Code with scope.
with self.assertRaises(OperationFailure):
list(db.test.find().where(code_with_scope))
code_with_empty_scope = Code('this.x < 3', {})
with self.assertRaises(OperationFailure):
list(db.test.find().where(code_with_empty_scope))
else:
self.assertEqual(
3, len(list(db.test.find().where(code_with_scope))))
self.assertEqual(10, len(list(db.test.find())))
self.assertEqual(3, db.test.find().where('this.x < 3').count())
self.assertEqual(10, db.test.find().count())
self.assertEqual(3, db.test.find().where(u'this.x < 3').count())
self.assertEqual([0, 1, 2],
[a["x"] for a in
db.test.find().where('this.x < 3')])
self.assertEqual([],
[a["x"] for a in
db.test.find({"x": 5}).where('this.x < 3')])
self.assertEqual([5],
[a["x"] for a in
db.test.find({"x": 5}).where('this.x > 3')])
cursor = db.test.find().where('this.x < 3').where('this.x > 7')
self.assertEqual([8, 9], [a["x"] for a in cursor])
a = db.test.find()
b = a.where('this.x > 3')
for _ in a:
break
self.assertRaises(InvalidOperation, a.where, 'this.x < 3')
def test_rewind(self):
self.db.test.insert_many([{"x": i} for i in range(1, 4)])
cursor = self.db.test.find().limit(2)
count = 0
for _ in cursor:
count += 1
self.assertEqual(2, count)
count = 0
for _ in cursor:
count += 1
self.assertEqual(0, count)
cursor.rewind()
count = 0
for _ in cursor:
count += 1
self.assertEqual(2, count)
cursor.rewind()
count = 0
for _ in cursor:
break
cursor.rewind()
for _ in cursor:
count += 1
self.assertEqual(2, count)
self.assertEqual(cursor, cursor.rewind())
# manipulate, oplog_replay, and snapshot are all deprecated.
@ignore_deprecations
def test_clone(self):
self.db.test.insert_many([{"x": i} for i in range(1, 4)])
cursor = self.db.test.find().limit(2)
count = 0
for _ in cursor:
count += 1
self.assertEqual(2, count)
count = 0
for _ in cursor:
count += 1
self.assertEqual(0, count)
cursor = cursor.clone()
cursor2 = cursor.clone()
count = 0
for _ in cursor:
count += 1
self.assertEqual(2, count)
for _ in cursor2:
count += 1
self.assertEqual(4, count)
cursor.rewind()
count = 0
for _ in cursor:
break
cursor = cursor.clone()
for _ in cursor:
count += 1
self.assertEqual(2, count)
self.assertNotEqual(cursor, cursor.clone())
# Just test attributes
cursor = self.db.test.find({"x": re.compile("^hello.*")},
projection={'_id': False},
skip=1,
no_cursor_timeout=True,
cursor_type=CursorType.TAILABLE_AWAIT,
sort=[("x", 1)],
allow_partial_results=True,
oplog_replay=True,
batch_size=123,
manipulate=False,
collation={'locale': 'en_US'},
hint=[("_id", 1)],
max_scan=100,
max_time_ms=1000,
return_key=True,
show_record_id=True,
snapshot=True,
allow_disk_use=True).limit(2)
cursor.min([('a', 1)]).max([('b', 3)])
cursor.add_option(128)
cursor.comment('hi!')
# Every attribute should be the same.
cursor2 = cursor.clone()
self.assertEqual(cursor.__dict__, cursor2.__dict__)
# Shallow copies share state, so mutating the copy also mutates the original.
cursor2 = copy.copy(cursor)
cursor2._Cursor__projection['cursor2'] = False
self.assertTrue('cursor2' in cursor._Cursor__projection)
# Deep copies don't share state and shouldn't mutate the original.
cursor3 = copy.deepcopy(cursor)
cursor3._Cursor__projection['cursor3'] = False
self.assertFalse('cursor3' in cursor._Cursor__projection)
cursor4 = cursor.clone()
cursor4._Cursor__projection['cursor4'] = False
self.assertFalse('cursor4' in cursor._Cursor__projection)
# Test memo when deepcopying queries
query = {"hello": "world"}
query["reflexive"] = query
cursor = self.db.test.find(query)
cursor2 = copy.deepcopy(cursor)
self.assertNotEqual(id(cursor._Cursor__spec),
id(cursor2._Cursor__spec))
self.assertEqual(id(cursor2._Cursor__spec['reflexive']),
id(cursor2._Cursor__spec))
self.assertEqual(len(cursor2._Cursor__spec), 2)
# Ensure hints are cloned as the correct type
cursor = self.db.test.find().hint([('z', 1), ("a", 1)])
cursor2 = copy.deepcopy(cursor)
self.assertTrue(isinstance(cursor2._Cursor__hint, SON))
self.assertEqual(cursor._Cursor__hint, cursor2._Cursor__hint)
def test_clone_empty(self):
self.db.test.delete_many({})
self.db.test.insert_many([{"x": i} for i in range(1, 4)])
cursor = self.db.test.find()[2:2]
cursor2 = cursor.clone()
self.assertRaises(StopIteration, cursor.next)
self.assertRaises(StopIteration, cursor2.next)
@ignore_deprecations
def test_count_with_fields(self):
self.db.test.drop()
self.db.test.insert_one({"x": 1})
self.assertEqual(1, self.db.test.find({}, ["a"]).count())
def test_bad_getitem(self):
self.assertRaises(TypeError, lambda x: self.db.test.find()[x], "hello")
self.assertRaises(TypeError, lambda x: self.db.test.find()[x], 5.5)
self.assertRaises(TypeError, lambda x: self.db.test.find()[x], None)
def test_getitem_slice_index(self):
self.db.drop_collection("test")
self.db.test.insert_many([{"i": i} for i in range(100)])
count = itertools.count
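# Slicing a cursor maps [start:stop] onto skip(start) and limit(stop - start);
# negative indices and step values are unsupported, hence the IndexError checks.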
self.assertRaises(IndexError, lambda: self.db.test.find()[-1:])
self.assertRaises(IndexError, lambda: self.db.test.find()[1:2:2])
for a, b in zip(count(0), self.db.test.find()):
self.assertEqual(a, b['i'])
self.assertEqual(100, len(list(self.db.test.find()[0:])))
for a, b in zip(count(0), self.db.test.find()[0:]):
self.assertEqual(a, b['i'])
self.assertEqual(80, len(list(self.db.test.find()[20:])))
for a, b in zip(count(20), self.db.test.find()[20:]):
self.assertEqual(a, b['i'])
for a, b in zip(count(99), self.db.test.find()[99:]):
self.assertEqual(a, b['i'])
for i in self.db.test.find()[1000:]:
self.fail()
self.assertEqual(5, len(list(self.db.test.find()[20:25])))
self.assertEqual(5, len(list(
self.db.test.find()[long(20):long(25)])))
for a, b in zip(count(20), self.db.test.find()[20:25]):
self.assertEqual(a, b['i'])
self.assertEqual(80, len(list(self.db.test.find()[40:45][20:])))
for a, b in zip(count(20), self.db.test.find()[40:45][20:]):
self.assertEqual(a, b['i'])
self.assertEqual(80,
len(list(self.db.test.find()[40:45].limit(0).skip(20))
)
)
for a, b in zip(count(20),
self.db.test.find()[40:45].limit(0).skip(20)):
self.assertEqual(a, b['i'])
self.assertEqual(80,
len(list(self.db.test.find().limit(10).skip(40)[20:]))
)
for a, b in zip(count(20),
self.db.test.find().limit(10).skip(40)[20:]):
self.assertEqual(a, b['i'])
self.assertEqual(1, len(list(self.db.test.find()[:1])))
self.assertEqual(5, len(list(self.db.test.find()[:5])))
self.assertEqual(1, len(list(self.db.test.find()[99:100])))
self.assertEqual(1, len(list(self.db.test.find()[99:1000])))
self.assertEqual(0, len(list(self.db.test.find()[10:10])))
self.assertEqual(0, len(list(self.db.test.find()[:0])))
self.assertEqual(80,
len(list(self.db.test.find()[10:10].limit(0).skip(20))
)
)
self.assertRaises(IndexError, lambda: self.db.test.find()[10:8])
def test_getitem_numeric_index(self):
self.db.drop_collection("test")
self.db.test.insert_many([{"i": i} for i in range(100)])
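# Indexing a cursor with an integer applies skip(index) plus a hard limit of
# one document; out-of-range or negative indices raise IndexError.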
self.assertEqual(0, self.db.test.find()[0]['i'])
self.assertEqual(50, self.db.test.find()[50]['i'])
self.assertEqual(50, self.db.test.find().skip(50)[0]['i'])
self.assertEqual(50, self.db.test.find().skip(49)[1]['i'])
self.assertEqual(50, self.db.test.find()[long(50)]['i'])
self.assertEqual(99, self.db.test.find()[99]['i'])
self.assertRaises(IndexError, lambda x: self.db.test.find()[x], -1)
self.assertRaises(IndexError, lambda x: self.db.test.find()[x], 100)
self.assertRaises(IndexError,
lambda x: self.db.test.find().skip(50)[x], 50)
@ignore_deprecations
def test_count_with_limit_and_skip(self):
self.assertRaises(TypeError, self.db.test.find().count, "foo")
def check_len(cursor, length):
self.assertEqual(len(list(cursor)), cursor.count(True))
self.assertEqual(length, cursor.count(True))
self.db.drop_collection("test")
self.db.test.insert_many([{"i": i} for i in range(100)])
check_len(self.db.test.find(), 100)
check_len(self.db.test.find().limit(10), 10)
check_len(self.db.test.find().limit(110), 100)
check_len(self.db.test.find().skip(10), 90)
check_len(self.db.test.find().skip(110), 0)
check_len(self.db.test.find().limit(10).skip(10), 10)
check_len(self.db.test.find()[10:20], 10)
check_len(self.db.test.find().limit(10).skip(95), 5)
check_len(self.db.test.find()[95:105], 5)
def test_len(self):
self.assertRaises(TypeError, len, self.db.test.find())
def test_properties(self):
self.assertEqual(self.db.test, self.db.test.find().collection)
def set_coll():
self.db.test.find().collection = "hello"
self.assertRaises(AttributeError, set_coll)
def test_get_more(self):
db = self.db
db.drop_collection("test")
db.test.insert_many([{'i': i} for i in range(10)])
self.assertEqual(10, len(list(db.test.find().batch_size(5))))
def test_tailable(self):
db = self.db
db.drop_collection("test")
db.create_collection("test", capped=True, size=1000, max=3)
self.addCleanup(db.drop_collection, "test")
cursor = db.test.find(cursor_type=CursorType.TAILABLE)
db.test.insert_one({"x": 1})
count = 0
for doc in cursor:
count += 1
self.assertEqual(1, doc["x"])
self.assertEqual(1, count)
db.test.insert_one({"x": 2})
count = 0
for doc in cursor:
count += 1
self.assertEqual(2, doc["x"])
self.assertEqual(1, count)
db.test.insert_one({"x": 3})
count = 0
for doc in cursor:
count += 1
self.assertEqual(3, doc["x"])
self.assertEqual(1, count)
# Capped rollover - the collection can never
# have more than 3 documents. Just make sure
# this doesn't raise...
db.test.insert_many([{"x": i} for i in range(4, 7)])
self.assertEqual(0, len(list(cursor)))
# and that the cursor doesn't think it's still alive.
self.assertFalse(cursor.alive)
self.assertEqual(3, db.test.count_documents({}))
# __getitem__(index)
for cursor in (db.test.find(cursor_type=CursorType.TAILABLE),
db.test.find(cursor_type=CursorType.TAILABLE_AWAIT)):
self.assertEqual(4, cursor[0]["x"])
self.assertEqual(5, cursor[1]["x"])
self.assertEqual(6, cursor[2]["x"])
cursor.rewind()
self.assertEqual([4], [doc["x"] for doc in cursor[0:1]])
cursor.rewind()
self.assertEqual([5], [doc["x"] for doc in cursor[1:2]])
cursor.rewind()
self.assertEqual([6], [doc["x"] for doc in cursor[2:3]])
cursor.rewind()
self.assertEqual([4, 5], [doc["x"] for doc in cursor[0:2]])
cursor.rewind()
self.assertEqual([5, 6], [doc["x"] for doc in cursor[1:3]])
cursor.rewind()
self.assertEqual([4, 5, 6], [doc["x"] for doc in cursor[0:3]])
def test_concurrent_close(self):
"""Ensure a tailable can be closed from another thread."""
db = self.db
db.drop_collection("test")
db.create_collection("test", capped=True, size=1000, max=3)
self.addCleanup(db.drop_collection, "test")
cursor = db.test.find(cursor_type=CursorType.TAILABLE)
def iterate_cursor():
while cursor.alive:
for doc in cursor:
pass
t = threading.Thread(target=iterate_cursor)
t.start()
time.sleep(1)
cursor.close()
self.assertFalse(cursor.alive)
t.join(3)
self.assertFalse(t.is_alive())
def test_distinct(self):
self.db.drop_collection("test")
self.db.test.insert_many(
[{"a": 1}, {"a": 2}, {"a": 2}, {"a": 2}, {"a": 3}])
distinct = self.db.test.find({"a": {"$lt": 3}}).distinct("a")
distinct.sort()
self.assertEqual([1, 2], distinct)
self.db.drop_collection("test")
self.db.test.insert_one({"a": {"b": "a"}, "c": 12})
self.db.test.insert_one({"a": {"b": "b"}, "c": 8})
self.db.test.insert_one({"a": {"b": "c"}, "c": 12})
self.db.test.insert_one({"a": {"b": "c"}, "c": 8})
distinct = self.db.test.find({"c": 8}).distinct("a.b")
distinct.sort()
self.assertEqual(["b", "c"], distinct)
@client_context.require_version_max(4, 1, 0, -1)
def test_max_scan(self):
self.db.drop_collection("test")
self.db.test.insert_many([{} for _ in range(100)])
self.assertEqual(100, len(list(self.db.test.find())))
self.assertEqual(50, len(list(self.db.test.find().max_scan(50))))
self.assertEqual(50, len(list(self.db.test.find()
.max_scan(90).max_scan(50))))
def test_with_statement(self):
self.db.drop_collection("test")
self.db.test.insert_many([{} for _ in range(100)])
c1 = self.db.test.find()
with self.db.test.find() as c2:
self.assertTrue(c2.alive)
self.assertFalse(c2.alive)
with self.db.test.find() as c2:
self.assertEqual(100, len(list(c2)))
self.assertFalse(c2.alive)
self.assertTrue(c1.alive)
@client_context.require_no_mongos
@ignore_deprecations
def test_comment(self):
# MongoDB 3.1.5 changed the ns for commands.
regex = {'$regex': r'pymongo_test.(\$cmd|test)'}
if client_context.version.at_least(3, 5, 8, -1):
query_key = "command.comment"
elif client_context.version.at_least(3, 1, 8, -1):
query_key = "query.comment"
else:
query_key = "query.$comment"
self.client.drop_database(self.db)
self.db.set_profiling_level(ALL)
try:
list(self.db.test.find().comment('foo'))
op = self.db.system.profile.find({'ns': 'pymongo_test.test',
'op': 'query',
query_key: 'foo'})
self.assertEqual(op.count(), 1)
self.db.test.find().comment('foo').count()
op = self.db.system.profile.find({'ns': regex,
'op': 'command',
'command.count': 'test',
'command.comment': 'foo'})
self.assertEqual(op.count(), 1)
self.db.test.find().comment('foo').distinct('type')
op = self.db.system.profile.find({'ns': regex,
'op': 'command',
'command.distinct': 'test',
'command.comment': 'foo'})
self.assertEqual(op.count(), 1)
finally:
self.db.set_profiling_level(OFF)
self.db.system.profile.drop()
self.db.test.insert_many([{}, {}])
cursor = self.db.test.find()
next(cursor)
self.assertRaises(InvalidOperation, cursor.comment, 'hello')
def test_modifiers(self):
c = self.db.test
# "modifiers" is deprecated.
with ignore_deprecations():
cur = c.find()
self.assertTrue('$query' not in cur._Cursor__query_spec())
cur = c.find().comment("testing").max_time_ms(500)
self.assertTrue('$query' in cur._Cursor__query_spec())
self.assertEqual(cur._Cursor__query_spec()["$comment"], "testing")
self.assertEqual(cur._Cursor__query_spec()["$maxTimeMS"], 500)
cur = c.find(
modifiers={"$maxTimeMS": 500, "$comment": "testing"})
self.assertTrue('$query' in cur._Cursor__query_spec())
self.assertEqual(cur._Cursor__query_spec()["$comment"], "testing")
self.assertEqual(cur._Cursor__query_spec()["$maxTimeMS"], 500)
# Keyword arg overwrites modifier.
# If we remove the "modifiers" arg, delete this test after checking
# that TestCommandMonitoring.test_find_options covers all cases.
cur = c.find(comment="hi", modifiers={"$comment": "bye"})
self.assertEqual(cur._Cursor__query_spec()["$comment"], "hi")
cur = c.find(max_scan=1, modifiers={"$maxScan": 2})
self.assertEqual(cur._Cursor__query_spec()["$maxScan"], 1)
cur = c.find(max_time_ms=1, modifiers={"$maxTimeMS": 2})
self.assertEqual(cur._Cursor__query_spec()["$maxTimeMS"], 1)
cur = c.find(min=1, modifiers={"$min": 2})
self.assertEqual(cur._Cursor__query_spec()["$min"], 1)
cur = c.find(max=1, modifiers={"$max": 2})
self.assertEqual(cur._Cursor__query_spec()["$max"], 1)
cur = c.find(return_key=True, modifiers={"$returnKey": False})
self.assertEqual(cur._Cursor__query_spec()["$returnKey"], True)
cur = c.find(hint=[("a", 1)], modifiers={"$hint": {"b": "1"}})
self.assertEqual(cur._Cursor__query_spec()["$hint"], {"a": 1})
# The arg is named show_record_id after the "find" command arg, the
# modifier is named $showDiskLoc for the OP_QUERY modifier. It's
# stored as $showDiskLoc then upgraded to showRecordId if we send a
# "find" command.
cur = c.find(show_record_id=True, modifiers={"$showDiskLoc": False})
self.assertEqual(cur._Cursor__query_spec()["$showDiskLoc"], True)
if not client_context.version.at_least(3, 7, 3):
cur = c.find(snapshot=True, modifiers={"$snapshot": False})
self.assertEqual(cur._Cursor__query_spec()["$snapshot"], True)
def test_alive(self):
self.db.test.delete_many({})
self.db.test.insert_many([{} for _ in range(3)])
self.addCleanup(self.db.test.delete_many, {})
cursor = self.db.test.find().batch_size(2)
n = 0
while True:
cursor.next()
n += 1
if 3 == n:
self.assertFalse(cursor.alive)
break
self.assertTrue(cursor.alive)
def test_close_kills_cursor_synchronously(self):
# Kill any cursors possibly queued up by previous tests.
gc.collect()
self.client._process_periodic_tasks()
listener = WhiteListEventListener("killCursors")
results = listener.results
client = rs_or_single_client(event_listeners=[listener])
self.addCleanup(client.close)
coll = client[self.db.name].test_close_kills_cursors
# Add some test data.
docs_inserted = 1000
coll.insert_many([{"i": i} for i in range(docs_inserted)])
results.clear()
# Close a cursor while it's still open on the server.
cursor = coll.find().batch_size(10)
self.assertTrue(bool(next(cursor)))
self.assertLess(cursor.retrieved, docs_inserted)
cursor.close()
def assertCursorKilled():
self.assertEqual(1, len(results["started"]))
self.assertEqual("killCursors", results["started"][0].command_name)
self.assertEqual(1, len(results["succeeded"]))
self.assertEqual("killCursors",
results["succeeded"][0].command_name)
assertCursorKilled()
results.clear()
# Close a command cursor while it's still open on the server.
cursor = coll.aggregate([], batchSize=10)
self.assertTrue(bool(next(cursor)))
cursor.close()
# The cursor should be killed if it had a non-zero id.
if cursor.cursor_id:
assertCursorKilled()
else:
self.assertEqual(0, len(results["started"]))
def test_delete_not_initialized(self):
# Creating a cursor with invalid arguments will not run __init__
# but will still call __del__, eg test.find(invalidKwarg=1).
cursor = Cursor.__new__(Cursor) # Skip calling __init__
cursor.__del__() # no error
@client_context.require_version_min(3, 6)
def test_getMore_does_not_send_readPreference(self):
listener = WhiteListEventListener('find', 'getMore')
client = rs_or_single_client(
event_listeners=[listener])
self.addCleanup(client.close)
coll = client[self.db.name].test
coll.delete_many({})
coll.insert_many([{} for _ in range(5)])
self.addCleanup(coll.drop)
list(coll.find(batch_size=3))
started = listener.results['started']
self.assertEqual(2, len(started))
self.assertEqual('find', started[0].command_name)
self.assertIn('$readPreference', started[0].command)
self.assertEqual('getMore', started[1].command_name)
self.assertNotIn('$readPreference', started[1].command)
class TestRawBatchCursor(IntegrationTest):
def test_find_raw(self):
c = self.db.test
c.drop()
docs = [{'_id': i, 'x': 3.0 * i} for i in range(10)]
c.insert_many(docs)
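# find_raw_batches yields each server batch as one undecoded BSON bytes object
# instead of individual documents.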
batches = list(c.find_raw_batches().sort('_id'))
self.assertEqual(1, len(batches))
self.assertEqual(docs, decode_all(batches[0]))
def test_manipulate(self):
c = self.db.test
with self.assertRaises(InvalidOperation):
c.find_raw_batches(manipulate=True)
def test_explain(self):
c = self.db.test
c.insert_one({})
explanation = c.find_raw_batches().explain()
self.assertIsInstance(explanation, dict)
def test_clone(self):
cursor = self.db.test.find_raw_batches()
# Copy of a RawBatchCursor is also a RawBatchCursor, not a Cursor.
self.assertIsInstance(next(cursor.clone()), bytes)
self.assertIsInstance(next(copy.copy(cursor)), bytes)
@client_context.require_no_mongos
def test_exhaust(self):
c = self.db.test
c.drop()
c.insert_many({'_id': i} for i in range(200))
result = b''.join(c.find_raw_batches(cursor_type=CursorType.EXHAUST))
self.assertEqual([{'_id': i} for i in range(200)], decode_all(result))
def test_server_error(self):
with self.assertRaises(OperationFailure) as exc:
next(self.db.test.find_raw_batches({'x': {'$bad': 1}}))
# The server response was decoded, not left raw.
self.assertIsInstance(exc.exception.details, dict)
def test_get_item(self):
with self.assertRaises(InvalidOperation):
self.db.test.find_raw_batches()[0]
@client_context.require_version_min(3, 4)
def test_collation(self):
next(self.db.test.find_raw_batches(collation=Collation('en_US')))
@client_context.require_version_max(3, 2)
def test_collation_error(self):
with self.assertRaises(ConfigurationError):
next(self.db.test.find_raw_batches(collation=Collation('en_US')))
@client_context.require_version_min(3, 2)
def test_read_concern(self):
c = self.db.get_collection("test", read_concern=ReadConcern("majority"))
next(c.find_raw_batches())
@client_context.require_version_max(3, 1)
def test_read_concern_error(self):
c = self.db.get_collection("test", read_concern=ReadConcern("majority"))
with self.assertRaises(ConfigurationError):
next(c.find_raw_batches())
def test_monitoring(self):
listener = EventListener()
client = rs_or_single_client(event_listeners=[listener])
c = client.pymongo_test.test
c.drop()
c.insert_many([{'_id': i} for i in range(10)])
listener.results.clear()
cursor = c.find_raw_batches(batch_size=4)
# First raw batch of 4 documents.
next(cursor)
started = listener.results['started'][0]
succeeded = listener.results['succeeded'][0]
self.assertEqual(0, len(listener.results['failed']))
self.assertEqual('find', started.command_name)
self.assertEqual('pymongo_test', started.database_name)
self.assertEqual('find', succeeded.command_name)
csr = succeeded.reply["cursor"]
self.assertEqual(csr["ns"], "pymongo_test.test")
# The batch is a list of one raw bytes object.
self.assertEqual(len(csr["firstBatch"]), 1)
self.assertEqual(decode_all(csr["firstBatch"][0]),
[{'_id': i} for i in range(0, 4)])
listener.results.clear()
# Next raw batch of 4 documents.
next(cursor)
try:
results = listener.results
started = results['started'][0]
succeeded = results['succeeded'][0]
self.assertEqual(0, len(results['failed']))
self.assertEqual('getMore', started.command_name)
self.assertEqual('pymongo_test', started.database_name)
self.assertEqual('getMore', succeeded.command_name)
csr = succeeded.reply["cursor"]
self.assertEqual(csr["ns"], "pymongo_test.test")
self.assertEqual(len(csr["nextBatch"]), 1)
self.assertEqual(decode_all(csr["nextBatch"][0]),
[{'_id': i} for i in range(4, 8)])
finally:
# Finish the cursor.
tuple(cursor)
class TestRawBatchCommandCursor(IntegrationTest):
@classmethod
def setUpClass(cls):
super(TestRawBatchCommandCursor, cls).setUpClass()
def test_aggregate_raw(self):
c = self.db.test
c.drop()
docs = [{'_id': i, 'x': 3.0 * i} for i in range(10)]
c.insert_many(docs)
batches = list(c.aggregate_raw_batches([{'$sort': {'_id': 1}}]))
self.assertEqual(1, len(batches))
self.assertEqual(docs, decode_all(batches[0]))
def test_server_error(self):
c = self.db.test
c.drop()
docs = [{'_id': i, 'x': 3.0 * i} for i in range(10)]
c.insert_many(docs)
c.insert_one({'_id': 10, 'x': 'not a number'})
with self.assertRaises(OperationFailure) as exc:
list(self.db.test.aggregate_raw_batches([{
'$sort': {'_id': 1},
}, {
'$project': {'x': {'$multiply': [2, '$x']}}
}], batchSize=4))
# The server response was decoded, not left raw.
self.assertIsInstance(exc.exception.details, dict)
def test_get_item(self):
with self.assertRaises(InvalidOperation):
self.db.test.aggregate_raw_batches([])[0]
@client_context.require_version_min(3, 4)
def test_collation(self):
next(self.db.test.aggregate_raw_batches([], collation=Collation('en_US')))
@client_context.require_version_max(3, 2)
def test_collation_error(self):
with self.assertRaises(ConfigurationError):
next(self.db.test.aggregate_raw_batches([], collation=Collation('en_US')))
def test_monitoring(self):
listener = EventListener()
client = rs_or_single_client(event_listeners=[listener])
c = client.pymongo_test.test
c.drop()
c.insert_many([{'_id': i} for i in range(10)])
listener.results.clear()
cursor = c.aggregate_raw_batches([{'$sort': {'_id': 1}}], batchSize=4)
# Start cursor, no initial batch.
started = listener.results['started'][0]
succeeded = listener.results['succeeded'][0]
self.assertEqual(0, len(listener.results['failed']))
self.assertEqual('aggregate', started.command_name)
self.assertEqual('pymongo_test', started.database_name)
self.assertEqual('aggregate', succeeded.command_name)
csr = succeeded.reply["cursor"]
self.assertEqual(csr["ns"], "pymongo_test.test")
# First batch is empty.
self.assertEqual(len(csr["firstBatch"]), 0)
listener.results.clear()
# Batches of 4 documents.
n = 0
for batch in cursor:
results = listener.results
started = results['started'][0]
succeeded = results['succeeded'][0]
self.assertEqual(0, len(results['failed']))
self.assertEqual('getMore', started.command_name)
self.assertEqual('pymongo_test', started.database_name)
self.assertEqual('getMore', succeeded.command_name)
csr = succeeded.reply["cursor"]
self.assertEqual(csr["ns"], "pymongo_test.test")
self.assertEqual(len(csr["nextBatch"]), 1)
self.assertEqual(csr["nextBatch"][0], batch)
self.assertEqual(decode_all(batch),
[{'_id': i} for i in range(n, min(n + 4, 10))])
n += 4
listener.results.clear()
if __name__ == "__main__":
unittest.main()
|
test_browser.py
|
import BaseHTTPServer, multiprocessing, os, shutil, subprocess, unittest, zlib, webbrowser, time, shlex
from runner import BrowserCore, path_from_root
from tools.shared import *
# User can specify an environment variable EMSCRIPTEN_BROWSER to force the browser test suite to
# run using another browser command line than the default system browser.
emscripten_browser = os.environ.get('EMSCRIPTEN_BROWSER')
if emscripten_browser:
cmd = shlex.split(emscripten_browser)
def run_in_other_browser(url):
Popen(cmd + [url])
webbrowser.open_new = run_in_other_browser
def test_chunked_synchronous_xhr_server(support_byte_ranges, chunkSize, data, checksum):
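# One-off local HTTP server for the chunked synchronous XHR test: serves `data`
# either whole, or as byte ranges when support_byte_ranges is set.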
class ChunkedServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def sendheaders(s, extra=[], length=len(data)):
s.send_response(200)
s.send_header("Content-Length", str(length))
s.send_header("Access-Control-Allow-Origin", "http://localhost:8888")
s.send_header("Access-Control-Expose-Headers", "Content-Length, Accept-Ranges")
s.send_header("Content-type", "application/octet-stream")
if support_byte_ranges:
s.send_header("Accept-Ranges", "bytes")
for i in extra:
s.send_header(i[0], i[1])
s.end_headers()
def do_HEAD(s):
s.sendheaders()
def do_OPTIONS(s):
s.sendheaders([("Access-Control-Allow-Headers", "Range")], 0)
def do_GET(s):
if not support_byte_ranges:
s.sendheaders()
s.wfile.write(data)
else:
(start, end) = s.headers.get("range").split("=")[1].split("-")
start = int(start)
end = int(end)
end = min(len(data)-1, end)
length = end-start+1
s.sendheaders([],length)
s.wfile.write(data[start:end+1])
s.wfile.close()
expectedConns = 11
httpd = BaseHTTPServer.HTTPServer(('localhost', 11111), ChunkedServerHandler)
for i in range(expectedConns+1):
httpd.handle_request()
class browser(BrowserCore):
@staticmethod
def audio():
print
print 'Running the browser audio tests. Make sure to listen to hear the correct results!'
print
audio_test_cases = [
'test_sdl_audio',
'test_sdl_audio_mix_channels',
'test_sdl_audio_mix',
'test_sdl_audio_quickload',
'test_sdl_audio_beeps',
'test_openal_playback',
'test_openal_buffers',
'test_freealut'
]
return unittest.TestSuite(map(browser, audio_test_cases))
@classmethod
def setUpClass(self):
super(browser, self).setUpClass()
print
print 'Running the browser tests. Make sure the browser allows popups from localhost.'
print
def test_html(self):
# test HTML generation.
self.btest('hello_world_sdl.cpp', reference='htmltest.png',
message='You should see "hello, world!" and a colored cube.')
def test_html_source_map(self):
cpp_file = os.path.join(self.get_dir(), 'src.cpp')
html_file = os.path.join(self.get_dir(), 'src.html')
# browsers will try to 'guess' the corresponding original line if a
# generated line is unmapped, so if we want to make sure that our
# numbering is correct, we need to provide a couple of 'possible wrong
# answers'. thus, we add some printf calls so that the cpp file gets
# multiple mapped lines. in other words, if the program consists of a
# single 'throw' statement, browsers may just map any thrown exception to
# that line, because it will be the only mapped line.
with open(cpp_file, 'w') as f:
f.write(r'''
#include <cstdio>
int main() {
printf("Starting test\n");
try {
throw 42; // line 8
} catch (int e) { }
printf("done\n");
return 0;
}
''')
# use relative paths when calling emcc, because file:// URIs can only load
# sourceContent when the maps are relative paths
try_delete(html_file)
try_delete(html_file + '.map')
Popen([PYTHON, EMCC, 'src.cpp', '-o', 'src.html', '-g4'],
cwd=self.get_dir()).communicate()
assert os.path.exists(html_file)
assert os.path.exists(html_file + '.map')
webbrowser.open_new('file://' + html_file)
time.sleep(1)
print '''
If manually bisecting:
Check that you see src.cpp among the page sources.
Even better, add a breakpoint, e.g. on the printf, then reload, then step through and see the print (best to run with EM_SAVE_DIR=1 for the reload).
'''
def test_emscripten_log(self):
if os.environ.get('EMCC_FAST_COMPILER') == '1': return self.skip('fastcomp uses asm, where call stacks are sometimes less clear')
src = os.path.join(self.get_dir(), 'src.cpp')
open(src, 'w').write(self.with_report_result(open(path_from_root('tests', 'emscripten_log', 'emscripten_log.cpp')).read()))
Popen([PYTHON, EMCC, src, '--pre-js', path_from_root('src', 'emscripten-source-map.min.js'), '-g', '-o', 'page.html']).communicate()
self.run_browser('page.html', None, '/report_result?1')
def build_native_lzma(self):
lzma_native = path_from_root('third_party', 'lzma.js', 'lzma-native')
if os.path.isfile(lzma_native) and os.access(lzma_native, os.X_OK): return
cwd = os.getcwd()
try:
os.chdir(path_from_root('third_party', 'lzma.js'))
if WINDOWS and Building.which('mingw32-make'): # On Windows prefer using MinGW make if it exists, otherwise fall back to hoping we have cygwin make.
Popen(['doit.bat']).communicate()
else:
Popen(['sh', './doit.sh']).communicate()
finally:
os.chdir(cwd)
def test_split(self):
# test HTML generation.
self.reftest(path_from_root('tests', 'htmltest.png'))
output = Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_sdl.cpp'), '-o', 'something.js', '--split', '100', '--pre-js', 'reftest.js']).communicate()
assert os.path.exists(os.path.join(self.get_dir(), 'something.js')), 'must be main js file'
assert os.path.exists(os.path.join(self.get_dir(), 'something_functions.js')), 'must be functions js file'
assert os.path.exists(os.path.join(self.get_dir(), 'something.include.html')), 'must be js include file'
open(os.path.join(self.get_dir(), 'something.html'), 'w').write('''
<!doctype html>
<html lang="en-us">
<head>
<meta charset="utf-8">
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>Emscripten-Generated Code</title>
<style>
.emscripten { padding-right: 0; margin-left: auto; margin-right: auto; display: block; }
canvas.emscripten { border: 1px solid black; }
textarea.emscripten { font-family: monospace; width: 80%; }
div.emscripten { text-align: center; }
</style>
</head>
<body>
<hr/>
<div class="emscripten" id="status">Downloading...</div>
<div class="emscripten">
<progress value="0" max="100" id="progress" hidden=1></progress>
</div>
<canvas class="emscripten" id="canvas" oncontextmenu="event.preventDefault()"></canvas>
<hr/>
<div class="emscripten"><input type="button" value="fullscreen" onclick="Module.requestFullScreen()"></div>
<hr/>
<textarea class="emscripten" id="output" rows="8"></textarea>
<hr>
<script type='text/javascript'>
// connect to canvas
var Module = {
preRun: [],
postRun: [],
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
// These replacements are necessary if you render to raw HTML
//text = text.replace(/&/g, "&amp;");
//text = text.replace(/</g, "&lt;");
//text = text.replace(/>/g, "&gt;");
//text = text.replace('\\n', '<br>', 'g');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})(),
printErr: function(text) {
if (0) { // XXX disabled for safety typeof dump == 'function') {
dump(text + '\\n'); // fast, straight to the real console
} else {
console.log(text);
}
},
canvas: document.getElementById('canvas'),
setStatus: function(text) {
if (Module.setStatus.interval) clearInterval(Module.setStatus.interval);
var m = text.match(/([^(]+)\((\d+(\.\d+)?)\/(\d+)\)/);
var statusElement = document.getElementById('status');
var progressElement = document.getElementById('progress');
if (m) {
text = m[1];
progressElement.value = parseInt(m[2])*100;
progressElement.max = parseInt(m[4])*100;
progressElement.hidden = false;
} else {
progressElement.value = null;
progressElement.max = null;
progressElement.hidden = true;
}
statusElement.innerHTML = text;
},
totalDependencies: 0,
monitorRunDependencies: function(left) {
this.totalDependencies = Math.max(this.totalDependencies, left);
Module.setStatus(left ? 'Preparing... (' + (this.totalDependencies-left) + '/' + this.totalDependencies + ')' : 'All downloads complete.');
}
};
Module.setStatus('Downloading...');
</script>''' + open(os.path.join(self.get_dir(), 'something.include.html')).read() + '''
</body>
</html>
''')
self.run_browser('something.html', 'You should see "hello, world!" and a colored cube.', '/report_result?0')
def test_split_in_source_filenames(self):
self.reftest(path_from_root('tests', 'htmltest.png'))
output = Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_sdl.cpp'), '-o', 'something.js', '-g', '--split', '100', '--pre-js', 'reftest.js']).communicate()
assert os.path.exists(os.path.join(self.get_dir(), 'something.js')), 'must be main js file'
assert os.path.exists(os.path.join(self.get_dir(), 'something', 'hello_world_sdl.cpp.js')), 'must be functions js file'
assert os.path.exists(os.path.join(self.get_dir(), 'something.include.html')), 'must be js include file'
open(os.path.join(self.get_dir(), 'something.html'), 'w').write('''
<!doctype html>
<html lang="en-us">
<head>
<meta charset="utf-8">
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>Emscripten-Generated Code</title>
<style>
.emscripten { padding-right: 0; margin-left: auto; margin-right: auto; display: block; }
canvas.emscripten { border: 1px solid black; }
textarea.emscripten { font-family: monospace; width: 80%; }
div.emscripten { text-align: center; }
</style>
</head>
<body>
<hr/>
<div class="emscripten" id="status">Downloading...</div>
<div class="emscripten">
<progress value="0" max="100" id="progress" hidden=1></progress>
</div>
<canvas class="emscripten" id="canvas" oncontextmenu="event.preventDefault()"></canvas>
<hr/>
<div class="emscripten"><input type="button" value="fullscreen" onclick="Module.requestFullScreen()"></div>
<hr/>
<textarea class="emscripten" id="output" rows="8"></textarea>
<hr>
<script type='text/javascript'>
// connect to canvas
var Module = {
preRun: [],
postRun: [],
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
// These replacements are necessary if you render to raw HTML
//text = text.replace(/&/g, "&amp;");
//text = text.replace(/</g, "&lt;");
//text = text.replace(/>/g, "&gt;");
//text = text.replace('\\n', '<br>', 'g');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})(),
printErr: function(text) {
if (0) { // XXX disabled for safety typeof dump == 'function') {
dump(text + '\\n'); // fast, straight to the real console
} else {
console.log(text);
}
},
canvas: document.getElementById('canvas'),
setStatus: function(text) {
if (Module.setStatus.interval) clearInterval(Module.setStatus.interval);
var m = text.match(/([^(]+)\((\d+(\.\d+)?)\/(\d+)\)/);
var statusElement = document.getElementById('status');
var progressElement = document.getElementById('progress');
if (m) {
text = m[1];
progressElement.value = parseInt(m[2])*100;
progressElement.max = parseInt(m[4])*100;
progressElement.hidden = false;
} else {
progressElement.value = null;
progressElement.max = null;
progressElement.hidden = true;
}
statusElement.innerHTML = text;
},
totalDependencies: 0,
monitorRunDependencies: function(left) {
this.totalDependencies = Math.max(this.totalDependencies, left);
Module.setStatus(left ? 'Preparing... (' + (this.totalDependencies-left) + '/' + this.totalDependencies + ')' : 'All downloads complete.');
}
};
Module.setStatus('Downloading...');
</script>''' + open(os.path.join(self.get_dir(), 'something.include.html')).read() + '''
</body>
</html>
''')
self.run_browser('something.html', 'You should see "hello, world!" and a colored cube.', '/report_result?0')
def test_compression(self):
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
printf("hello compressed world\n");
int result = 1;
REPORT_RESULT();
return 0;
}
'''))
self.build_native_lzma()
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '-o', 'page.html',
'--compression', '%s,%s,%s' % (path_from_root('third_party', 'lzma.js', 'lzma-native'),
path_from_root('third_party', 'lzma.js', 'lzma-decoder.js'),
'LZMA.decompress')]).communicate()
assert os.path.exists(os.path.join(self.get_dir(), 'page.js')), 'must be side js'
assert os.path.exists(os.path.join(self.get_dir(), 'page.js.compress')), 'must be side compressed js'
assert os.stat(os.path.join(self.get_dir(), 'page.js')).st_size > os.stat(os.path.join(self.get_dir(), 'page.js.compress')).st_size, 'compressed file must be smaller'
shutil.move(os.path.join(self.get_dir(), 'page.js'), 'page.js.renamedsoitcannotbefound');
self.run_browser('page.html', '', '/report_result?1')
def test_preload_file(self):
absolute_src_path = os.path.join(self.get_dir(), 'somefile.txt').replace('\\', '/')
open(absolute_src_path, 'w').write('''load me right before running the code please''')
absolute_src_path2 = os.path.join(self.get_dir(), '.somefile.txt').replace('\\', '/')
open(absolute_src_path2, 'w').write('''load me right before running the code please''')
def make_main(path):
print 'make main at', path
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
REPORT_RESULT();
return 0;
}
''' % path))
test_cases = [
# (source preload-file string, file on target FS to load)
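# A 'src@dst' entry tells the file packager to expose the local file 'src' at
# path 'dst' inside the Emscripten virtual filesystem.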
("somefile.txt", "somefile.txt"),
(".somefile.txt@somefile.txt", "somefile.txt"),
("./somefile.txt", "somefile.txt"),
("somefile.txt@file.txt", "file.txt"),
("./somefile.txt@file.txt", "file.txt"),
("./somefile.txt@./file.txt", "file.txt"),
("somefile.txt@/file.txt", "file.txt"),
("somefile.txt@/", "somefile.txt"),
(absolute_src_path + "@file.txt", "file.txt"),
(absolute_src_path + "@/file.txt", "file.txt"),
(absolute_src_path + "@/", "somefile.txt"),
("somefile.txt@/directory/file.txt", "/directory/file.txt"),
("somefile.txt@/directory/file.txt", "directory/file.txt"),
(absolute_src_path + "@/directory/file.txt", "directory/file.txt")]
for test in test_cases:
(srcpath, dstpath) = test
print 'Testing', srcpath, dstpath
make_main(dstpath)
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', srcpath, '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Test that '--no-heap-copy' works.
make_main('somefile.txt')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', 'somefile.txt', '--no-heap-copy', '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# By absolute path
make_main('somefile.txt') # absolute becomes relative
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', absolute_src_path, '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Test subdirectory handling with asset packaging.
try_delete(self.in_dir('assets'))
os.makedirs(os.path.join(self.get_dir(), 'assets/sub/asset1/').replace('\\', '/'))
os.makedirs(os.path.join(self.get_dir(), 'assets/sub/asset1/.git').replace('\\', '/')) # Test adding directory that shouldn't exist.
os.makedirs(os.path.join(self.get_dir(), 'assets/sub/asset2/').replace('\\', '/'))
open(os.path.join(self.get_dir(), 'assets/sub/asset1/file1.txt'), 'w').write('''load me right before running the code please''')
open(os.path.join(self.get_dir(), 'assets/sub/asset1/.git/shouldnt_be_embedded.txt'), 'w').write('''this file should not get embedded''')
open(os.path.join(self.get_dir(), 'assets/sub/asset2/file2.txt'), 'w').write('''load me right before running the code please''')
absolute_assets_src_path = os.path.join(self.get_dir(), 'assets').replace('\\', '/')
def make_main_two_files(path1, path2, nonexistingpath):
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
f = fopen("%s", "r");
if (f == NULL)
result = 0;
fclose(f);
f = fopen("%s", "r");
if (f != NULL)
result = 0;
REPORT_RESULT();
return 0;
}
''' % (path1, path2, nonexistingpath)))
test_cases = [
# (source directory to embed, file1 on target FS to load, file2 on target FS to load, name of a file that *shouldn't* exist on VFS)
("assets", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@./", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/assets", "/assets/sub/asset1/file1.txt", "/assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt")]
for test in test_cases:
(srcpath, dstpath1, dstpath2, nonexistingpath) = test
make_main_two_files(dstpath1, dstpath2, nonexistingpath)
print srcpath
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', srcpath, '--exclude-file', '*/.*', '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Should still work with -o subdir/..
make_main('somefile.txt') # absolute becomes relative
try:
os.mkdir(os.path.join(self.get_dir(), 'dirrey'))
except:
pass
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', absolute_src_path, '-o', 'dirrey/page.html']).communicate()
self.run_browser('dirrey/page.html', 'You should see |load me right before|.', '/report_result?1')
# With FS.preloadFile
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.preRun = function() {
FS.createPreloadedFile('/', 'someotherfile.txt', 'somefile.txt', true, false);
};
''')
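# FS.createPreloadedFile(parent, name, url, canRead, canWrite) fetches 'url' during preload and
# registers it as 'name' under 'parent' on the virtual FS, so the packaged 'somefile.txt' should be
# readable as '/someotherfile.txt' before main() runs (the later optional callback arguments of the
# API are not used in this test).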
make_main('someotherfile.txt')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--pre-js', 'pre.js', '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
def test_preload_caching(self):
open(os.path.join(self.get_dir(), 'somefile.txt'), 'w').write('''load me right before running the code please''')
def make_main(path):
print path
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT();
return 0;
}
''' % path))
open(os.path.join(self.get_dir(), 'test.js'), 'w').write('''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
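# With '--use-preload-cache' the .data package is stored in IndexedDB on first load; on the second
# run it should be served from that cache, so Module['preloadResults'][...]['fromCache'] becomes
# true and checkPreloadResults() adds 1, which is why the second run expects result 2 instead of 1.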
make_main('somefile.txt')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--use-preload-cache', '--js-library', os.path.join(self.get_dir(), 'test.js'), '--preload-file', 'somefile.txt', '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_multifile(self):
# a few files inside a directory
self.clear()
os.makedirs(os.path.join(self.get_dir(), 'subdirr'))
os.makedirs(os.path.join(self.get_dir(), 'subdirr', 'moar'))
open(os.path.join(self.get_dir(), 'subdirr', 'data1.txt'), 'w').write('''1214141516171819''')
open(os.path.join(self.get_dir(), 'subdirr', 'moar', 'data2.txt'), 'w').write('''3.14159265358979''')
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
FILE *f2 = fopen("subdirr/moar/data2.txt", "r");
fread(buf, 1, 16, f2);
buf[16] = 0;
fclose(f2);
printf("|%s|\n", buf);
result = result && !strcmp("3.14159265358979", buf);
REPORT_RESULT();
return 0;
}
'''))
# by individual files
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', 'subdirr/data1.txt', '--preload-file', 'subdirr/moar/data2.txt', '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
os.remove('page.html')
# by directory, and remove files to make sure
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', 'subdirr', '-o', 'page.html']).communicate()
shutil.rmtree(os.path.join(self.get_dir(), 'subdirr'))
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
def test_custom_file_package_url(self):
# a few files inside a directory
self.clear()
os.makedirs(os.path.join(self.get_dir(), 'subdirr'))
os.makedirs(os.path.join(self.get_dir(), 'cdn'))
open(os.path.join(self.get_dir(), 'subdirr', 'data1.txt'), 'w').write('''1214141516171819''')
# change the file package base dir to look in a "cdn". note that normally you would add this in your own custom html file etc., and not by
# modifying the existing shell in this manner
open(self.in_dir('shell.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { filePackagePrefixURL: "cdn/", '))
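# Module.filePackagePrefixURL is prepended to the URL the generated loader uses to fetch the .data
# package, so after test.data is moved into cdn/ below, the page should still find it at "cdn/test.data".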
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
REPORT_RESULT();
return 0;
}
'''))
def test():
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'shell.html', '--preload-file', 'subdirr/data1.txt', '-o', 'test.html']).communicate()
shutil.move('test.data', os.path.join('cdn', 'test.data'))
self.run_browser('test.html', '', '/report_result?1')
test()
# TODO: CORS, test using a full url for filePackagePrefixURL
#open(self.in_dir('shell.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { filePackagePrefixURL: "http://localhost:8888/cdn/", '))
#test()
def test_compressed_file(self):
open(os.path.join(self.get_dir(), 'datafile.txt'), 'w').write('compress this please' + (2000*'.'))
open(os.path.join(self.get_dir(), 'datafile2.txt'), 'w').write('moar' + (100*'!'))
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[21];
FILE *f = fopen("datafile.txt", "r");
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("file says: |%s|\n", buf);
int result = !strcmp("compress this please", buf);
FILE *f2 = fopen("datafile2.txt", "r");
fread(buf, 1, 5, f2);
buf[5] = 0;
fclose(f2);
result = result && !strcmp("moar!", buf);
printf("file 2 says: |%s|\n", buf);
REPORT_RESULT();
return 0;
}
'''))
self.build_native_lzma()
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '-o', 'page.html', '--preload-file', 'datafile.txt', '--preload-file', 'datafile2.txt',
'--compression', '%s,%s,%s' % (path_from_root('third_party', 'lzma.js', 'lzma-native'),
path_from_root('third_party', 'lzma.js', 'lzma-decoder.js'),
'LZMA.decompress')]).communicate()
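# The '--compression <native-encoder>,<decoder.js>,<decode-function>' triple makes the packager
# compress the output with the native lzma encoder at build time and ship the JS decoder, which
# calls LZMA.decompress in the browser before the data is used; the asserts below check that the
# compressed artifacts (page.data.compress, page.js.compress) were actually produced.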
assert os.path.exists(os.path.join(self.get_dir(), 'datafile.txt')), 'must be data file'
assert os.path.exists(os.path.join(self.get_dir(), 'page.data.compress')), 'must be data file in compressed form'
assert os.stat(os.path.join(self.get_dir(), 'page.js')).st_size != os.stat(os.path.join(self.get_dir(), 'page.js.compress')).st_size, 'compressed file must be different'
shutil.move(os.path.join(self.get_dir(), 'datafile.txt'), 'datafile.txt.renamedsoitcannotbefound')
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_image(self):
# load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpg'))
open(os.path.join(self.get_dir(), 'sdl_image.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
Popen([
PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_image.c'), '-o', 'page.html', '-O2', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"'
]).communicate()
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_jpeg(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpeg'))
open(os.path.join(self.get_dir(), 'sdl_image_jpeg.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
Popen([
PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_image_jpeg.c'), '-o', 'page.html',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"'
]).communicate()
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_compressed(self):
for image, width in [(path_from_root('tests', 'screenshot2.png'), 300),
(path_from_root('tests', 'screenshot.jpg'), 600)]:
self.clear()
print image
basename = os.path.basename(image)
shutil.copyfile(image, os.path.join(self.get_dir(), basename))
open(os.path.join(self.get_dir(), 'sdl_image.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
self.build_native_lzma()
Popen([
PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_image.c'), '-o', 'page.html',
'--preload-file', basename, '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="' + basename + '"',
'--compression', '%s,%s,%s' % (path_from_root('third_party', 'lzma.js', 'lzma-native'),
path_from_root('third_party', 'lzma.js', 'lzma-decoder.js'),
'LZMA.decompress')
]).communicate()
shutil.move(os.path.join(self.get_dir(), basename), basename + '.renamedsoitcannotbefound')
self.run_browser('page.html', '', '/report_result?' + str(width))
def test_sdl_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not'])
def test_sdl_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not'])
def test_sdl_stb_image(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not'])
def test_sdl_stb_image_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image_data.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not'])
def test_sdl_canvas(self):
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1'])
# some extra coverage
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-O0', '-s', 'SAFE_HEAP=1'])
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-O2', '-s', 'SAFE_HEAP=1'])
def test_sdl_canvas_proxy(self):
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
doReftest();
setTimeout(windowClose, 1000);
};
</script>
</body>''' % open('reftest.js').read())
open('test.html', 'w').write(html)
open('data.txt', 'w').write('datum')
self.btest('sdl_canvas_proxy.c', reference='sdl_canvas_proxy.png', args=['--proxy-to-worker', '--preload-file', 'data.txt'], manual_reference=True, post_build=post)
def test_sdl_canvas_alpha(self):
self.btest('sdl_canvas_alpha.c', reference='sdl_canvas_alpha.png', reference_slack=1)
def test_sdl_key(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
function keyup(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keyup", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
''')
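# document.createEvent("KeyboardEvent") + initKeyEvent is a non-standard Gecko API, so these
# synthesized key events are only expected to fire correctly in Firefox-like browsers.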
open(os.path.join(self.get_dir(), 'sdl_key.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_key.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_key.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''']).communicate()
self.run_browser('page.html', '', '/report_result?223092870')
def test_sdl_key_proxy(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var Module = {};
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
''')
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
function keyup(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keyup", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
keydown(1250);keydown(38);keyup(38);keyup(1250); // alt, up
keydown(1248);keydown(1249);keydown(40);keyup(40);keyup(1249);keyup(1248); // ctrl, shift, down
keydown(37);keyup(37); // left
keydown(39);keyup(39); // right
keydown(65);keyup(65); // a
keydown(66);keyup(66); // b
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
open('test.html', 'w').write(html)
self.btest('sdl_key_proxy.c', '223092870', args=['--proxy-to-worker', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']'''], manual_reference=True, post_build=post)
def test_sdl_text(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(charCode) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keypress", true, true, window,
0, 0, 0, 0, 0, charCode);
document.body.dispatchEvent(event);
}
''')
open(os.path.join(self.get_dir(), 'sdl_text.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_text.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
open(os.path.join(self.get_dir(), 'sdl_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_mouse.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js']).communicate()
self.run_browser('page.html', '', '/report_result?740')
def test_sdl_mouse_offsets(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
open(os.path.join(self.get_dir(), 'page.html'), 'w').write('''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl_mouse.js"></script>
</body>
</html>
''')
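# Unlike test_sdl_mouse above, this variant serves a custom page that offsets the canvas within the
# document and dispatches events with raw page coordinates, so it exercises the conversion from page
# coordinates to canvas-relative coordinates inside the SDL mouse handling.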
open(os.path.join(self.get_dir(), 'sdl_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_mouse.c'), '-O2', '--minify', '0', '-o', 'sdl_mouse.js', '--pre-js', 'pre.js']).communicate()
self.run_browser('page.html', '', '/report_result?600')
def test_glut_touchevents(self):
self.btest('glut_touchevents.c', '1')
def test_glut_wheelevents(self):
self.btest('glut_wheelevents.c', '1')
def test_sdl_joystick_1(self):
# Generates events corresponding to the Working Draft of the HTML5 Gamepad API.
# http://www.w3.org/TR/2012/WD-gamepad-20120529/#gamepad-interface
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = 0;
};
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button] = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button] = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
open(os.path.join(self.get_dir(), 'sdl_joystick.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_joystick.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js']).communicate()
self.run_browser('page.html', '', '/report_result?2')
def test_sdl_joystick_2(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
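# The main difference from test_sdl_joystick_1 is the button representation: in the Working Draft
# buttons are plain numbers (0/1), while in the Editor's Draft each button is an object with
# 'pressed' and 'value' fields, which is what the spoofed gamepad objects above emulate.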
open(os.path.join(self.get_dir(), 'sdl_joystick.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_joystick.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js']).communicate()
self.run_browser('page.html', '', '/report_result?2')
def test_webgl_context_attributes(self):
# JavaScript code to check the attribute support we want to test in the WebGL implementation
# (request the attribute, create a context, and check its value afterwards in the context attributes).
# The tests also succeed when an attribute is not supported by the implementation.
open(os.path.join(self.get_dir(), 'check_webgl_attributes_support.js'), 'w').write('''
mergeInto(LibraryManager.library, {
webglAntialiasSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {antialias: true});
attributes = context.getContextAttributes();
return attributes.antialias;
},
webglDepthSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {depth: true});
attributes = context.getContextAttributes();
return attributes.depth;
},
webglStencilSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {stencil: true});
attributes = context.getContextAttributes();
return attributes.stencil;
}
});
''')
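# Functions merged into LibraryManager.library via '--js-library' become linkable JS implementations,
# so the C side can simply declare and call them, along the lines of (matching what the
# test_webgl_context_attributes_*.c files are assumed to do):
#   extern int webglAntialiasSupported();
#   if (webglAntialiasSupported()) { /* ... */ }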
# Copy common code file to temporary directory
filepath = path_from_root('tests/test_webgl_context_attributes_common.c')
temp_filepath = os.path.join(self.get_dir(), os.path.basename(filepath))
shutil.copyfile(filepath, temp_filepath)
# perform tests with attributes activated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED'])
# perform tests with attributes deactivated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js'])
def test_emscripten_get_now(self):
self.btest('emscripten_get_now.cpp', '1')
def test_file_db(self):
secret = str(time.time())
open('moar.txt', 'w').write(secret)
self.btest('file_db.cpp', '1', args=['--preload-file', 'moar.txt', '-DFIRST'])
shutil.copyfile('test.html', 'first.html')
self.btest('file_db.cpp', secret)
shutil.copyfile('test.html', 'second.html')
open('moar.txt', 'w').write('aliantha')
self.btest('file_db.cpp', secret, args=['--preload-file', 'moar.txt']) # even with a file there, we load over it
shutil.move('test.html', 'third.html')
def test_fs_idbfs_sync(self):
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=['-DFIRST', '-DSECRET=\'' + secret + '\'', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']'''])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=['-DSECRET=\'' + secret + '\'', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']'''])
def test_sdl_pumpevents(self):
# key events should be detected using SDL_PumpEvents
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
''')
self.btest('sdl_pumpevents.c', expected='7', args=['--pre-js', 'pre.js'])
def test_sdl_audio(self):
shutil.copyfile(path_from_root('tests', 'sounds', 'alarmvictory_1.ogg'), os.path.join(self.get_dir(), 'sound.ogg'))
shutil.copyfile(path_from_root('tests', 'sounds', 'alarmcreatemiltaryfoot_1.wav'), os.path.join(self.get_dir(), 'sound2.wav'))
shutil.copyfile(path_from_root('tests', 'sounds', 'noise.ogg'), os.path.join(self.get_dir(), 'noise.ogg'))
shutil.copyfile(path_from_root('tests', 'sounds', 'the_entertainer.ogg'), os.path.join(self.get_dir(), 'the_entertainer.ogg'))
open(os.path.join(self.get_dir(), 'bad.ogg'), 'w').write('I claim to be audio, but am lying')
open(os.path.join(self.get_dir(), 'sdl_audio.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_audio.c')).read()))
# use closure to check for a possible bug with closure minifying away newer Audio() attributes
Popen([PYTHON, EMCC, '-O2', '--closure', '1', '--minify', '0', os.path.join(self.get_dir(), 'sdl_audio.c'), '--preload-file', 'sound.ogg', '--preload-file', 'sound2.wav', '--embed-file', 'the_entertainer.ogg', '--preload-file', 'noise.ogg', '--preload-file', 'bad.ogg', '-o', 'page.html', '-s', 'EXPORTED_FUNCTIONS=["_main", "_play", "_play2"]']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_audio_mix_channels(self):
shutil.copyfile(path_from_root('tests', 'sounds', 'noise.ogg'), os.path.join(self.get_dir(), 'sound.ogg'))
open(os.path.join(self.get_dir(), 'sdl_audio_mix_channels.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_audio_mix_channels.c')).read()))
Popen([PYTHON, EMCC, '-O2', '--minify', '0', os.path.join(self.get_dir(), 'sdl_audio_mix_channels.c'), '--preload-file', 'sound.ogg', '-o', 'page.html']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_audio_mix(self):
shutil.copyfile(path_from_root('tests', 'sounds', 'pluck.ogg'), os.path.join(self.get_dir(), 'sound.ogg'))
shutil.copyfile(path_from_root('tests', 'sounds', 'the_entertainer.ogg'), os.path.join(self.get_dir(), 'music.ogg'))
shutil.copyfile(path_from_root('tests', 'sounds', 'noise.ogg'), os.path.join(self.get_dir(), 'noise.ogg'))
open(os.path.join(self.get_dir(), 'sdl_audio_mix.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_audio_mix.c')).read()))
Popen([PYTHON, EMCC, '-O2', '--minify', '0', os.path.join(self.get_dir(), 'sdl_audio_mix.c'), '--preload-file', 'sound.ogg', '--preload-file', 'music.ogg', '--preload-file', 'noise.ogg', '-o', 'page.html']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_audio_quickload(self):
open(os.path.join(self.get_dir(), 'sdl_audio_quickload.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_audio_quickload.c')).read()))
Popen([PYTHON, EMCC, '-O2', '--minify', '0', os.path.join(self.get_dir(), 'sdl_audio_quickload.c'), '-o', 'page.html', '-s', 'EXPORTED_FUNCTIONS=["_main", "_play"]']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_audio_beeps(self):
if os.environ.get('EMCC_FAST_COMPILER') == '1': return self.skip('todo c++ exceptions in fastcomp')
open(os.path.join(self.get_dir(), 'sdl_audio_beep.cpp'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_audio_beep.cpp')).read()))
# use closure to check for a possible bug with closure minifying away newer Audio() attributes
Popen([PYTHON, EMCC, '-O2', '--closure', '1', '--minify', '0', os.path.join(self.get_dir(), 'sdl_audio_beep.cpp'), '-s', 'DISABLE_EXCEPTION_CATCHING=0', '-o', 'page.html']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_canvas_size(self):
self.btest('sdl_canvas_size.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify', '0', '--shell-file', path_from_root('tests', 'sdl_canvas_size.html'), '--preload-file', path_from_root('tests', 'screenshot.png') + '@/', '-s', 'LEGACY_GL_EMULATION=1'],
message='You should see an image with gray at the top.')
def test_sdl_gl_read(self):
# SDL, OpenGL, readPixels
open(os.path.join(self.get_dir(), 'sdl_gl_read.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_gl_read.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_gl_read.c'), '-o', 'something.html']).communicate()
self.run_browser('something.html', '.', '/report_result?1')
def test_sdl_ogl(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
message='You should see an image with gray at the top.')
def test_sdl_ogl_defaultmatrixmode(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl_defaultMatrixMode.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
message='You should see an image with gray at the top.')
def test_sdl_ogl_p(self):
# Immediate mode with pointers
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl_p.c', reference='screenshot-gray.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
message='You should see an image with gray at the top.')
def test_sdl_ogl_proc_alias(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl_proc_alias.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '-g2', '-s', 'INLINING_LIMIT=1', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-s', 'VERBOSE=1'])
def test_sdl_fog_simple(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
message='You should see an image with fog.')
def test_sdl_fog_negative(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_negative.c', reference='screenshot-fog-negative.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
message='You should see an image with fog.')
def test_sdl_fog_density(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_density.c', reference='screenshot-fog-density.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
message='You should see an image with fog.')
def test_sdl_fog_exp2(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
message='You should see an image with fog.')
def test_sdl_fog_linear(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
message='You should see an image with fog.')
def test_openal_playback(self):
shutil.copyfile(path_from_root('tests', 'sounds', 'audio.wav'), os.path.join(self.get_dir(), 'audio.wav'))
open(os.path.join(self.get_dir(), 'openal_playback.cpp'), 'w').write(self.with_report_result(open(path_from_root('tests', 'openal_playback.cpp')).read()))
Popen([PYTHON, EMCC, '-O2', os.path.join(self.get_dir(), 'openal_playback.cpp'), '--preload-file', 'audio.wav', '-o', 'page.html']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_openal_buffers(self):
shutil.copyfile(path_from_root('tests', 'sounds', 'the_entertainer.wav'), os.path.join(self.get_dir(), 'the_entertainer.wav'))
self.btest('openal_buffers.c', '0', args=['--preload-file', 'the_entertainer.wav'],)
def test_glfw(self):
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_egl(self):
open(os.path.join(self.get_dir(), 'test_egl.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'test_egl.c')).read()))
Popen([PYTHON, EMCC, '-O2', os.path.join(self.get_dir(), 'test_egl.c'), '-o', 'page.html']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_egl_width_height(self):
open(os.path.join(self.get_dir(), 'test_egl_width_height.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'test_egl_width_height.c')).read()))
Popen([PYTHON, EMCC, '-O2', os.path.join(self.get_dir(), 'test_egl_width_height.c'), '-o', 'page.html']).communicate()
self.run_browser('page.html', 'Should print "(300, 150)" -- the size of the canvas in pixels', '/report_result?1')
def get_freealut_library(self):
if WINDOWS and Building.which('cmake'):
return self.get_library('freealut', os.path.join('hello_world.bc'), configure=['cmake', '.'], configure_args=['-DBUILD_TESTS=ON'])
else:
return self.get_library('freealut', os.path.join('examples', '.libs', 'hello_world.bc'), make_args=['EXEEXT=.bc'])
def test_freealut(self):
programs = self.get_freealut_library()
for program in programs:
assert os.path.exists(program)
Popen([PYTHON, EMCC, '-O2', program, '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should hear "Hello World!"')
def test_worker(self):
# Test running in a web worker
open('file.dat', 'w').write('data for worker')
html_file = open('main.html', 'w')
html_file.write('''
<html>
<body>
Worker Test
<script>
var worker = new Worker('worker.js');
worker.onmessage = function(event) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + event.data);
xhr.send();
setTimeout(function() { window.close() }, 1000);
};
</script>
</body>
</html>
''')
html_file.close()
# run once without preloaded file data and once with it
for file_data in [0, 1]:
print 'file data', file_data
output = Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_worker.cpp'), '-o', 'worker.js'] + (['--preload-file', 'file.dat'] if file_data else []) , stdout=PIPE, stderr=PIPE).communicate()
assert len(output[0]) == 0, output[0]
assert os.path.exists('worker.js'), output
if not file_data: self.assertContained('you should not see this text when in a worker!', run_js('worker.js')) # code should run standalone
self.run_browser('main.html', '', '/report_result?hello%20from%20worker,%20and%20|' + ('data%20for%20w' if file_data else '') + '|')
def test_chunked_synchronous_xhr(self):
main = 'chunked_sync_xhr.html'
worker_filename = "download_and_checksum_worker.js"
html_file = open(main, 'w')
html_file.write(r"""
<!doctype html>
<html>
<head><meta charset="utf-8"><title>Chunked XHR</title></head>
<html>
<body>
Chunked XHR Web Worker Test
<script>
var worker = new Worker(""" + json.dumps(worker_filename) + r""");
var buffer = [];
worker.onmessage = function(event) {
if (event.data.channel === "stdout") {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + event.data.line);
xhr.send();
setTimeout(function() { window.close() }, 1000);
} else {
if (event.data.trace) event.data.trace.split("\n").map(function(v) { console.error(v); });
if (event.data.line) {
console.error(event.data.line);
} else {
var v = event.data.char;
if (v == 10) {
var line = buffer.splice(0);
console.error(line = line.map(function(charCode){return String.fromCharCode(charCode);}).join(''));
} else {
buffer.push(v);
}
}
}
};
</script>
</body>
</html>
""")
html_file.close()
c_source_filename = "checksummer.c"
prejs_filename = "worker_prejs.js"
prejs_file = open(prejs_filename, 'w')
prejs_file.write(r"""
if (typeof(Module) === "undefined") Module = {};
Module["arguments"] = ["/bigfile"];
Module["preInit"] = function() {
FS.createLazyFile('/', "bigfile", "http://localhost:11111/bogus_file_path", true, false);
};
var doTrace = true;
Module["print"] = function(s) { self.postMessage({channel: "stdout", line: s}); };
Module["stderr"] = function(s) { self.postMessage({channel: "stderr", char: s, trace: ((doTrace && s === 10) ? new Error().stack : null)}); doTrace = false; };
""")
prejs_file.close()
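# FS.createLazyFile registers '/bigfile' as a file whose contents are fetched on demand over XHR
# (from the local test server started below) instead of being downloaded up front; SMALL_CHUNKS=1 is
# presumably set so the lazy reads are issued as many small range requests, which is what the
# chunked server and the checksum comparison are exercising.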
# vs. os.path.join(self.get_dir(), filename)
# vs. path_from_root('tests', 'hello_world_gles.c')
Popen([PYTHON, EMCC, path_from_root('tests', c_source_filename), '-g', '-s', 'SMALL_CHUNKS=1', '-o', worker_filename,
'--pre-js', prejs_filename]).communicate()
chunkSize = 1024
data = os.urandom(10*chunkSize+1) # 10 full chunks and one 1 byte chunk
checksum = zlib.adler32(data)
server = multiprocessing.Process(target=test_chunked_synchronous_xhr_server, args=(True,chunkSize,data,checksum,))
server.start()
self.run_browser(main, 'Chunked binary synchronous XHR in Web Workers!', '/report_result?' + str(checksum))
server.terminate()
# Avoid race condition on cleanup, wait a bit so that processes have released file locks so that test tearDown won't
# attempt to rmdir() files in use.
if WINDOWS:
time.sleep(2)
def test_glgears(self):
self.btest('hello_world_gles.c', reference='gears.png', reference_slack=1,
args=['-DHAVE_BUILTIN_SINCOS'], outfile='something.html',
message='You should see animating gears.')
def test_glgears_animation(self):
es2_suffix = ['', '_full', '_full_944']
for full_es2 in [0, 1, 2]:
print full_es2
Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_gles%s.c' % es2_suffix[full_es2]), '-o', 'something.html',
'-DHAVE_BUILTIN_SINCOS', '-s', 'GL_TESTING=1',
'--shell-file', path_from_root('tests', 'hello_world_gles_shell.html')] +
(['-s', 'FULL_ES2=1'] if full_es2 else []),
).communicate()
self.run_browser('something.html', 'You should see animating gears.', '/report_gl_result?true')
def test_fulles2_sdlproc(self):
self.btest('full_es2_sdlproc.c', '1', args=['-s', 'GL_TESTING=1', '-DHAVE_BUILTIN_SINCOS', '-s', 'FULL_ES2=1'])
def test_glgears_deriv(self):
self.btest('hello_world_gles_deriv.c', reference='gears.png', reference_slack=1,
args=['-DHAVE_BUILTIN_SINCOS'], outfile='something.html',
message='You should see animating gears.')
with open('something.html') as f:
assert 'gl-matrix' not in f.read(), 'Should not include glMatrix when not needed'
def test_glbook(self):
programs = self.get_library('glbook', [
os.path.join('Chapter_2', 'Hello_Triangle', 'CH02_HelloTriangle.bc'),
os.path.join('Chapter_8', 'Simple_VertexShader', 'CH08_SimpleVertexShader.bc'),
os.path.join('Chapter_9', 'Simple_Texture2D', 'CH09_SimpleTexture2D.bc'),
os.path.join('Chapter_9', 'Simple_TextureCubemap', 'CH09_TextureCubemap.bc'),
os.path.join('Chapter_9', 'TextureWrap', 'CH09_TextureWrap.bc'),
os.path.join('Chapter_10', 'MultiTexture', 'CH10_MultiTexture.bc'),
os.path.join('Chapter_13', 'ParticleSystem', 'CH13_ParticleSystem.bc'),
], configure=None)
def book_path(*pathelems):
return path_from_root('tests', 'glbook', *pathelems)
for program in programs:
print program
basename = os.path.basename(program)
args = []
if basename == 'CH10_MultiTexture.bc':
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'basemap.tga'), os.path.join(self.get_dir(), 'basemap.tga'))
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'lightmap.tga'), os.path.join(self.get_dir(), 'lightmap.tga'))
args = ['--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga']
elif basename == 'CH13_ParticleSystem.bc':
shutil.copyfile(book_path('Chapter_13', 'ParticleSystem', 'smoke.tga'), os.path.join(self.get_dir(), 'smoke.tga'))
args = ['--preload-file', 'smoke.tga', '-O2'] # test optimizations and closure here as well for more coverage
self.btest(program,
reference=book_path(basename.replace('.bc', '.png')), args=args)
def test_gles2_emulation(self):
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'basemap.tga'), self.in_dir('basemap.tga'))
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'lightmap.tga'), self.in_dir('lightmap.tga'))
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_13', 'ParticleSystem', 'smoke.tga'), self.in_dir('smoke.tga'))
for source, reference in [
(os.path.join('glbook', 'Chapter_2', 'Hello_Triangle', 'Hello_Triangle_orig.c'), path_from_root('tests', 'glbook', 'CH02_HelloTriangle.png')),
#(os.path.join('glbook', 'Chapter_8', 'Simple_VertexShader', 'Simple_VertexShader_orig.c'), path_from_root('tests', 'glbook', 'CH08_SimpleVertexShader.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'TextureWrap', 'TextureWrap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureWrap.png')),
#(os.path.join('glbook', 'Chapter_9', 'Simple_TextureCubemap', 'Simple_TextureCubemap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureCubemap.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'Simple_Texture2D', 'Simple_Texture2D_orig.c'), path_from_root('tests', 'glbook', 'CH09_SimpleTexture2D.png')),
(os.path.join('glbook', 'Chapter_10', 'MultiTexture', 'MultiTexture_orig.c'), path_from_root('tests', 'glbook', 'CH10_MultiTexture.png')),
(os.path.join('glbook', 'Chapter_13', 'ParticleSystem', 'ParticleSystem_orig.c'), path_from_root('tests', 'glbook', 'CH13_ParticleSystem.png')),
]:
print source
self.btest(source,
reference=reference,
args=['-I' + path_from_root('tests', 'glbook', 'Common'),
path_from_root('tests', 'glbook', 'Common', 'esUtil.c'),
path_from_root('tests', 'glbook', 'Common', 'esShader.c'),
path_from_root('tests', 'glbook', 'Common', 'esShapes.c'),
path_from_root('tests', 'glbook', 'Common', 'esTransform.c'),
'-s', 'FULL_ES2=1',
'--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga', '--preload-file', 'smoke.tga'])
def test_emscripten_api(self):
self.btest('emscripten_api_browser.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_third']'''])
def test_emscripten_api2(self):
open('script1.js', 'w').write('''
Module._set(456);
''')
open('file1.txt', 'w').write('first')
open('file2.txt', 'w').write('second')
Popen([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w')).communicate()
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']'''])
def test_emscripten_api_infloop(self):
self.btest('emscripten_api_browser_infloop.cpp', '7')
def test_emscripten_fs_api(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png')) # preloaded *after* run
self.btest('emscripten_fs_api_browser.cpp', '1')
def test_sdl_quit(self):
self.btest('sdl_quit.c', '1')
def test_sdl_resize(self):
self.btest('sdl_resize.c', '1')
def test_gc(self):
if os.environ.get('EMCC_FAST_COMPILER') == '1': return self.skip('flaky in fastcomp and also non-fastcomp -O1, timing issues')
self.btest('browser_gc.cpp', '1')
def test_glshaderinfo(self):
self.btest('glshaderinfo.cpp', '1')
def test_glgetattachedshaders(self):
self.btest('glgetattachedshaders.c', '1')
def test_sdlglshader(self):
self.btest('sdlglshader.c', reference='sdlglshader.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1'])
def test_gl_ps(self):
# pointers and a shader
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('gl_ps.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'], reference_slack=1)
def test_gl_ps_packed(self):
# packed data that needs to be strided
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('gl_ps_packed.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'], reference_slack=1)
def test_gl_ps_strides(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('gl_ps_strides.c', reference='gl_ps_strides.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'])
def test_gl_renderers(self):
self.btest('gl_renderers.c', reference='gl_renderers.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1'])
def test_gl_stride(self):
self.btest('gl_stride.c', reference='gl_stride.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1'])
def test_gl_vertex_buffer_pre(self):
self.btest('gl_vertex_buffer_pre.c', reference='gl_vertex_buffer_pre.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1'])
def test_gl_vertex_buffer(self):
self.btest('gl_vertex_buffer.c', reference='gl_vertex_buffer.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1'], reference_slack=1)
# Does not pass due to https://bugzilla.mozilla.org/show_bug.cgi?id=924264 so disabled for now.
# def test_gles2_uniform_arrays(self):
# self.btest('gles2_uniform_arrays.cpp', args=['-s', 'GL_ASSERTIONS=1'], expected=['1'])
def test_gles2_conformance(self):
self.btest('gles2_conformance.cpp', args=['-s', 'GL_ASSERTIONS=1'], expected=['1'])
def test_matrix_identity(self):
self.btest('gl_matrix_identity.c', expected=['-1882984448', '460451840'], args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_pre(self):
self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_pre2(self):
self.btest('cubegeom_pre2.c', reference='cubegeom_pre2.png', args=['-s', 'GL_DEBUG=1', '-s', 'LEGACY_GL_EMULATION=1']) # some coverage for GL_DEBUG not breaking the build
def test_cubegeom_pre3(self):
self.btest('cubegeom_pre3.c', reference='cubegeom_pre2.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom(self):
self.btest('cubegeom.c', reference='cubegeom.png', args=['-O2', '-g', '-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_glew(self):
self.btest('cubegeom_glew.c', reference='cubegeom.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_color(self):
self.btest('cubegeom_color.c', reference='cubegeom_color.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_normal(self):
self.btest('cubegeom_normal.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_normal_dap(self): # draw is given a direct pointer to clientside memory, no element array buffer
self.btest('cubegeom_normal_dap.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_normal_dap_far(self): # indices do not start from 0
self.btest('cubegeom_normal_dap_far.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_normal_dap_far_range(self): # glDrawRangeElements
self.btest('cubegeom_normal_dap_far_range.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_normal_dap_far_glda(self): # use glDrawArrays
self.btest('cubegeom_normal_dap_far_glda.c', reference='cubegeom_normal_dap_far_glda.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_normal_dap_far_glda_quad(self): # with quad
self.btest('cubegeom_normal_dap_far_glda_quad.c', reference='cubegeom_normal_dap_far_glda_quad.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_mt(self):
self.btest('cubegeom_mt.c', reference='cubegeom_mt.png', args=['-s', 'LEGACY_GL_EMULATION=1']) # multitexture
def test_cubegeom_color2(self):
self.btest('cubegeom_color2.c', reference='cubegeom_color2.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_texturematrix(self):
self.btest('cubegeom_texturematrix.c', reference='cubegeom_texturematrix.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_fog(self):
self.btest('cubegeom_fog.c', reference='cubegeom_fog.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_pre_vao(self):
self.btest('cubegeom_pre_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_pre2_vao(self):
self.btest('cubegeom_pre2_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_pre2_vao2(self):
self.btest('cubegeom_pre2_vao2.c', reference='cubegeom_pre2_vao2.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cube_explosion(self):
self.btest('cube_explosion.c', reference='cube_explosion.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_glgettexenv(self):
self.btest('glgettexenv.c', args=['-s', 'LEGACY_GL_EMULATION=1'], expected=['1'])
def test_sdl_canvas_blank(self):
self.btest('sdl_canvas_blank.c', reference='sdl_canvas_blank.png')
def test_sdl_canvas_palette(self):
self.btest('sdl_canvas_palette.c', reference='sdl_canvas_palette.png')
def test_sdl_canvas_twice(self):
self.btest('sdl_canvas_twice.c', reference='sdl_canvas_twice.png')
def test_sdl_maprgba(self):
self.btest('sdl_maprgba.c', reference='sdl_maprgba.png', reference_slack=3)
def test_sdl_rotozoom(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_rotozoom.c', reference='sdl_rotozoom.png', args=['--preload-file', 'screenshot.png'])
def test_sdl_gfx_primitives(self):
self.btest('sdl_gfx_primitives.c', reference='sdl_gfx_primitives.png', reference_slack=1)
def test_sdl_canvas_palette_2(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module['preRun'].push(function() {
SDL.defaults.copyOnLock = false;
});
''')
open(os.path.join(self.get_dir(), 'args-r.js'), 'w').write('''
Module['arguments'] = ['-r'];
''')
open(os.path.join(self.get_dir(), 'args-g.js'), 'w').write('''
Module['arguments'] = ['-g'];
''')
open(os.path.join(self.get_dir(), 'args-b.js'), 'w').write('''
Module['arguments'] = ['-b'];
''')
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-r.js'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-g.js'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-b.js'])
def test_sdl_alloctext(self):
self.btest('sdl_alloctext.c', expected='1', args=['-O2', '-s', 'TOTAL_MEMORY=' + str(1024*1024*8)])
def test_sdl_surface_refcount(self):
self.btest('sdl_surface_refcount.c', expected='1')
def test_glbegin_points(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('glbegin_points.c', reference='glbegin_points.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'])
def test_s3tc(self):
shutil.copyfile(path_from_root('tests', 'screenshot.dds'), os.path.join(self.get_dir(), 'screenshot.dds'))
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION=1'])
def test_s3tc_ffp_only(self):
shutil.copyfile(path_from_root('tests', 'screenshot.dds'), os.path.join(self.get_dir(), 'screenshot.dds'))
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION=1', '-s', 'GL_FFP_ONLY=1'])
def test_s3tc_crunch(self):
shutil.copyfile(path_from_root('tests', 'ship.dds'), 'ship.dds')
shutil.copyfile(path_from_root('tests', 'bloom.dds'), 'bloom.dds')
shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
Popen([PYTHON, FILE_PACKAGER, 'test.data', '--crunch', '--preload', 'ship.dds', 'bloom.dds', 'water.dds'], stdout=open('pre.js', 'w')).communicate()
assert os.stat('test.data').st_size < 0.5*(os.stat('ship.dds').st_size+os.stat('bloom.dds').st_size+os.stat('water.dds').st_size), 'Compressed should be smaller than dds'
shutil.move('ship.dds', 'ship.donotfindme.dds') # make sure we load from the compressed
shutil.move('bloom.dds', 'bloom.donotfindme.dds') # make sure we load from the compressed
shutil.move('water.dds', 'water.donotfindme.dds') # make sure we load from the compressed
self.btest('s3tc_crunch.c', reference='s3tc_crunch.png', reference_slack=11, args=['--pre-js', 'pre.js', '-s', 'LEGACY_GL_EMULATION=1'])
def test_s3tc_crunch_split(self): # load several datafiles/outputs of file packager
shutil.copyfile(path_from_root('tests', 'ship.dds'), 'ship.dds')
shutil.copyfile(path_from_root('tests', 'bloom.dds'), 'bloom.dds')
shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
Popen([PYTHON, FILE_PACKAGER, 'asset_a.data', '--crunch', '--preload', 'ship.dds', 'bloom.dds'], stdout=open('asset_a.js', 'w')).communicate()
Popen([PYTHON, FILE_PACKAGER, 'asset_b.data', '--crunch', '--preload', 'water.dds'], stdout=open('asset_b.js', 'w')).communicate()
shutil.move('ship.dds', 'ship.donotfindme.dds') # make sure we load from the compressed
shutil.move('bloom.dds', 'bloom.donotfindme.dds') # make sure we load from the compressed
shutil.move('water.dds', 'water.donotfindme.dds') # make sure we load from the compressed
self.btest('s3tc_crunch.c', reference='s3tc_crunch.png', reference_slack=11, args=['--pre-js', 'asset_a.js', '--pre-js', 'asset_b.js', '-s', 'LEGACY_GL_EMULATION=1'])
def test_aniso(self):
if SPIDERMONKEY_ENGINE in JS_ENGINES and os.environ.get('EMCC_FAST_COMPILER') != '1':
# asm.js-ification check
Popen([PYTHON, EMCC, path_from_root('tests', 'aniso.c'), '-O2', '-g2', '-s', 'LEGACY_GL_EMULATION=1']).communicate()
Settings.ASM_JS = 1
self.run_generated_code(SPIDERMONKEY_ENGINE, 'a.out.js')
print 'passed asm test'
shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
self.btest('aniso.c', reference='aniso.png', reference_slack=2, args=['--preload-file', 'water.dds', '-s', 'LEGACY_GL_EMULATION=1'])
def test_tex_nonbyte(self):
self.btest('tex_nonbyte.c', reference='tex_nonbyte.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_float_tex(self):
self.btest('float_tex.cpp', reference='float_tex.png')
def test_subdata(self):
self.btest('gl_subdata.cpp', reference='float_tex.png')
def test_perspective(self):
self.btest('perspective.c', reference='perspective.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_runtimelink(self):
return self.skip('BUILD_AS_SHARED_LIB=2 is deprecated')
main, supp = self.setup_runtimelink_test()
open(self.in_dir('supp.cpp'), 'w').write(supp)
Popen([PYTHON, EMCC, self.in_dir('supp.cpp'), '-o', 'supp.js', '-s', 'LINKABLE=1', '-s', 'NAMED_GLOBALS=1', '-s', 'BUILD_AS_SHARED_LIB=2', '-O2', '-s', 'ASM_JS=0']).communicate()
shutil.move(self.in_dir('supp.js'), self.in_dir('supp.so'))
self.btest(main, args=['-s', 'LINKABLE=1', '-s', 'NAMED_GLOBALS=1', '-s', 'RUNTIME_LINKED_LIBS=["supp.so"]', '-DBROWSER=1', '-O2', '-s', 'ASM_JS=0'], expected='76')
def test_pre_run_deps(self):
# Adding a dependency in preRun will delay run
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.preRun = function() {
addRunDependency();
Module.print('preRun called, added a dependency...');
setTimeout(function() {
Module.okk = 10;
removeRunDependency()
}, 2000);
};
''')
for mem in [0, 1]:
self.btest('pre_run_deps.cpp', expected='10', args=['--pre-js', 'pre.js', '--memory-init-file', str(mem)])
def test_worker_api(self):
Popen([PYTHON, EMCC, path_from_root('tests', 'worker_api_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]']).communicate()
self.btest('worker_api_main.cpp', expected='566')
def test_worker_api_2(self):
Popen([PYTHON, EMCC, path_from_root('tests', 'worker_api_2_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-O2', '--minify', '0', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two", "_three", "_four"]']).communicate()
self.btest('worker_api_2_main.cpp', args=['-O2', '--minify', '0'], expected='11')
def test_emscripten_async_wget2(self):
self.btest('http.cpp', expected='0', args=['-I' + path_from_root('tests')])
def test_module(self):
if os.environ.get('EMCC_FAST_COMPILER') == '1': return self.skip('todo in fastcomp')
Popen([PYTHON, EMCC, path_from_root('tests', 'browser_module.cpp'), '-o', 'module.js', '-O2', '-s', 'SIDE_MODULE=1', '-s', 'DLOPEN_SUPPORT=1', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two"]']).communicate()
self.btest('browser_main.cpp', args=['-O2', '-s', 'MAIN_MODULE=1', '-s', 'DLOPEN_SUPPORT=1'], expected='8')
def test_mmap_file(self):
open(self.in_dir('data.dat'), 'w').write('data from the file ' + ('.' * 9000))
for extra_args in [[], ['--no-heap-copy']]:
self.btest(path_from_root('tests', 'mmap_file.c'), expected='1', args=['--preload-file', 'data.dat'] + extra_args)
def test_emrun_info(self):
result = subprocess.check_output([PYTHON, path_from_root('emrun'), '--system_info', '--browser_info'])
assert 'CPU' in result
assert 'Browser' in result
assert 'Traceback' not in result
result = subprocess.check_output([PYTHON, path_from_root('emrun'), '--list_browsers'])
assert 'Traceback' not in result
def test_emrun(self):
Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_exit.c'), '--emrun', '-o', 'hello_world.html']).communicate()
outdir = os.getcwd()
    # We cannot run emrun from the temp directory that the test suite cleans up afterwards, since the
    # launched browser will have that directory as its startup directory and will not close as part of
    # the test, which pins down the cwd on Windows and makes it impossible to delete. Therefore switch
    # away from that directory before launching.
os.chdir(path_from_root())
args = [PYTHON, path_from_root('emrun'), '--timeout', '30', '--verbose', os.path.join(outdir, 'hello_world.html'), '1', '2', '3', '--log_stdout', os.path.join(outdir, 'stdout.txt'), '--log_stderr', os.path.join(outdir, 'stderr.txt')]
if emscripten_browser is not None:
args += ['--browser', emscripten_browser]
process = subprocess.Popen(args)
process.communicate()
stdout = open(os.path.join(outdir, 'stdout.txt'), 'r').read()
stderr = open(os.path.join(outdir, 'stderr.txt'), 'r').read()
assert process.returncode == 100
assert 'argc: 4' in stdout
assert 'argv[3]: 3' in stdout
assert 'hello, world!' in stdout
assert 'hello, error stream!' in stderr
def test_uuid(self):
# Run with ./runner.py browser.test_uuid
    # We run this test in Node/SPIDERMONKEY and browser environments because we try to make use of
    # high-quality crypto random number generators such as crypto.getRandomValues or randomBytes (if available).
    # First run the tests in Node and/or SPIDERMONKEY using run_js. Use the closure compiler so we can check that
    # require('crypto').randomBytes and window.crypto.getRandomValues don't get minified out.
Popen([PYTHON, EMCC, '-O2', '--closure', '1', path_from_root('tests', 'uuid', 'test.c'), '-o', path_from_root('tests', 'uuid', 'test.js')], stdout=PIPE, stderr=PIPE).communicate()
test_js_closure = open(path_from_root('tests', 'uuid', 'test.js')).read()
# Check that test.js compiled with --closure 1 contains ").randomBytes" and "window.crypto.getRandomValues"
assert ").randomBytes" in test_js_closure
assert "window.crypto.getRandomValues" in test_js_closure
out = run_js(path_from_root('tests', 'uuid', 'test.js'), full_output=True)
print out
# Tidy up files that might have been created by this test.
try_delete(path_from_root('tests', 'uuid', 'test.js'))
try_delete(path_from_root('tests', 'uuid', 'test.js.map'))
# Now run test in browser
self.btest(path_from_root('tests', 'uuid', 'test.c'), '1')
def test_glew(self):
self.btest(path_from_root('tests', 'glew.c'), expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-s', 'LEGACY_GL_EMULATION=1'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-DGLEW_MX'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-s', 'LEGACY_GL_EMULATION=1', '-DGLEW_MX'], expected='1')
def test_doublestart_bug(self):
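    # The pre.js below adds and then immediately removes a run dependency in preRun;
    # the test verifies that run() is still triggered only once (hence "doublestart").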
open('pre.js', 'w').write(r'''
if (typeof Module === 'undefined') Module = eval('(function() { try { return Module || {} } catch(e) { return {} } })()');
if (!Module['preRun']) Module['preRun'] = [];
Module["preRun"].push(function () {
Module['addRunDependency']('test_run_dependency');
Module['removeRunDependency']('test_run_dependency');
});
''')
self.btest('doublestart.c', args=['--pre-js', 'pre.js', '-o', 'test.html'], expected='1')
def test_html5(self):
self.btest(path_from_root('tests', 'test_html5.c'), expected='0')
|
tesis4.py
|
from multiprocessing import Process, Queue
import mysql.connector
import cv2
import numpy as np
from datetime import datetime
def run_camara(direccion, arreglo_jugadores, cola, indice_proceso_actual):
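    # Worker process body: read frames from `direccion`, score every registered player's
    # reference image against the current frame via ORB/FLANN matching, and push
    # [best_match, keep_running, frame, fps, width, height] onto `cola` for the main process.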
cap = cv2.VideoCapture(direccion)
jugador_actual = None
continuar1 = True
if cap.isOpened():
while continuar1:
coincidencias = []
ret, frame = cap.read()
if ret:
for jugador in arreglo_jugadores:
try:
coincidencias.append([jugador[0], video_feature_matching1(frame, "img/jugadores/" + str(jugador[9])), jugador[7]])
except Exception as e:
print("exception0: " + str(e))
coincidencias.append([jugador[0], 0, jugador[5]])
mayor1 = 0
for i in coincidencias:
if i[1] > mayor1:
mayor1 = i[1]
jugador_actual = i
cola.put([jugador_actual, continuar1, frame,
cap.get(cv2.CAP_PROP_FPS), int(cap.get(3)), int(cap.get(4))])
elif cv2.waitKey(1) & 0xFF == 27:
continuar1 = False
break
else:
continuar1 = False
break
cap.release()
else:
print("error")
def video_feature_matching1(frame, base):
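    """Count the ORB/FLANN feature matches between the reference image at `base`
    and `frame`; the caller uses the count as a similarity score."""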
    img1 = cv2.imread(base, 0)  # base object (reference image of the player)
    img2 = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # frame to analyze
    # Initialize ORB
    orb = cv2.ORB_create()
    # Find the keypoints and descriptors
    kp1, des1 = orb.detectAndCompute(img1, None)
    kp2, des2 = orb.detectAndCompute(img2, None)
    # FLANN parameters
    FLANN_INDEX_LSH = 6
    index_params = dict(algorithm=FLANN_INDEX_LSH, table_number=6, key_size=12, multi_probe_level=2)  # 2
    search_params = dict(checks=1)  # number of checks
    # Initialize FLANN
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    good = []
    for pair in flann.knnMatch(des1, des2, k=2):
        # With LSH, knnMatch can return fewer than k neighbours for a descriptor; skip those
        if len(pair) < 2:
            continue
        m, n = pair
        if m.distance < 0.75 * n.distance:  # Lowe's ratio test
            good.append(m)
    return len(good)
if __name__ == '__main__':
COLA1 = Queue()
COLA2 = Queue()
MYDB = mysql.connector.connect(host="localhost", user="root", passwd="", database="videoroom")
MYCURSOR = MYDB.cursor(buffered=True)
MYCURSOR.execute("SELECT * FROM jugador WHERE R32=1")
JUGADORES = MYCURSOR.fetchall()
CAMARA1 = Process(target=run_camara, args=('ejecutar/video_2019-04-16_20-30-37.mp4', JUGADORES, COLA1, 1))
CAMARA2 = Process(target=run_camara, args=('ejecutar/video_2019-04-25_15-35-02.mp4', JUGADORES, COLA2, 2))
CAMARA1.start()
CAMARA2.start()
indice_proceso = 0
ult_jug = 0
nro_cam = 0
tolerancia = 60
finished = False
out_name = ""
out = None
codec = cv2.VideoWriter_fourcc(*"mp4v")
MYCURSOR.execute("SELECT juego_actual FROM control")
juego_actual = MYCURSOR.fetchone()[0]
def seleccion_insercion_grabado(camara_secundaria, jugador_mayor):
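        # When the best-matched player changes (or the tolerance window has expired):
        # close and register the current clip in `video`, work out the current inning,
        # insert a new row in `turnos`, and open a fresh VideoWriter fed with frames
        # from the secondary camera.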
        global ult_jug, nro_cam, tolerancia, finished, out, out_name, codec
#cola.put([jugador_actual, continuar, frame, fps, width, height])
if ult_jug != jugador_mayor[0] or tolerancia >= 60:
if ult_jug != jugador_mayor[0] and tolerancia < 60:
MYCURSOR.execute("SELECT id FROM turnos WHERE id_calendario=" + str(juego_actual) + " ORDER BY id DESC LIMIT 1")
ultimo_turno1 = MYCURSOR.fetchone()[0]
MYCURSOR.execute("INSERT INTO video(direccion, id_turno, camara) VALUES (%s, %s, %s)", ("videos/" + out_name, ultimo_turno1, nro_cam))
MYDB.commit()
finished = False
out.release()
MYCURSOR.execute("SELECT COUNT(id), inning FROM turnos WHERE id_calendario=" +
str(juego_actual) + " ORDER BY id DESC LIMIT 1")
datos_turnos_inning = MYCURSOR.fetchone()
nro_turnos = datos_turnos_inning[0]
if nro_turnos < 1:
inning_actual = 1
else:
inning_actual = datos_turnos_inning[1]
MYCURSOR.execute("SELECT posicion FROM turnos WHERE id_calendario=" +
str(juego_actual) + " ORDER BY id DESC LIMIT 1")
ult_jug_pos = MYCURSOR.fetchone()[0]
if ult_jug_pos != "PT" and jugador_mayor[2] == "PT":
inning_actual += 1
hora = datetime.now().strftime('%H:%M:%S')
MYCURSOR.execute("INSERT INTO turnos(id_calendario, id_jugador, posicion, inning, tiempo_inicio) VALUES(%s, %s, %s, %s, %s)",
(juego_actual, jugador_mayor[0], jugador_mayor[2], inning_actual, hora))
MYDB.commit()
nro_turnos += 1
ult_jug = jugador_mayor[0]
out_name = 'out' + \
str(jugador_mayor[0])+'-' + str(nro_turnos) + '.mp4'
out = cv2.VideoWriter('videos/videos/' + out_name, codec, camara_secundaria[3], (camara_secundaria[4], camara_secundaria[5]))
finished = True
continuar = True
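    # Main selection loop: take the latest result from each camera process, pick the
    # camera whose best player match is above the threshold (15) and stronger than the
    # other camera's, and record the frames coming from the opposite camera;
    # `tolerancia` counts iterations since the last confident match.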
while continuar:
CAMARA1 = COLA1.get()
CAMARA2 = COLA2.get()
        #cola.put([jugador_actual, continuar, frame, fps, width, height])
if CAMARA1[0] and CAMARA2[0] and CAMARA1[1] and CAMARA2[1]:
if CAMARA1[0][1] > 15 and CAMARA1[0][1] > CAMARA2[0][1]:
mayor = CAMARA1[0]
seleccion_insercion_grabado(CAMARA2, mayor)
nro_cam = 1
tolerancia = 0
out.write(CAMARA2[2])
elif CAMARA2[0][1] > 15 and CAMARA2[0][1] > CAMARA1[0][1]:
mayor = CAMARA2[0]
seleccion_insercion_grabado(CAMARA1, mayor)
nro_cam = 2
tolerancia = 0
out.write(CAMARA1[2])
print(tolerancia)
else:
print(tolerancia)
if tolerancia < 60:
tolerancia += 1
if nro_cam == 1:
out.write(CAMARA2[2])
else:
out.write(CAMARA1[2])
else:
print(tolerancia)
if finished:
MYCURSOR.execute("SELECT id FROM turnos WHERE id_calendario=" + str(juego_actual) + " ORDER BY id DESC LIMIT 1")
ultimo_turno = MYCURSOR.fetchone()[0]
MYCURSOR.execute("INSERT INTO video(direccion, id_turno, camara) VALUES (%s, %s, %s)", ("videos/" + out_name, ultimo_turno, nro_cam))
MYDB.commit()
finished = False
out.release()
try:
image = cv2.resize(CAMARA1[2], (0, 0), None, .75, .75)
image1 = cv2.resize(CAMARA2[2], (0, 0), None, .75, .75)
numpy_horizontal_concat = np.concatenate((image1, image), axis=1)
cv2.imshow('video', numpy_horizontal_concat)
except Exception as e:
print("Excepcion de video: " + str(e))
if cv2.waitKey(1) & 0xFF == 27:
continuar = False
break
# for avi  --> cv2.VideoWriter_fourcc(*"FMP4")
# for mp4  --> cv2.VideoWriter_fourcc(*"mp4v")
# for webm --> cv2.VideoWriter_fourcc(*"VP80")
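# Minimal sketch (not part of the original pipeline; file name, fps, width and height are placeholders):
# switching the container only requires swapping the fourcc, e.g. for WebM output one could write
#   out = cv2.VideoWriter('videos/clip.webm', cv2.VideoWriter_fourcc(*"VP80"), fps, (width, height))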
|
buttonthread.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018 Steven P. Goldsmith
# See LICENSE.md for details.
"""
Use a thread to monitor edge events in background
-------------
Should work on any board with a button built in. Just change chip and line
value as needed.
"""
import sys, time, threading
from argparse import ArgumentParser
from cffi import FFI
from libgpiod import libgpiod
class buttonthread:
def __init__(self):
"""Create library and ffi interfaces.
"""
self.gpiod = libgpiod.libgpiod()
self.lib = self.gpiod.lib
self.ffi = self.gpiod.ffi
def waitForEdge(self, line, consumer, timeoutSecs):
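        """Wait for edge events on `line`, printing a timestamped message for each
        rising or falling edge; returns after `timeoutSecs` with no events or on a
        libgpiod error.
        """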
print("Thread running\n")
timespec = self.ffi.new("struct timespec*")
timespec.tv_sec = timeoutSecs
rc = 1
event = self.ffi.new("struct gpiod_line_event*")
while rc == 1:
# Wait for event
rc = self.lib.gpiod_line_event_wait(line, timespec)
if rc == 0:
print("Thread timed out")
elif rc == 1:
# Get event off queue
if self.lib.gpiod_line_event_read(line, event) == 0:
if event.event_type == self.lib.GPIOD_LINE_EVENT_RISING_EDGE:
print("Rising edge timestamp %s" % time.strftime('%m/%d/%Y %H:%M:%S', time.localtime(event.ts.tv_sec)))
else:
print("Falling edge timestamp %s" % time.strftime('%m/%d/%Y %H:%M:%S', time.localtime(event.ts.tv_sec)))
else:
print("gpiod_line_event_read error")
rc = -1
else:
print("gpiod_line_event_wait error")
rc = -1
print("Thread exit")
def main(self, chip, line):
"""Use thread to wait for edge events while main method does other stuff.
"""
print ("libgpiod version %s" % self.ffi.string(self.lib.gpiod_version_string()).decode('utf-8'))
gpiod_chip = self.lib.gpiod_chip_open_by_number(chip)
# Verify the chip was opened
if gpiod_chip != self.ffi.NULL:
print("Name: %s, label: %s, lines: %d" % (self.ffi.string(gpiod_chip.name).decode('utf-8'), self.ffi.string(gpiod_chip.label).decode('utf-8'), gpiod_chip.num_lines))
gpiod_line = self.lib.gpiod_chip_get_line(gpiod_chip, line)
# Verify we have line
if gpiod_line != self.ffi.NULL:
consumer = sys.argv[0][:-3]
# Request detection of both edge events
if self.lib.gpiod_line_request_both_edges_events(gpiod_line, consumer.encode('utf-8')) == 0:
# Kick off thread
thread = threading.Thread(target=self.waitForEdge, args=(gpiod_line, consumer.encode('utf-8'), 5,))
thread.start()
count = 0
# Just simulating main program doing something else
                    while count < 30 and thread.is_alive():
print("Main program doing stuff, press button")
time.sleep(1)
count += 1
# If thread is still alive wait for it to time out
                    if thread.is_alive():
print("Waiting for thread to exit, stop pressing button for 5 seconds")
thread.join()
else:
print("Unable request both edges for line %d" % line)
self.lib.gpiod_line_release(gpiod_line)
else:
print("Unable to get line %d" % line)
self.lib.gpiod_chip_close(gpiod_chip)
else:
print("Unable to open chip %d" % chip)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--chip", help="GPIO chip number (default 1 '/dev/gpiochip1')", type=int, default=1)
parser.add_argument("--line", help="GPIO line number (default 3 button on NanoPi Duo)", type=int, default=3)
args = parser.parse_args()
obj = buttonthread()
obj.main(args.chip, args.line)
|
test_utils.py
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for swift.common.utils"""
from __future__ import print_function
from test.unit import temptree
import ctypes
import contextlib
import errno
import eventlet
import eventlet.event
import functools
import grp
import logging
import os
import mock
import random
import re
import socket
import stat
import sys
import json
import math
import six
from six import BytesIO, StringIO
from six.moves.queue import Queue, Empty
from six.moves import range
from textwrap import dedent
import tempfile
import time
import traceback
import unittest
import fcntl
import shutil
from getpass import getuser
from shutil import rmtree
from functools import partial
from tempfile import TemporaryFile, NamedTemporaryFile, mkdtemp
from netifaces import AF_INET6
from mock import MagicMock, patch
from six.moves.configparser import NoSectionError, NoOptionError
from swift.common.exceptions import Timeout, MessageTimeout, \
ConnectionTimeout, LockTimeout, ReplicationLockTimeout, \
MimeInvalid, ThreadPoolDead
from swift.common import utils
from swift.common.container_sync_realms import ContainerSyncRealms
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.swob import Request, Response
from test.unit import FakeLogger
threading = eventlet.patcher.original('threading')
class MockOs(object):
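    """Stand-in for the os module that records which functions were called and can
    raise OSError from selected ones."""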
def __init__(self, pass_funcs=None, called_funcs=None, raise_funcs=None):
if pass_funcs is None:
pass_funcs = []
if called_funcs is None:
called_funcs = []
if raise_funcs is None:
raise_funcs = []
self.closed_fds = []
for func in pass_funcs:
setattr(self, func, self.pass_func)
self.called_funcs = {}
for func in called_funcs:
c_func = partial(self.called_func, func)
setattr(self, func, c_func)
for func in raise_funcs:
r_func = partial(self.raise_func, func)
setattr(self, func, r_func)
def pass_func(self, *args, **kwargs):
pass
setgroups = chdir = setsid = setgid = setuid = umask = pass_func
def called_func(self, name, *args, **kwargs):
self.called_funcs[name] = True
def raise_func(self, name, *args, **kwargs):
self.called_funcs[name] = True
raise OSError()
def dup2(self, source, target):
self.closed_fds.append(target)
def geteuid(self):
'''Pretend we are running as root.'''
return 0
def __getattr__(self, name):
        # Only override portions of the os module
try:
return object.__getattr__(self, name)
except AttributeError:
return getattr(os, name)
class MockUdpSocket(object):
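    """UDP socket stub that records sendto() calls or raises a preset errno."""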
def __init__(self, sendto_errno=None):
self.sent = []
self.sendto_errno = sendto_errno
def sendto(self, data, target):
if self.sendto_errno:
raise socket.error(self.sendto_errno,
'test errno %s' % self.sendto_errno)
self.sent.append((data, target))
def close(self):
pass
class MockSys(object):
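    """sys module stub that exposes temporary files as the standard stdio streams."""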
def __init__(self):
self.stdin = TemporaryFile('w')
self.stdout = TemporaryFile('r')
self.stderr = TemporaryFile('r')
self.__stderr__ = self.stderr
self.stdio_fds = [self.stdin.fileno(), self.stdout.fileno(),
self.stderr.fileno()]
def reset_loggers():
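    """Detach any handlers attached by previous get_logger() calls and reset the
    LogAdapter thread-local state."""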
if hasattr(utils.get_logger, 'handler4logger'):
for logger, handler in utils.get_logger.handler4logger.items():
logger.removeHandler(handler)
delattr(utils.get_logger, 'handler4logger')
if hasattr(utils.get_logger, 'console_handler4logger'):
for logger, h in utils.get_logger.console_handler4logger.items():
logger.removeHandler(h)
delattr(utils.get_logger, 'console_handler4logger')
# Reset the LogAdapter class thread local state. Use get_logger() here
# to fetch a LogAdapter instance because the items from
# get_logger.handler4logger above are the underlying logger instances,
# not the LogAdapter.
utils.get_logger(None).thread_locals = (None, None)
def reset_logger_state(f):
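    """Decorator that runs reset_loggers() before and after the wrapped test."""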
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
reset_loggers()
try:
return f(self, *args, **kwargs)
finally:
reset_loggers()
return wrapper
class TestTimestamp(unittest.TestCase):
"""Tests for swift.common.utils.Timestamp"""
def test_invalid_input(self):
self.assertRaises(ValueError, utils.Timestamp, time.time(), offset=-1)
def test_invalid_string_conversion(self):
t = utils.Timestamp(time.time())
self.assertRaises(TypeError, str, t)
def test_offset_limit(self):
t = 1417462430.78693
        # can't have an offset above MAX_OFFSET
self.assertRaises(ValueError, utils.Timestamp, t,
offset=utils.MAX_OFFSET + 1)
# exactly max offset is fine
ts = utils.Timestamp(t, offset=utils.MAX_OFFSET)
self.assertEqual(ts.internal, '1417462430.78693_ffffffffffffffff')
# but you can't offset it further
self.assertRaises(ValueError, utils.Timestamp, ts.internal, offset=1)
# unless you start below it
ts = utils.Timestamp(t, offset=utils.MAX_OFFSET - 1)
self.assertEqual(utils.Timestamp(ts.internal, offset=1),
'1417462430.78693_ffffffffffffffff')
def test_normal_format_no_offset(self):
expected = '1402436408.91203'
test_values = (
'1402436408.91203',
'1402436408.91203_00000000',
'1402436408.912030000',
'1402436408.912030000_0000000000000',
'000001402436408.912030000',
'000001402436408.912030000_0000000000',
1402436408.91203,
1402436408.912029,
1402436408.9120300000000000,
1402436408.91202999999999999,
utils.Timestamp(1402436408.91203),
utils.Timestamp(1402436408.91203, offset=0),
utils.Timestamp(1402436408.912029),
utils.Timestamp(1402436408.912029, offset=0),
utils.Timestamp('1402436408.91203'),
utils.Timestamp('1402436408.91203', offset=0),
utils.Timestamp('1402436408.91203_00000000'),
utils.Timestamp('1402436408.91203_00000000', offset=0),
)
for value in test_values:
timestamp = utils.Timestamp(value)
self.assertEqual(timestamp.normal, expected)
# timestamp instance can also compare to string or float
self.assertEqual(timestamp, expected)
self.assertEqual(timestamp, float(expected))
self.assertEqual(timestamp, utils.normalize_timestamp(expected))
def test_isoformat(self):
expected = '2014-06-10T22:47:32.054580'
test_values = (
'1402440452.05458',
'1402440452.054579',
'1402440452.05458_00000000',
'1402440452.054579_00000000',
'1402440452.054580000',
'1402440452.054579999',
'1402440452.054580000_0000000000000',
'1402440452.054579999_0000ff00',
'000001402440452.054580000',
'000001402440452.0545799',
'000001402440452.054580000_0000000000',
'000001402440452.054579999999_00000fffff',
1402440452.05458,
1402440452.054579,
1402440452.0545800000000000,
1402440452.054579999,
utils.Timestamp(1402440452.05458),
utils.Timestamp(1402440452.0545799),
utils.Timestamp(1402440452.05458, offset=0),
utils.Timestamp(1402440452.05457999999, offset=0),
utils.Timestamp(1402440452.05458, offset=100),
utils.Timestamp(1402440452.054579, offset=100),
utils.Timestamp('1402440452.05458'),
utils.Timestamp('1402440452.054579999'),
utils.Timestamp('1402440452.05458', offset=0),
utils.Timestamp('1402440452.054579', offset=0),
utils.Timestamp('1402440452.05458', offset=300),
utils.Timestamp('1402440452.05457999', offset=300),
utils.Timestamp('1402440452.05458_00000000'),
utils.Timestamp('1402440452.05457999_00000000'),
utils.Timestamp('1402440452.05458_00000000', offset=0),
utils.Timestamp('1402440452.05457999_00000aaa', offset=0),
utils.Timestamp('1402440452.05458_00000000', offset=400),
utils.Timestamp('1402440452.054579_0a', offset=400),
)
for value in test_values:
self.assertEqual(utils.Timestamp(value).isoformat, expected)
expected = '1970-01-01T00:00:00.000000'
test_values = (
'0',
'0000000000.00000',
'0000000000.00000_ffffffffffff',
0,
0.0,
)
for value in test_values:
self.assertEqual(utils.Timestamp(value).isoformat, expected)
def test_not_equal(self):
ts = '1402436408.91203_0000000000000001'
test_values = (
utils.Timestamp('1402436408.91203_0000000000000002'),
utils.Timestamp('1402436408.91203'),
utils.Timestamp(1402436408.91203),
utils.Timestamp(1402436408.91204),
utils.Timestamp(1402436408.91203, offset=0),
utils.Timestamp(1402436408.91203, offset=2),
)
for value in test_values:
self.assertTrue(value != ts)
self.assertIs(True, utils.Timestamp(ts) == ts) # sanity
self.assertIs(False, utils.Timestamp(ts) != utils.Timestamp(ts))
self.assertIs(False, utils.Timestamp(ts) != ts)
self.assertIs(False, utils.Timestamp(ts) is None)
self.assertIs(True, utils.Timestamp(ts) is not None)
def test_no_force_internal_no_offset(self):
"""Test that internal is the same as normal with no offset"""
with mock.patch('swift.common.utils.FORCE_INTERNAL', new=False):
self.assertEqual(utils.Timestamp(0).internal, '0000000000.00000')
self.assertEqual(utils.Timestamp(1402437380.58186).internal,
'1402437380.58186')
self.assertEqual(utils.Timestamp(1402437380.581859).internal,
'1402437380.58186')
self.assertEqual(utils.Timestamp(0).internal,
utils.normalize_timestamp(0))
def test_no_force_internal_with_offset(self):
"""Test that internal always includes the offset if significant"""
with mock.patch('swift.common.utils.FORCE_INTERNAL', new=False):
self.assertEqual(utils.Timestamp(0, offset=1).internal,
'0000000000.00000_0000000000000001')
self.assertEqual(
utils.Timestamp(1402437380.58186, offset=16).internal,
'1402437380.58186_0000000000000010')
self.assertEqual(
utils.Timestamp(1402437380.581859, offset=240).internal,
'1402437380.58186_00000000000000f0')
self.assertEqual(
utils.Timestamp('1402437380.581859_00000001',
offset=240).internal,
'1402437380.58186_00000000000000f1')
def test_force_internal(self):
"""Test that internal always includes the offset if forced"""
with mock.patch('swift.common.utils.FORCE_INTERNAL', new=True):
self.assertEqual(utils.Timestamp(0).internal,
'0000000000.00000_0000000000000000')
self.assertEqual(utils.Timestamp(1402437380.58186).internal,
'1402437380.58186_0000000000000000')
self.assertEqual(utils.Timestamp(1402437380.581859).internal,
'1402437380.58186_0000000000000000')
self.assertEqual(utils.Timestamp(0, offset=1).internal,
'0000000000.00000_0000000000000001')
self.assertEqual(
utils.Timestamp(1402437380.58186, offset=16).internal,
'1402437380.58186_0000000000000010')
self.assertEqual(
utils.Timestamp(1402437380.581859, offset=16).internal,
'1402437380.58186_0000000000000010')
def test_internal_format_no_offset(self):
expected = '1402436408.91203_0000000000000000'
test_values = (
'1402436408.91203',
'1402436408.91203_00000000',
'1402436408.912030000',
'1402436408.912030000_0000000000000',
'000001402436408.912030000',
'000001402436408.912030000_0000000000',
1402436408.91203,
1402436408.9120300000000000,
1402436408.912029,
1402436408.912029999999999999,
utils.Timestamp(1402436408.91203),
utils.Timestamp(1402436408.91203, offset=0),
utils.Timestamp(1402436408.912029),
utils.Timestamp(1402436408.91202999999999999, offset=0),
utils.Timestamp('1402436408.91203'),
utils.Timestamp('1402436408.91203', offset=0),
utils.Timestamp('1402436408.912029'),
utils.Timestamp('1402436408.912029', offset=0),
utils.Timestamp('1402436408.912029999999999'),
utils.Timestamp('1402436408.912029999999999', offset=0),
)
for value in test_values:
# timestamp instance is always equivalent
self.assertEqual(utils.Timestamp(value), expected)
if utils.FORCE_INTERNAL:
# the FORCE_INTERNAL flag makes the internal format always
# include the offset portion of the timestamp even when it's
# not significant and would be bad during upgrades
self.assertEqual(utils.Timestamp(value).internal, expected)
else:
# unless we FORCE_INTERNAL, when there's no offset the
# internal format is equivalent to the normalized format
self.assertEqual(utils.Timestamp(value).internal,
'1402436408.91203')
def test_internal_format_with_offset(self):
expected = '1402436408.91203_00000000000000f0'
test_values = (
'1402436408.91203_000000f0',
'1402436408.912030000_0000000000f0',
'1402436408.912029_000000f0',
'1402436408.91202999999_0000000000f0',
'000001402436408.912030000_000000000f0',
'000001402436408.9120299999_000000000f0',
utils.Timestamp(1402436408.91203, offset=240),
utils.Timestamp(1402436408.912029, offset=240),
utils.Timestamp('1402436408.91203', offset=240),
utils.Timestamp('1402436408.91203_00000000', offset=240),
utils.Timestamp('1402436408.91203_0000000f', offset=225),
utils.Timestamp('1402436408.9120299999', offset=240),
utils.Timestamp('1402436408.9120299999_00000000', offset=240),
utils.Timestamp('1402436408.9120299999_00000010', offset=224),
)
for value in test_values:
timestamp = utils.Timestamp(value)
self.assertEqual(timestamp.internal, expected)
# can compare with offset if the string is internalized
self.assertEqual(timestamp, expected)
# if comparison value only includes the normalized portion and the
# timestamp includes an offset, it is considered greater
normal = utils.Timestamp(expected).normal
self.assertTrue(timestamp > normal,
'%r is not bigger than %r given %r' % (
timestamp, normal, value))
self.assertTrue(timestamp > float(normal),
'%r is not bigger than %f given %r' % (
timestamp, float(normal), value))
def test_short_format_with_offset(self):
expected = '1402436408.91203_f0'
timestamp = utils.Timestamp(1402436408.91203, 0xf0)
self.assertEqual(expected, timestamp.short)
expected = '1402436408.91203'
timestamp = utils.Timestamp(1402436408.91203)
self.assertEqual(expected, timestamp.short)
def test_raw(self):
expected = 140243640891203
timestamp = utils.Timestamp(1402436408.91203)
self.assertEqual(expected, timestamp.raw)
# 'raw' does not include offset
timestamp = utils.Timestamp(1402436408.91203, 0xf0)
self.assertEqual(expected, timestamp.raw)
def test_delta(self):
def _assertWithinBounds(expected, timestamp):
tolerance = 0.00001
minimum = expected - tolerance
maximum = expected + tolerance
self.assertTrue(float(timestamp) > minimum)
self.assertTrue(float(timestamp) < maximum)
timestamp = utils.Timestamp(1402436408.91203, delta=100)
_assertWithinBounds(1402436408.91303, timestamp)
self.assertEqual(140243640891303, timestamp.raw)
timestamp = utils.Timestamp(1402436408.91203, delta=-100)
_assertWithinBounds(1402436408.91103, timestamp)
self.assertEqual(140243640891103, timestamp.raw)
timestamp = utils.Timestamp(1402436408.91203, delta=0)
_assertWithinBounds(1402436408.91203, timestamp)
self.assertEqual(140243640891203, timestamp.raw)
# delta is independent of offset
timestamp = utils.Timestamp(1402436408.91203, offset=42, delta=100)
self.assertEqual(140243640891303, timestamp.raw)
self.assertEqual(42, timestamp.offset)
# cannot go negative
self.assertRaises(ValueError, utils.Timestamp, 1402436408.91203,
delta=-140243640891203)
def test_int(self):
expected = 1402437965
test_values = (
'1402437965.91203',
'1402437965.91203_00000000',
'1402437965.912030000',
'1402437965.912030000_0000000000000',
'000001402437965.912030000',
'000001402437965.912030000_0000000000',
1402437965.91203,
1402437965.9120300000000000,
1402437965.912029,
1402437965.912029999999999999,
utils.Timestamp(1402437965.91203),
utils.Timestamp(1402437965.91203, offset=0),
utils.Timestamp(1402437965.91203, offset=500),
utils.Timestamp(1402437965.912029),
utils.Timestamp(1402437965.91202999999999999, offset=0),
utils.Timestamp(1402437965.91202999999999999, offset=300),
utils.Timestamp('1402437965.91203'),
utils.Timestamp('1402437965.91203', offset=0),
utils.Timestamp('1402437965.91203', offset=400),
utils.Timestamp('1402437965.912029'),
utils.Timestamp('1402437965.912029', offset=0),
utils.Timestamp('1402437965.912029', offset=200),
utils.Timestamp('1402437965.912029999999999'),
utils.Timestamp('1402437965.912029999999999', offset=0),
utils.Timestamp('1402437965.912029999999999', offset=100),
)
for value in test_values:
timestamp = utils.Timestamp(value)
self.assertEqual(int(timestamp), expected)
self.assertTrue(timestamp > expected)
def test_float(self):
expected = 1402438115.91203
test_values = (
'1402438115.91203',
'1402438115.91203_00000000',
'1402438115.912030000',
'1402438115.912030000_0000000000000',
'000001402438115.912030000',
'000001402438115.912030000_0000000000',
1402438115.91203,
1402438115.9120300000000000,
1402438115.912029,
1402438115.912029999999999999,
utils.Timestamp(1402438115.91203),
utils.Timestamp(1402438115.91203, offset=0),
utils.Timestamp(1402438115.91203, offset=500),
utils.Timestamp(1402438115.912029),
utils.Timestamp(1402438115.91202999999999999, offset=0),
utils.Timestamp(1402438115.91202999999999999, offset=300),
utils.Timestamp('1402438115.91203'),
utils.Timestamp('1402438115.91203', offset=0),
utils.Timestamp('1402438115.91203', offset=400),
utils.Timestamp('1402438115.912029'),
utils.Timestamp('1402438115.912029', offset=0),
utils.Timestamp('1402438115.912029', offset=200),
utils.Timestamp('1402438115.912029999999999'),
utils.Timestamp('1402438115.912029999999999', offset=0),
utils.Timestamp('1402438115.912029999999999', offset=100),
)
tolerance = 0.00001
minimum = expected - tolerance
maximum = expected + tolerance
for value in test_values:
timestamp = utils.Timestamp(value)
self.assertTrue(float(timestamp) > minimum,
'%f is not bigger than %f given %r' % (
timestamp, minimum, value))
self.assertTrue(float(timestamp) < maximum,
'%f is not smaller than %f given %r' % (
timestamp, maximum, value))
# direct comparison of timestamp works too
self.assertTrue(timestamp > minimum,
'%s is not bigger than %f given %r' % (
timestamp.normal, minimum, value))
self.assertTrue(timestamp < maximum,
'%s is not smaller than %f given %r' % (
timestamp.normal, maximum, value))
# ... even against strings
self.assertTrue(timestamp > '%f' % minimum,
'%s is not bigger than %s given %r' % (
timestamp.normal, minimum, value))
self.assertTrue(timestamp < '%f' % maximum,
'%s is not smaller than %s given %r' % (
timestamp.normal, maximum, value))
def test_false(self):
self.assertFalse(utils.Timestamp(0))
self.assertFalse(utils.Timestamp(0, offset=0))
self.assertFalse(utils.Timestamp('0'))
self.assertFalse(utils.Timestamp('0', offset=0))
self.assertFalse(utils.Timestamp(0.0))
self.assertFalse(utils.Timestamp(0.0, offset=0))
self.assertFalse(utils.Timestamp('0.0'))
self.assertFalse(utils.Timestamp('0.0', offset=0))
self.assertFalse(utils.Timestamp(00000000.00000000))
self.assertFalse(utils.Timestamp(00000000.00000000, offset=0))
self.assertFalse(utils.Timestamp('00000000.00000000'))
self.assertFalse(utils.Timestamp('00000000.00000000', offset=0))
def test_true(self):
self.assertTrue(utils.Timestamp(1))
self.assertTrue(utils.Timestamp(1, offset=1))
self.assertTrue(utils.Timestamp(0, offset=1))
self.assertTrue(utils.Timestamp('1'))
self.assertTrue(utils.Timestamp('1', offset=1))
self.assertTrue(utils.Timestamp('0', offset=1))
self.assertTrue(utils.Timestamp(1.1))
self.assertTrue(utils.Timestamp(1.1, offset=1))
self.assertTrue(utils.Timestamp(0.0, offset=1))
self.assertTrue(utils.Timestamp('1.1'))
self.assertTrue(utils.Timestamp('1.1', offset=1))
self.assertTrue(utils.Timestamp('0.0', offset=1))
self.assertTrue(utils.Timestamp(11111111.11111111))
self.assertTrue(utils.Timestamp(11111111.11111111, offset=1))
self.assertTrue(utils.Timestamp(00000000.00000000, offset=1))
self.assertTrue(utils.Timestamp('11111111.11111111'))
self.assertTrue(utils.Timestamp('11111111.11111111', offset=1))
self.assertTrue(utils.Timestamp('00000000.00000000', offset=1))
def test_greater_no_offset(self):
now = time.time()
older = now - 1
timestamp = utils.Timestamp(now)
test_values = (
0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
1402443112.213252, '1402443112.213252', '1402443112.213252_ffff',
older, '%f' % older, '%f_0000ffff' % older,
)
for value in test_values:
other = utils.Timestamp(value)
self.assertNotEqual(timestamp, other) # sanity
self.assertTrue(timestamp > value,
'%r is not greater than %r given %r' % (
timestamp, value, value))
self.assertTrue(timestamp > other,
'%r is not greater than %r given %r' % (
timestamp, other, value))
self.assertTrue(timestamp > other.normal,
'%r is not greater than %r given %r' % (
timestamp, other.normal, value))
self.assertTrue(timestamp > other.internal,
'%r is not greater than %r given %r' % (
timestamp, other.internal, value))
self.assertTrue(timestamp > float(other),
'%r is not greater than %r given %r' % (
timestamp, float(other), value))
self.assertTrue(timestamp > int(other),
'%r is not greater than %r given %r' % (
timestamp, int(other), value))
def test_greater_with_offset(self):
now = time.time()
older = now - 1
test_values = (
0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
1402443346.935174, '1402443346.93517', '1402443346.935169_ffff',
older, '%f' % older, '%f_0000ffff' % older,
now, '%f' % now, '%f_00000000' % now,
)
for offset in range(1, 1000, 100):
timestamp = utils.Timestamp(now, offset=offset)
for value in test_values:
other = utils.Timestamp(value)
self.assertNotEqual(timestamp, other) # sanity
self.assertTrue(timestamp > value,
'%r is not greater than %r given %r' % (
timestamp, value, value))
self.assertTrue(timestamp > other,
'%r is not greater than %r given %r' % (
timestamp, other, value))
self.assertTrue(timestamp > other.normal,
'%r is not greater than %r given %r' % (
timestamp, other.normal, value))
self.assertTrue(timestamp > other.internal,
'%r is not greater than %r given %r' % (
timestamp, other.internal, value))
self.assertTrue(timestamp > float(other),
'%r is not greater than %r given %r' % (
timestamp, float(other), value))
self.assertTrue(timestamp > int(other),
'%r is not greater than %r given %r' % (
timestamp, int(other), value))
def test_smaller_no_offset(self):
now = time.time()
newer = now + 1
timestamp = utils.Timestamp(now)
test_values = (
9999999999.99999, '9999999999.99999', '9999999999.99999_ffff',
newer, '%f' % newer, '%f_0000ffff' % newer,
)
for value in test_values:
other = utils.Timestamp(value)
self.assertNotEqual(timestamp, other) # sanity
self.assertTrue(timestamp < value,
'%r is not smaller than %r given %r' % (
timestamp, value, value))
self.assertTrue(timestamp < other,
'%r is not smaller than %r given %r' % (
timestamp, other, value))
self.assertTrue(timestamp < other.normal,
'%r is not smaller than %r given %r' % (
timestamp, other.normal, value))
self.assertTrue(timestamp < other.internal,
'%r is not smaller than %r given %r' % (
timestamp, other.internal, value))
self.assertTrue(timestamp < float(other),
'%r is not smaller than %r given %r' % (
timestamp, float(other), value))
self.assertTrue(timestamp < int(other),
'%r is not smaller than %r given %r' % (
timestamp, int(other), value))
def test_smaller_with_offset(self):
now = time.time()
newer = now + 1
test_values = (
9999999999.99999, '9999999999.99999', '9999999999.99999_ffff',
newer, '%f' % newer, '%f_0000ffff' % newer,
)
for offset in range(1, 1000, 100):
timestamp = utils.Timestamp(now, offset=offset)
for value in test_values:
other = utils.Timestamp(value)
self.assertNotEqual(timestamp, other) # sanity
self.assertTrue(timestamp < value,
'%r is not smaller than %r given %r' % (
timestamp, value, value))
self.assertTrue(timestamp < other,
'%r is not smaller than %r given %r' % (
timestamp, other, value))
self.assertTrue(timestamp < other.normal,
'%r is not smaller than %r given %r' % (
timestamp, other.normal, value))
self.assertTrue(timestamp < other.internal,
'%r is not smaller than %r given %r' % (
timestamp, other.internal, value))
self.assertTrue(timestamp < float(other),
'%r is not smaller than %r given %r' % (
timestamp, float(other), value))
self.assertTrue(timestamp < int(other),
'%r is not smaller than %r given %r' % (
timestamp, int(other), value))
def test_cmp_with_none(self):
self.assertGreater(utils.Timestamp(0), None)
self.assertGreater(utils.Timestamp(1.0), None)
self.assertGreater(utils.Timestamp(1.0, 42), None)
def test_ordering(self):
given = [
'1402444820.62590_000000000000000a',
'1402444820.62589_0000000000000001',
'1402444821.52589_0000000000000004',
'1402444920.62589_0000000000000004',
'1402444821.62589_000000000000000a',
'1402444821.72589_000000000000000a',
'1402444920.62589_0000000000000002',
'1402444820.62589_0000000000000002',
'1402444820.62589_000000000000000a',
'1402444820.62590_0000000000000004',
'1402444920.62589_000000000000000a',
'1402444820.62590_0000000000000002',
'1402444821.52589_0000000000000002',
'1402444821.52589_0000000000000000',
'1402444920.62589',
'1402444821.62589_0000000000000004',
'1402444821.72589_0000000000000001',
'1402444820.62590',
'1402444820.62590_0000000000000001',
'1402444820.62589_0000000000000004',
'1402444821.72589_0000000000000000',
'1402444821.52589_000000000000000a',
'1402444821.72589_0000000000000004',
'1402444821.62589',
'1402444821.52589_0000000000000001',
'1402444821.62589_0000000000000001',
'1402444821.62589_0000000000000002',
'1402444821.72589_0000000000000002',
'1402444820.62589',
'1402444920.62589_0000000000000001']
expected = [
'1402444820.62589',
'1402444820.62589_0000000000000001',
'1402444820.62589_0000000000000002',
'1402444820.62589_0000000000000004',
'1402444820.62589_000000000000000a',
'1402444820.62590',
'1402444820.62590_0000000000000001',
'1402444820.62590_0000000000000002',
'1402444820.62590_0000000000000004',
'1402444820.62590_000000000000000a',
'1402444821.52589',
'1402444821.52589_0000000000000001',
'1402444821.52589_0000000000000002',
'1402444821.52589_0000000000000004',
'1402444821.52589_000000000000000a',
'1402444821.62589',
'1402444821.62589_0000000000000001',
'1402444821.62589_0000000000000002',
'1402444821.62589_0000000000000004',
'1402444821.62589_000000000000000a',
'1402444821.72589',
'1402444821.72589_0000000000000001',
'1402444821.72589_0000000000000002',
'1402444821.72589_0000000000000004',
'1402444821.72589_000000000000000a',
'1402444920.62589',
'1402444920.62589_0000000000000001',
'1402444920.62589_0000000000000002',
'1402444920.62589_0000000000000004',
'1402444920.62589_000000000000000a',
]
# less visual version
"""
now = time.time()
given = [
utils.Timestamp(now + i, offset=offset).internal
for i in (0, 0.00001, 0.9, 1.0, 1.1, 100.0)
for offset in (0, 1, 2, 4, 10)
]
expected = [t for t in given]
random.shuffle(given)
"""
self.assertEqual(len(given), len(expected)) # sanity
timestamps = [utils.Timestamp(t) for t in given]
# our expected values don't include insignificant offsets
with mock.patch('swift.common.utils.FORCE_INTERNAL', new=False):
self.assertEqual(
[t.internal for t in sorted(timestamps)], expected)
# string sorting works as well
self.assertEqual(
sorted([t.internal for t in timestamps]), expected)
def test_hashable(self):
ts_0 = utils.Timestamp('1402444821.72589')
ts_0_also = utils.Timestamp('1402444821.72589')
self.assertEqual(ts_0, ts_0_also) # sanity
self.assertEqual(hash(ts_0), hash(ts_0_also))
d = {ts_0: 'whatever'}
self.assertIn(ts_0, d) # sanity
self.assertIn(ts_0_also, d)
class TestTimestampEncoding(unittest.TestCase):
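    """Tests for swift.common.utils.encode_timestamps and decode_timestamps"""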
def setUp(self):
t0 = utils.Timestamp(0.0)
t1 = utils.Timestamp(997.9996)
t2 = utils.Timestamp(999)
t3 = utils.Timestamp(1000, 24)
t4 = utils.Timestamp(1001)
t5 = utils.Timestamp(1002.00040)
# encodings that are expected when explicit = False
self.non_explicit_encodings = (
('0000001000.00000_18', (t3, t3, t3)),
('0000001000.00000_18', (t3, t3, None)),
)
# mappings that are expected when explicit = True
self.explicit_encodings = (
('0000001000.00000_18+0+0', (t3, t3, t3)),
('0000001000.00000_18+0', (t3, t3, None)),
)
# mappings that are expected when explicit = True or False
self.encodings = (
('0000001000.00000_18+0+186a0', (t3, t3, t4)),
('0000001000.00000_18+186a0+186c8', (t3, t4, t5)),
('0000001000.00000_18-186a0+0', (t3, t2, t2)),
('0000001000.00000_18+0-186a0', (t3, t3, t2)),
('0000001000.00000_18-186a0-186c8', (t3, t2, t1)),
('0000001000.00000_18', (t3, None, None)),
('0000001000.00000_18+186a0', (t3, t4, None)),
('0000001000.00000_18-186a0', (t3, t2, None)),
('0000001000.00000_18', (t3, None, t1)),
('0000001000.00000_18-5f5e100', (t3, t0, None)),
('0000001000.00000_18+0-5f5e100', (t3, t3, t0)),
('0000001000.00000_18-5f5e100+5f45a60', (t3, t0, t2)),
)
# decodings that are expected when explicit = False
self.non_explicit_decodings = (
('0000001000.00000_18', (t3, t3, t3)),
('0000001000.00000_18+186a0', (t3, t4, t4)),
('0000001000.00000_18-186a0', (t3, t2, t2)),
('0000001000.00000_18+186a0', (t3, t4, t4)),
('0000001000.00000_18-186a0', (t3, t2, t2)),
('0000001000.00000_18-5f5e100', (t3, t0, t0)),
)
# decodings that are expected when explicit = True
self.explicit_decodings = (
('0000001000.00000_18+0+0', (t3, t3, t3)),
('0000001000.00000_18+0', (t3, t3, None)),
('0000001000.00000_18', (t3, None, None)),
('0000001000.00000_18+186a0', (t3, t4, None)),
('0000001000.00000_18-186a0', (t3, t2, None)),
('0000001000.00000_18-5f5e100', (t3, t0, None)),
)
# decodings that are expected when explicit = True or False
self.decodings = (
('0000001000.00000_18+0+186a0', (t3, t3, t4)),
('0000001000.00000_18+186a0+186c8', (t3, t4, t5)),
('0000001000.00000_18-186a0+0', (t3, t2, t2)),
('0000001000.00000_18+0-186a0', (t3, t3, t2)),
('0000001000.00000_18-186a0-186c8', (t3, t2, t1)),
('0000001000.00000_18-5f5e100+5f45a60', (t3, t0, t2)),
)
def _assertEqual(self, expected, actual, test):
self.assertEqual(expected, actual,
'Got %s but expected %s for parameters %s'
% (actual, expected, test))
def test_encoding(self):
for test in self.explicit_encodings:
actual = utils.encode_timestamps(test[1][0], test[1][1],
test[1][2], True)
self._assertEqual(test[0], actual, test[1])
for test in self.non_explicit_encodings:
actual = utils.encode_timestamps(test[1][0], test[1][1],
test[1][2], False)
self._assertEqual(test[0], actual, test[1])
for explicit in (True, False):
for test in self.encodings:
actual = utils.encode_timestamps(test[1][0], test[1][1],
test[1][2], explicit)
self._assertEqual(test[0], actual, test[1])
def test_decoding(self):
for test in self.explicit_decodings:
actual = utils.decode_timestamps(test[0], True)
self._assertEqual(test[1], actual, test[0])
for test in self.non_explicit_decodings:
actual = utils.decode_timestamps(test[0], False)
self._assertEqual(test[1], actual, test[0])
for explicit in (True, False):
for test in self.decodings:
actual = utils.decode_timestamps(test[0], explicit)
self._assertEqual(test[1], actual, test[0])
class TestUtils(unittest.TestCase):
"""Tests for swift.common.utils """
def setUp(self):
utils.HASH_PATH_SUFFIX = 'endcap'
utils.HASH_PATH_PREFIX = 'startcap'
def test_lock_path(self):
tmpdir = mkdtemp()
try:
with utils.lock_path(tmpdir, 0.1):
exc = None
success = False
try:
with utils.lock_path(tmpdir, 0.1):
success = True
except LockTimeout as err:
exc = err
self.assertTrue(exc is not None)
self.assertTrue(not success)
finally:
shutil.rmtree(tmpdir)
def test_lock_path_num_sleeps(self):
tmpdir = mkdtemp()
num_short_calls = [0]
exception_raised = [False]
def my_sleep(to_sleep):
if to_sleep == 0.01:
num_short_calls[0] += 1
else:
raise Exception('sleep time changed: %s' % to_sleep)
try:
with mock.patch('swift.common.utils.sleep', my_sleep):
with utils.lock_path(tmpdir):
with utils.lock_path(tmpdir):
pass
except Exception as e:
exception_raised[0] = True
self.assertTrue('sleep time changed' in str(e))
finally:
shutil.rmtree(tmpdir)
self.assertEqual(num_short_calls[0], 11)
self.assertTrue(exception_raised[0])
def test_lock_path_class(self):
tmpdir = mkdtemp()
try:
with utils.lock_path(tmpdir, 0.1, ReplicationLockTimeout):
exc = None
exc2 = None
success = False
try:
with utils.lock_path(tmpdir, 0.1, ReplicationLockTimeout):
success = True
except ReplicationLockTimeout as err:
exc = err
except LockTimeout as err:
exc2 = err
self.assertTrue(exc is not None)
self.assertTrue(exc2 is None)
self.assertTrue(not success)
exc = None
exc2 = None
success = False
try:
with utils.lock_path(tmpdir, 0.1):
success = True
except ReplicationLockTimeout as err:
exc = err
except LockTimeout as err:
exc2 = err
self.assertTrue(exc is None)
self.assertTrue(exc2 is not None)
self.assertTrue(not success)
finally:
shutil.rmtree(tmpdir)
def test_normalize_timestamp(self):
# Test swift.common.utils.normalize_timestamp
self.assertEqual(utils.normalize_timestamp('1253327593.48174'),
"1253327593.48174")
self.assertEqual(utils.normalize_timestamp(1253327593.48174),
"1253327593.48174")
self.assertEqual(utils.normalize_timestamp('1253327593.48'),
"1253327593.48000")
self.assertEqual(utils.normalize_timestamp(1253327593.48),
"1253327593.48000")
self.assertEqual(utils.normalize_timestamp('253327593.48'),
"0253327593.48000")
self.assertEqual(utils.normalize_timestamp(253327593.48),
"0253327593.48000")
self.assertEqual(utils.normalize_timestamp('1253327593'),
"1253327593.00000")
self.assertEqual(utils.normalize_timestamp(1253327593),
"1253327593.00000")
self.assertRaises(ValueError, utils.normalize_timestamp, '')
self.assertRaises(ValueError, utils.normalize_timestamp, 'abc')
def test_normalize_delete_at_timestamp(self):
self.assertEqual(
utils.normalize_delete_at_timestamp(1253327593),
'1253327593')
self.assertEqual(
utils.normalize_delete_at_timestamp(1253327593.67890),
'1253327593')
self.assertEqual(
utils.normalize_delete_at_timestamp('1253327593'),
'1253327593')
self.assertEqual(
utils.normalize_delete_at_timestamp('1253327593.67890'),
'1253327593')
self.assertEqual(
utils.normalize_delete_at_timestamp(-1253327593),
'0000000000')
self.assertEqual(
utils.normalize_delete_at_timestamp(-1253327593.67890),
'0000000000')
self.assertEqual(
utils.normalize_delete_at_timestamp('-1253327593'),
'0000000000')
self.assertEqual(
utils.normalize_delete_at_timestamp('-1253327593.67890'),
'0000000000')
self.assertEqual(
utils.normalize_delete_at_timestamp(71253327593),
'9999999999')
self.assertEqual(
utils.normalize_delete_at_timestamp(71253327593.67890),
'9999999999')
self.assertEqual(
utils.normalize_delete_at_timestamp('71253327593'),
'9999999999')
self.assertEqual(
utils.normalize_delete_at_timestamp('71253327593.67890'),
'9999999999')
self.assertRaises(ValueError, utils.normalize_timestamp, '')
self.assertRaises(ValueError, utils.normalize_timestamp, 'abc')
def test_last_modified_date_to_timestamp(self):
expectations = {
'1970-01-01T00:00:00.000000': 0.0,
'2014-02-28T23:22:36.698390': 1393629756.698390,
'2011-03-19T04:03:00.604554': 1300507380.604554,
}
for last_modified, ts in expectations.items():
real = utils.last_modified_date_to_timestamp(last_modified)
self.assertEqual(real, ts, "failed for %s" % last_modified)
def test_last_modified_date_to_timestamp_when_system_not_UTC(self):
try:
old_tz = os.environ.get('TZ')
# Western Argentina Summer Time. Found in glibc manual; this
# timezone always has a non-zero offset from UTC, so this test is
# always meaningful.
os.environ['TZ'] = 'WART4WARST,J1/0,J365/25'
self.assertEqual(utils.last_modified_date_to_timestamp(
'1970-01-01T00:00:00.000000'),
0.0)
finally:
if old_tz is not None:
os.environ['TZ'] = old_tz
else:
os.environ.pop('TZ')
def test_backwards(self):
# Test swift.common.utils.backward
# The lines are designed so that the function would encounter
# all of the boundary conditions and typical conditions.
# Block boundaries are marked with '<>' characters
blocksize = 25
lines = [b'123456789x12345678><123456789\n', # block larger than rest
b'123456789x123>\n', # block ends just before \n character
b'123423456789\n',
b'123456789x\n', # block ends at the end of line
b'<123456789x123456789x123\n',
b'<6789x123\n', # block ends at the beginning of the line
b'6789x1234\n',
b'1234><234\n', # block ends typically in the middle of line
b'123456789x123456789\n']
with TemporaryFile() as f:
for line in lines:
f.write(line)
count = len(lines) - 1
for line in utils.backward(f, blocksize):
self.assertEqual(line, lines[count].split(b'\n')[0])
count -= 1
# Empty file case
with TemporaryFile('r') as f:
self.assertEqual([], list(utils.backward(f)))
def test_mkdirs(self):
testdir_base = mkdtemp()
testroot = os.path.join(testdir_base, 'mkdirs')
try:
self.assertTrue(not os.path.exists(testroot))
utils.mkdirs(testroot)
self.assertTrue(os.path.exists(testroot))
utils.mkdirs(testroot)
self.assertTrue(os.path.exists(testroot))
rmtree(testroot, ignore_errors=1)
testdir = os.path.join(testroot, 'one/two/three')
self.assertTrue(not os.path.exists(testdir))
utils.mkdirs(testdir)
self.assertTrue(os.path.exists(testdir))
utils.mkdirs(testdir)
self.assertTrue(os.path.exists(testdir))
rmtree(testroot, ignore_errors=1)
open(testroot, 'wb').close()
self.assertTrue(not os.path.exists(testdir))
self.assertRaises(OSError, utils.mkdirs, testdir)
os.unlink(testroot)
finally:
rmtree(testdir_base)
def test_split_path(self):
# Test swift.common.utils.split_account_path
self.assertRaises(ValueError, utils.split_path, '')
self.assertRaises(ValueError, utils.split_path, '/')
self.assertRaises(ValueError, utils.split_path, '//')
self.assertEqual(utils.split_path('/a'), ['a'])
self.assertRaises(ValueError, utils.split_path, '//a')
self.assertEqual(utils.split_path('/a/'), ['a'])
self.assertRaises(ValueError, utils.split_path, '/a/c')
self.assertRaises(ValueError, utils.split_path, '//c')
self.assertRaises(ValueError, utils.split_path, '/a/c/')
self.assertRaises(ValueError, utils.split_path, '/a//')
self.assertRaises(ValueError, utils.split_path, '/a', 2)
self.assertRaises(ValueError, utils.split_path, '/a', 2, 3)
self.assertRaises(ValueError, utils.split_path, '/a', 2, 3, True)
self.assertEqual(utils.split_path('/a/c', 2), ['a', 'c'])
self.assertEqual(utils.split_path('/a/c/o', 3), ['a', 'c', 'o'])
self.assertRaises(ValueError, utils.split_path, '/a/c/o/r', 3, 3)
self.assertEqual(utils.split_path('/a/c/o/r', 3, 3, True),
['a', 'c', 'o/r'])
self.assertEqual(utils.split_path('/a/c', 2, 3, True),
['a', 'c', None])
self.assertRaises(ValueError, utils.split_path, '/a', 5, 4)
self.assertEqual(utils.split_path('/a/c/', 2), ['a', 'c'])
self.assertEqual(utils.split_path('/a/c/', 2, 3), ['a', 'c', ''])
try:
utils.split_path('o\nn e', 2)
except ValueError as err:
self.assertEqual(str(err), 'Invalid path: o%0An%20e')
try:
utils.split_path('o\nn e', 2, 3, True)
except ValueError as err:
self.assertEqual(str(err), 'Invalid path: o%0An%20e')
def test_validate_device_partition(self):
# Test swift.common.utils.validate_device_partition
utils.validate_device_partition('foo', 'bar')
self.assertRaises(ValueError,
utils.validate_device_partition, '', '')
self.assertRaises(ValueError,
utils.validate_device_partition, '', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', '')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo/bar', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', 'foo/bar')
self.assertRaises(ValueError,
utils.validate_device_partition, '.', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, '..', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', '.')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', '..')
try:
utils.validate_device_partition('o\nn e', 'foo')
except ValueError as err:
self.assertEqual(str(err), 'Invalid device: o%0An%20e')
try:
utils.validate_device_partition('foo', 'o\nn e')
except ValueError as err:
self.assertEqual(str(err), 'Invalid partition: o%0An%20e')
def test_NullLogger(self):
# Test swift.common.utils.NullLogger
sio = StringIO()
nl = utils.NullLogger()
nl.write('test')
self.assertEqual(sio.getvalue(), '')
def test_LoggerFileObject(self):
orig_stdout = sys.stdout
orig_stderr = sys.stderr
sio = StringIO()
handler = logging.StreamHandler(sio)
logger = logging.getLogger()
logger.addHandler(handler)
lfo_stdout = utils.LoggerFileObject(logger)
lfo_stderr = utils.LoggerFileObject(logger)
lfo_stderr = utils.LoggerFileObject(logger, 'STDERR')
print('test1')
self.assertEqual(sio.getvalue(), '')
sys.stdout = lfo_stdout
print('test2')
self.assertEqual(sio.getvalue(), 'STDOUT: test2\n')
sys.stderr = lfo_stderr
print('test4', file=sys.stderr)
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n')
sys.stdout = orig_stdout
print('test5')
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n')
print('test6', file=sys.stderr)
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\n')
sys.stderr = orig_stderr
print('test8')
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\n')
lfo_stdout.writelines(['a', 'b', 'c'])
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\nSTDOUT: a#012b#012c\n')
lfo_stdout.close()
lfo_stderr.close()
lfo_stdout.write('d')
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\nSTDOUT: a#012b#012c\nSTDOUT: d\n')
lfo_stdout.flush()
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\nSTDOUT: a#012b#012c\nSTDOUT: d\n')
for lfo in (lfo_stdout, lfo_stderr):
got_exc = False
try:
for line in lfo:
pass
except Exception:
got_exc = True
self.assertTrue(got_exc)
got_exc = False
try:
for line in lfo:
pass
except Exception:
got_exc = True
self.assertTrue(got_exc)
self.assertRaises(IOError, lfo.read)
self.assertRaises(IOError, lfo.read, 1024)
self.assertRaises(IOError, lfo.readline)
self.assertRaises(IOError, lfo.readline, 1024)
lfo.tell()
def test_LoggerFileObject_recursion(self):
crashy_calls = [0]
class CrashyLogger(logging.Handler):
def emit(self, record):
crashy_calls[0] += 1
try:
# Pretend to be trying to send to syslog, but syslogd is
# dead. We need the raise here to set sys.exc_info.
raise socket.error(errno.ENOTCONN, "This is an ex-syslog")
except socket.error:
self.handleError(record)
logger = logging.getLogger()
logger.addHandler(CrashyLogger())
# Set up some real file descriptors for stdio. If you run
# nosetests with "-s", you already have real files there, but
# otherwise they're StringIO objects.
#
# In any case, since capture_stdio() closes sys.stdin and friends,
# we'd want to set up some sacrificial files so as to not goof up
# the testrunner.
new_stdin = open(os.devnull, 'r+b')
new_stdout = open(os.devnull, 'w+b')
new_stderr = open(os.devnull, 'w+b')
with contextlib.closing(new_stdin), contextlib.closing(new_stdout), \
contextlib.closing(new_stderr):
# logging.raiseExceptions is set to False in test/__init__.py, but
# is True in Swift daemons, and the error doesn't manifest without
# it.
with mock.patch('sys.stdin', new_stdin), \
mock.patch('sys.stdout', new_stdout), \
mock.patch('sys.stderr', new_stderr), \
mock.patch.object(logging, 'raiseExceptions', True):
# Note: since stdio is hooked up to /dev/null in here, using
# pdb is basically impossible. Sorry about that.
utils.capture_stdio(logger)
logger.info("I like ham")
            self.assertEqual(crashy_calls[0], 1)
def test_parse_options(self):
# Get a file that is definitely on disk
with NamedTemporaryFile() as f:
conf_file = f.name
conf, options = utils.parse_options(test_args=[conf_file])
self.assertEqual(conf, conf_file)
# assert defaults
self.assertEqual(options['verbose'], False)
self.assertTrue('once' not in options)
# assert verbose as option
conf, options = utils.parse_options(test_args=[conf_file, '-v'])
self.assertEqual(options['verbose'], True)
# check once option
conf, options = utils.parse_options(test_args=[conf_file],
once=True)
self.assertEqual(options['once'], False)
test_args = [conf_file, '--once']
conf, options = utils.parse_options(test_args=test_args, once=True)
self.assertEqual(options['once'], True)
# check options as arg parsing
test_args = [conf_file, 'once', 'plugin_name', 'verbose']
conf, options = utils.parse_options(test_args=test_args, once=True)
self.assertEqual(options['verbose'], True)
self.assertEqual(options['once'], True)
self.assertEqual(options['extra_args'], ['plugin_name'])
def test_parse_options_errors(self):
orig_stdout = sys.stdout
orig_stderr = sys.stderr
stdo = StringIO()
stde = StringIO()
utils.sys.stdout = stdo
utils.sys.stderr = stde
self.assertRaises(SystemExit, utils.parse_options, once=True,
test_args=[])
self.assertTrue('missing config' in stdo.getvalue())
# verify conf file must exist, context manager will delete temp file
with NamedTemporaryFile() as f:
conf_file = f.name
self.assertRaises(SystemExit, utils.parse_options, once=True,
test_args=[conf_file])
self.assertTrue('unable to locate' in stdo.getvalue())
# reset stdio
utils.sys.stdout = orig_stdout
utils.sys.stderr = orig_stderr
def test_dump_recon_cache(self):
testdir_base = mkdtemp()
testcache_file = os.path.join(testdir_base, 'cache.recon')
logger = utils.get_logger(None, 'server', log_route='server')
try:
submit_dict = {'key1': {'value1': 1, 'value2': 2}}
utils.dump_recon_cache(submit_dict, testcache_file, logger)
fd = open(testcache_file)
file_dict = json.loads(fd.readline())
fd.close()
self.assertEqual(submit_dict, file_dict)
# Use a nested entry
submit_dict = {'key1': {'key2': {'value1': 1, 'value2': 2}}}
result_dict = {'key1': {'key2': {'value1': 1, 'value2': 2},
'value1': 1, 'value2': 2}}
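            # dump_recon_cache merges the nested entry into the existing cache
            # contents rather than overwriting the whole file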
utils.dump_recon_cache(submit_dict, testcache_file, logger)
fd = open(testcache_file)
file_dict = json.loads(fd.readline())
fd.close()
self.assertEqual(result_dict, file_dict)
finally:
rmtree(testdir_base)
def test_dump_recon_cache_permission_denied(self):
testdir_base = mkdtemp()
testcache_file = os.path.join(testdir_base, 'cache.recon')
class MockLogger(object):
def __init__(self):
self._excs = []
def exception(self, message):
_junk, exc, _junk = sys.exc_info()
self._excs.append(exc)
logger = MockLogger()
try:
submit_dict = {'key1': {'value1': 1, 'value2': 2}}
with mock.patch(
'swift.common.utils.NamedTemporaryFile',
side_effect=IOError(13, 'Permission Denied')):
utils.dump_recon_cache(submit_dict, testcache_file, logger)
self.assertIsInstance(logger._excs[0], IOError)
finally:
rmtree(testdir_base)
def test_get_logger(self):
sio = StringIO()
logger = logging.getLogger('server')
logger.addHandler(logging.StreamHandler(sio))
logger = utils.get_logger(None, 'server', log_route='server')
logger.warning('test1')
self.assertEqual(sio.getvalue(), 'test1\n')
logger.debug('test2')
self.assertEqual(sio.getvalue(), 'test1\n')
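        # debug is below the default log level, so nothing new was emitted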
logger = utils.get_logger({'log_level': 'DEBUG'}, 'server',
log_route='server')
logger.debug('test3')
self.assertEqual(sio.getvalue(), 'test1\ntest3\n')
# Doesn't really test that the log facility is truly being used all the
# way to syslog; but exercises the code.
logger = utils.get_logger({'log_facility': 'LOG_LOCAL3'}, 'server',
log_route='server')
logger.warning('test4')
self.assertEqual(sio.getvalue(),
'test1\ntest3\ntest4\n')
# make sure debug doesn't log by default
logger.debug('test5')
self.assertEqual(sio.getvalue(),
'test1\ntest3\ntest4\n')
# make sure notice lvl logs by default
logger.notice('test6')
self.assertEqual(sio.getvalue(),
'test1\ntest3\ntest4\ntest6\n')
def test_get_logger_sysloghandler_plumbing(self):
orig_sysloghandler = utils.SysLogHandler
syslog_handler_args = []
def syslog_handler_catcher(*args, **kwargs):
syslog_handler_args.append((args, kwargs))
return orig_sysloghandler(*args, **kwargs)
syslog_handler_catcher.LOG_LOCAL0 = orig_sysloghandler.LOG_LOCAL0
syslog_handler_catcher.LOG_LOCAL3 = orig_sysloghandler.LOG_LOCAL3
try:
utils.SysLogHandler = syslog_handler_catcher
utils.get_logger({
'log_facility': 'LOG_LOCAL3',
}, 'server', log_route='server')
expected_args = [((), {'address': '/dev/log',
'facility': orig_sysloghandler.LOG_LOCAL3})]
if not os.path.exists('/dev/log') or \
os.path.isfile('/dev/log') or \
os.path.isdir('/dev/log'):
# Since socket on OSX is in /var/run/syslog, there will be
# a fallback to UDP.
expected_args.append(
((), {'facility': orig_sysloghandler.LOG_LOCAL3}))
self.assertEqual(expected_args, syslog_handler_args)
syslog_handler_args = []
utils.get_logger({
'log_facility': 'LOG_LOCAL3',
'log_address': '/foo/bar',
}, 'server', log_route='server')
self.assertEqual([
((), {'address': '/foo/bar',
'facility': orig_sysloghandler.LOG_LOCAL3}),
# Second call is because /foo/bar didn't exist (and wasn't a
# UNIX domain socket).
((), {'facility': orig_sysloghandler.LOG_LOCAL3})],
syslog_handler_args)
# Using UDP with default port
syslog_handler_args = []
utils.get_logger({
'log_udp_host': 'syslog.funtimes.com',
}, 'server', log_route='server')
self.assertEqual([
((), {'address': ('syslog.funtimes.com',
logging.handlers.SYSLOG_UDP_PORT),
'facility': orig_sysloghandler.LOG_LOCAL0})],
syslog_handler_args)
# Using UDP with non-default port
syslog_handler_args = []
utils.get_logger({
'log_udp_host': 'syslog.funtimes.com',
'log_udp_port': '2123',
}, 'server', log_route='server')
self.assertEqual([
((), {'address': ('syslog.funtimes.com', 2123),
'facility': orig_sysloghandler.LOG_LOCAL0})],
syslog_handler_args)
finally:
utils.SysLogHandler = orig_sysloghandler
@reset_logger_state
def test_clean_logger_exception(self):
# setup stream logging
sio = StringIO()
logger = utils.get_logger(None)
handler = logging.StreamHandler(sio)
logger.logger.addHandler(handler)
def strip_value(sio):
sio.seek(0)
v = sio.getvalue()
sio.truncate(0)
return v
def log_exception(exc):
try:
raise exc
except (Exception, Timeout):
logger.exception('blah')
try:
# establish base case
self.assertEqual(strip_value(sio), '')
logger.info('test')
self.assertEqual(strip_value(sio), 'test\n')
self.assertEqual(strip_value(sio), '')
logger.info('test')
logger.info('test')
self.assertEqual(strip_value(sio), 'test\ntest\n')
self.assertEqual(strip_value(sio), '')
# test OSError
for en in (errno.EIO, errno.ENOSPC):
log_exception(OSError(en, 'my %s error message' % en))
log_msg = strip_value(sio)
self.assertTrue('Traceback' not in log_msg)
self.assertTrue('my %s error message' % en in log_msg)
# unfiltered
log_exception(OSError())
self.assertTrue('Traceback' in strip_value(sio))
# test socket.error
log_exception(socket.error(errno.ECONNREFUSED,
'my error message'))
log_msg = strip_value(sio)
self.assertTrue('Traceback' not in log_msg)
            self.assertTrue('my error message' not in log_msg)
self.assertTrue('Connection refused' in log_msg)
log_exception(socket.error(errno.EHOSTUNREACH,
'my error message'))
log_msg = strip_value(sio)
self.assertTrue('Traceback' not in log_msg)
self.assertTrue('my error message' not in log_msg)
self.assertTrue('Host unreachable' in log_msg)
log_exception(socket.error(errno.ETIMEDOUT, 'my error message'))
log_msg = strip_value(sio)
self.assertTrue('Traceback' not in log_msg)
self.assertTrue('my error message' not in log_msg)
self.assertTrue('Connection timeout' in log_msg)
# unfiltered
log_exception(socket.error(0, 'my error message'))
log_msg = strip_value(sio)
self.assertTrue('Traceback' in log_msg)
self.assertTrue('my error message' in log_msg)
# test eventlet.Timeout
connection_timeout = ConnectionTimeout(42, 'my error message')
log_exception(connection_timeout)
log_msg = strip_value(sio)
self.assertTrue('Traceback' not in log_msg)
self.assertTrue('ConnectionTimeout' in log_msg)
self.assertTrue('(42s)' in log_msg)
self.assertTrue('my error message' not in log_msg)
connection_timeout.cancel()
message_timeout = MessageTimeout(42, 'my error message')
log_exception(message_timeout)
log_msg = strip_value(sio)
self.assertTrue('Traceback' not in log_msg)
self.assertTrue('MessageTimeout' in log_msg)
self.assertTrue('(42s)' in log_msg)
self.assertTrue('my error message' in log_msg)
message_timeout.cancel()
# test unhandled
log_exception(Exception('my error message'))
log_msg = strip_value(sio)
self.assertTrue('Traceback' in log_msg)
self.assertTrue('my error message' in log_msg)
finally:
logger.logger.removeHandler(handler)
@reset_logger_state
def test_swift_log_formatter_max_line_length(self):
# setup stream logging
sio = StringIO()
logger = utils.get_logger(None)
handler = logging.StreamHandler(sio)
formatter = utils.SwiftLogFormatter(max_line_length=10)
handler.setFormatter(formatter)
logger.logger.addHandler(handler)
def strip_value(sio):
sio.seek(0)
v = sio.getvalue()
sio.truncate(0)
return v
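        # long messages keep head and tail around ' ... '; limits too small
        # for that form just chop the message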
try:
logger.info('12345')
self.assertEqual(strip_value(sio), '12345\n')
logger.info('1234567890')
self.assertEqual(strip_value(sio), '1234567890\n')
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '12 ... de\n')
formatter.max_line_length = 11
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '123 ... cde\n')
formatter.max_line_length = 0
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1234567890abcde\n')
formatter.max_line_length = 1
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1\n')
formatter.max_line_length = 2
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '12\n')
formatter.max_line_length = 3
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '123\n')
formatter.max_line_length = 4
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1234\n')
formatter.max_line_length = 5
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '12345\n')
formatter.max_line_length = 6
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '123456\n')
formatter.max_line_length = 7
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1 ... e\n')
formatter.max_line_length = -10
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1234567890abcde\n')
finally:
logger.logger.removeHandler(handler)
@reset_logger_state
def test_swift_log_formatter(self):
# setup stream logging
sio = StringIO()
logger = utils.get_logger(None)
handler = logging.StreamHandler(sio)
handler.setFormatter(utils.SwiftLogFormatter())
logger.logger.addHandler(handler)
def strip_value(sio):
sio.seek(0)
v = sio.getvalue()
sio.truncate(0)
return v
try:
self.assertFalse(logger.txn_id)
logger.error('my error message')
log_msg = strip_value(sio)
self.assertTrue('my error message' in log_msg)
self.assertTrue('txn' not in log_msg)
logger.txn_id = '12345'
logger.error('test')
log_msg = strip_value(sio)
self.assertTrue('txn' in log_msg)
self.assertTrue('12345' in log_msg)
# test no txn on info message
self.assertEqual(logger.txn_id, '12345')
logger.info('test')
log_msg = strip_value(sio)
self.assertTrue('txn' not in log_msg)
self.assertTrue('12345' not in log_msg)
# test txn already in message
self.assertEqual(logger.txn_id, '12345')
logger.warning('test 12345 test')
self.assertEqual(strip_value(sio), 'test 12345 test\n')
# Test multi line collapsing
logger.error('my\nerror\nmessage')
log_msg = strip_value(sio)
self.assertTrue('my#012error#012message' in log_msg)
# test client_ip
self.assertFalse(logger.client_ip)
logger.error('my error message')
log_msg = strip_value(sio)
self.assertTrue('my error message' in log_msg)
self.assertTrue('client_ip' not in log_msg)
logger.client_ip = '1.2.3.4'
logger.error('test')
log_msg = strip_value(sio)
self.assertTrue('client_ip' in log_msg)
self.assertTrue('1.2.3.4' in log_msg)
# test no client_ip on info message
self.assertEqual(logger.client_ip, '1.2.3.4')
logger.info('test')
log_msg = strip_value(sio)
self.assertTrue('client_ip' not in log_msg)
self.assertTrue('1.2.3.4' not in log_msg)
# test client_ip (and txn) already in message
self.assertEqual(logger.client_ip, '1.2.3.4')
logger.warning('test 1.2.3.4 test 12345')
self.assertEqual(strip_value(sio), 'test 1.2.3.4 test 12345\n')
finally:
logger.logger.removeHandler(handler)
def test_storage_directory(self):
self.assertEqual(utils.storage_directory('objects', '1', 'ABCDEF'),
'objects/1/DEF/ABCDEF')
def test_expand_ipv6(self):
expanded_ipv6 = "fe80::204:61ff:fe9d:f156"
upper_ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156"
self.assertEqual(expanded_ipv6, utils.expand_ipv6(upper_ipv6))
omit_ipv6 = "fe80:0000:0000::0204:61ff:fe9d:f156"
self.assertEqual(expanded_ipv6, utils.expand_ipv6(omit_ipv6))
less_num_ipv6 = "fe80:0:00:000:0204:61ff:fe9d:f156"
self.assertEqual(expanded_ipv6, utils.expand_ipv6(less_num_ipv6))
def test_whataremyips(self):
myips = utils.whataremyips()
self.assertTrue(len(myips) > 1)
self.assertTrue('127.0.0.1' in myips)
def test_whataremyips_bind_to_all(self):
for any_addr in ('0.0.0.0', '0000:0000:0000:0000:0000:0000:0000:0000',
'::0', '::0000', '::',
# Wacky parse-error input produces all IPs
'I am a bear'):
myips = utils.whataremyips(any_addr)
self.assertTrue(len(myips) > 1)
self.assertTrue('127.0.0.1' in myips)
def test_whataremyips_bind_ip_specific(self):
self.assertEqual(['1.2.3.4'], utils.whataremyips('1.2.3.4'))
def test_whataremyips_error(self):
def my_interfaces():
return ['eth0']
def my_ifaddress_error(interface):
raise ValueError
with patch('netifaces.interfaces', my_interfaces), \
patch('netifaces.ifaddresses', my_ifaddress_error):
self.assertEqual(utils.whataremyips(), [])
def test_whataremyips_ipv6(self):
test_ipv6_address = '2001:6b0:dead:beef:2::32'
test_interface = 'eth0'
def my_ipv6_interfaces():
return ['eth0']
def my_ipv6_ifaddresses(interface):
return {AF_INET6:
[{'netmask': 'ffff:ffff:ffff:ffff::',
'addr': '%s%%%s' % (test_ipv6_address, test_interface)}]}
with patch('netifaces.interfaces', my_ipv6_interfaces), \
patch('netifaces.ifaddresses', my_ipv6_ifaddresses):
myips = utils.whataremyips()
self.assertEqual(len(myips), 1)
self.assertEqual(myips[0], test_ipv6_address)
def test_hash_path(self):
# Yes, these tests are deliberately very fragile. We want to make sure
        # that if someone changes the results hash_path produces, they know it
with mock.patch('swift.common.utils.HASH_PATH_PREFIX', ''):
self.assertEqual(utils.hash_path('a'),
'1c84525acb02107ea475dcd3d09c2c58')
self.assertEqual(utils.hash_path('a', 'c'),
'33379ecb053aa5c9e356c68997cbb59e')
self.assertEqual(utils.hash_path('a', 'c', 'o'),
'06fbf0b514e5199dfc4e00f42eb5ea83')
self.assertEqual(utils.hash_path('a', 'c', 'o', raw_digest=False),
'06fbf0b514e5199dfc4e00f42eb5ea83')
self.assertEqual(utils.hash_path('a', 'c', 'o', raw_digest=True),
'\x06\xfb\xf0\xb5\x14\xe5\x19\x9d\xfcN'
'\x00\xf4.\xb5\xea\x83')
self.assertRaises(ValueError, utils.hash_path, 'a', object='o')
utils.HASH_PATH_PREFIX = 'abcdef'
self.assertEqual(utils.hash_path('a', 'c', 'o', raw_digest=False),
'363f9b535bfb7d17a43a46a358afca0e')
def test_validate_hash_conf(self):
# no section causes InvalidHashPathConfigError
self._test_validate_hash_conf([], [], True)
# 'swift-hash' section is there but no options causes
# InvalidHashPathConfigError
self._test_validate_hash_conf(['swift-hash'], [], True)
# if we have the section and either of prefix or suffix,
# InvalidHashPathConfigError doesn't occur
self._test_validate_hash_conf(
['swift-hash'], ['swift_hash_path_prefix'], False)
self._test_validate_hash_conf(
['swift-hash'], ['swift_hash_path_suffix'], False)
# definitely, we have the section and both of them,
# InvalidHashPathConfigError doesn't occur
self._test_validate_hash_conf(
['swift-hash'],
['swift_hash_path_suffix', 'swift_hash_path_prefix'], False)
# But invalid section name should make an error even if valid
# options are there
self._test_validate_hash_conf(
['swift-hash-xxx'],
['swift_hash_path_suffix', 'swift_hash_path_prefix'], True)
def _test_validate_hash_conf(self, sections, options, should_raise_error):
class FakeConfigParser(object):
def read(self, conf_path):
return True
def get(self, section, option):
if section not in sections:
raise NoSectionError('section error')
elif option not in options:
raise NoOptionError('option error', 'this option')
else:
return 'some_option_value'
with mock.patch('swift.common.utils.HASH_PATH_PREFIX', ''), \
mock.patch('swift.common.utils.HASH_PATH_SUFFIX', ''), \
mock.patch('swift.common.utils.ConfigParser',
FakeConfigParser):
try:
utils.validate_hash_conf()
except utils.InvalidHashPathConfigError:
if not should_raise_error:
self.fail('validate_hash_conf should not raise an error')
else:
if should_raise_error:
self.fail('validate_hash_conf should raise an error')
def test_load_libc_function(self):
self.assertTrue(callable(
utils.load_libc_function('printf')))
self.assertTrue(callable(
utils.load_libc_function('some_not_real_function')))
self.assertRaises(AttributeError,
utils.load_libc_function, 'some_not_real_function',
fail_if_missing=True)
def test_readconf(self):
conf = '''[section1]
foo = bar
[section2]
log_name = yarr'''
# setup a real file
fd, temppath = tempfile.mkstemp(dir='/tmp')
with os.fdopen(fd, 'wb') as f:
f.write(conf)
make_filename = lambda: temppath
# setup a file stream
make_fp = lambda: StringIO(conf)
for conf_object_maker in (make_filename, make_fp):
conffile = conf_object_maker()
result = utils.readconf(conffile)
expected = {'__file__': conffile,
'log_name': None,
'section1': {'foo': 'bar'},
'section2': {'log_name': 'yarr'}}
self.assertEqual(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile, 'section1')
expected = {'__file__': conffile, 'log_name': 'section1',
'foo': 'bar'}
self.assertEqual(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile,
'section2').get('log_name')
expected = 'yarr'
self.assertEqual(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile, 'section1',
log_name='foo').get('log_name')
expected = 'foo'
self.assertEqual(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile, 'section1',
defaults={'bar': 'baz'})
expected = {'__file__': conffile, 'log_name': 'section1',
'foo': 'bar', 'bar': 'baz'}
self.assertEqual(result, expected)
self.assertRaises(SystemExit, utils.readconf, temppath, 'section3')
os.unlink(temppath)
self.assertRaises(SystemExit, utils.readconf, temppath)
def test_readconf_raw(self):
conf = '''[section1]
foo = bar
[section2]
log_name = %(yarr)s'''
# setup a real file
fd, temppath = tempfile.mkstemp(dir='/tmp')
with os.fdopen(fd, 'wb') as f:
f.write(conf)
make_filename = lambda: temppath
# setup a file stream
make_fp = lambda: StringIO(conf)
for conf_object_maker in (make_filename, make_fp):
conffile = conf_object_maker()
result = utils.readconf(conffile, raw=True)
expected = {'__file__': conffile,
'log_name': None,
'section1': {'foo': 'bar'},
'section2': {'log_name': '%(yarr)s'}}
self.assertEqual(result, expected)
os.unlink(temppath)
self.assertRaises(SystemExit, utils.readconf, temppath)
def test_readconf_dir(self):
config_dir = {
'server.conf.d/01.conf': """
[DEFAULT]
port = 8080
foo = bar
[section1]
name=section1
""",
'server.conf.d/section2.conf': """
[DEFAULT]
port = 8081
bar = baz
[section2]
name=section2
""",
'other-server.conf.d/01.conf': """
[DEFAULT]
port = 8082
[section3]
name=section3
"""
}
# strip indent from test config contents
config_dir = dict((f, dedent(c)) for (f, c) in config_dir.items())
with temptree(*zip(*config_dir.items())) as path:
conf_dir = os.path.join(path, 'server.conf.d')
conf = utils.readconf(conf_dir)
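            # [DEFAULT] values from every file in the dir are merged into each
            # section; files are read in sorted order, so port 8081 wins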
expected = {
'__file__': os.path.join(path, 'server.conf.d'),
'log_name': None,
'section1': {
'port': '8081',
'foo': 'bar',
'bar': 'baz',
'name': 'section1',
},
'section2': {
'port': '8081',
'foo': 'bar',
'bar': 'baz',
'name': 'section2',
},
}
self.assertEqual(conf, expected)
def test_readconf_dir_ignores_hidden_and_nondotconf_files(self):
config_dir = {
'server.conf.d/01.conf': """
[section1]
port = 8080
""",
'server.conf.d/.01.conf.swp': """
[section]
port = 8081
""",
'server.conf.d/01.conf-bak': """
[section]
port = 8082
""",
}
# strip indent from test config contents
config_dir = dict((f, dedent(c)) for (f, c) in config_dir.items())
with temptree(*zip(*config_dir.items())) as path:
conf_dir = os.path.join(path, 'server.conf.d')
conf = utils.readconf(conf_dir)
expected = {
'__file__': os.path.join(path, 'server.conf.d'),
'log_name': None,
'section1': {
'port': '8080',
},
}
self.assertEqual(conf, expected)
def test_drop_privileges(self):
user = getuser()
# over-ride os with mock
required_func_calls = ('setgroups', 'setgid', 'setuid', 'setsid',
'chdir', 'umask')
utils.os = MockOs(called_funcs=required_func_calls)
# exercise the code
utils.drop_privileges(user)
for func in required_func_calls:
self.assertTrue(utils.os.called_funcs[func])
import pwd
self.assertEqual(pwd.getpwnam(user)[5], utils.os.environ['HOME'])
groups = [g.gr_gid for g in grp.getgrall() if user in g.gr_mem]
groups.append(pwd.getpwnam(user).pw_gid)
self.assertEqual(set(groups), set(os.getgroups()))
# reset; test same args, OSError trying to get session leader
utils.os = MockOs(called_funcs=required_func_calls,
raise_funcs=('setsid',))
for func in required_func_calls:
self.assertFalse(utils.os.called_funcs.get(func, False))
utils.drop_privileges(user)
for func in required_func_calls:
self.assertTrue(utils.os.called_funcs[func])
def test_drop_privileges_no_call_setsid(self):
user = getuser()
# over-ride os with mock
required_func_calls = ('setgroups', 'setgid', 'setuid', 'chdir',
'umask')
bad_func_calls = ('setsid',)
utils.os = MockOs(called_funcs=required_func_calls,
raise_funcs=bad_func_calls)
# exercise the code
utils.drop_privileges(user, call_setsid=False)
for func in required_func_calls:
self.assertTrue(utils.os.called_funcs[func])
for func in bad_func_calls:
self.assertTrue(func not in utils.os.called_funcs)
@reset_logger_state
def test_capture_stdio(self):
# stubs
logger = utils.get_logger(None, 'dummy')
# mock utils system modules
_orig_sys = utils.sys
_orig_os = utils.os
try:
utils.sys = MockSys()
utils.os = MockOs()
# basic test
utils.capture_stdio(logger)
self.assertTrue(utils.sys.excepthook is not None)
self.assertEqual(utils.os.closed_fds, utils.sys.stdio_fds)
self.assertTrue(
isinstance(utils.sys.stdout, utils.LoggerFileObject))
self.assertTrue(
isinstance(utils.sys.stderr, utils.LoggerFileObject))
# reset; test same args, but exc when trying to close stdio
utils.os = MockOs(raise_funcs=('dup2',))
utils.sys = MockSys()
# test unable to close stdio
utils.capture_stdio(logger)
self.assertTrue(utils.sys.excepthook is not None)
self.assertEqual(utils.os.closed_fds, [])
self.assertTrue(
isinstance(utils.sys.stdout, utils.LoggerFileObject))
self.assertTrue(
isinstance(utils.sys.stderr, utils.LoggerFileObject))
# reset; test some other args
utils.os = MockOs()
utils.sys = MockSys()
logger = utils.get_logger(None, log_to_console=True)
# test console log
utils.capture_stdio(logger, capture_stdout=False,
capture_stderr=False)
self.assertTrue(utils.sys.excepthook is not None)
# when logging to console, stderr remains open
self.assertEqual(utils.os.closed_fds, utils.sys.stdio_fds[:2])
reset_loggers()
# stdio not captured
self.assertFalse(isinstance(utils.sys.stdout,
utils.LoggerFileObject))
self.assertFalse(isinstance(utils.sys.stderr,
utils.LoggerFileObject))
finally:
utils.sys = _orig_sys
utils.os = _orig_os
@reset_logger_state
def test_get_logger_console(self):
logger = utils.get_logger(None)
console_handlers = [h for h in logger.logger.handlers if
isinstance(h, logging.StreamHandler)]
self.assertFalse(console_handlers)
logger = utils.get_logger(None, log_to_console=True)
console_handlers = [h for h in logger.logger.handlers if
isinstance(h, logging.StreamHandler)]
self.assertTrue(console_handlers)
# make sure you can't have two console handlers
self.assertEqual(len(console_handlers), 1)
old_handler = console_handlers[0]
logger = utils.get_logger(None, log_to_console=True)
console_handlers = [h for h in logger.logger.handlers if
isinstance(h, logging.StreamHandler)]
self.assertEqual(len(console_handlers), 1)
new_handler = console_handlers[0]
self.assertNotEqual(new_handler, old_handler)
def verify_under_pseudo_time(
self, func, target_runtime_ms=1, *args, **kwargs):
curr_time = [42.0]
def my_time():
curr_time[0] += 0.001
return curr_time[0]
def my_sleep(duration):
curr_time[0] += 0.001
curr_time[0] += duration
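        # patch out the clock and sleep so the measured runtime is
        # deterministic and the test never actually sleeps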
with patch('time.time', my_time), \
patch('time.sleep', my_sleep), \
patch('eventlet.sleep', my_sleep):
start = time.time()
func(*args, **kwargs)
# make sure it's accurate to 10th of a second, converting the time
# difference to milliseconds, 100 milliseconds is 1/10 of a second
diff_from_target_ms = abs(
target_runtime_ms - ((time.time() - start) * 1000))
self.assertTrue(diff_from_target_ms < 100,
"Expected %d < 100" % diff_from_target_ms)
def test_ratelimit_sleep(self):
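        # a zero or negative max_rate effectively disables rate limiting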
def testfunc():
running_time = 0
for i in range(100):
running_time = utils.ratelimit_sleep(running_time, -5)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=1)
def testfunc():
running_time = 0
for i in range(100):
running_time = utils.ratelimit_sleep(running_time, 0)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=1)
def testfunc():
running_time = 0
for i in range(50):
running_time = utils.ratelimit_sleep(running_time, 200)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=250)
def test_ratelimit_sleep_with_incr(self):
def testfunc():
running_time = 0
vals = [5, 17, 0, 3, 11, 30,
40, 4, 13, 2, -1] * 2 # adds up to 248
total = 0
for i in vals:
running_time = utils.ratelimit_sleep(running_time,
500, incr_by=i)
total += i
self.assertEqual(248, total)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=500)
def test_ratelimit_sleep_with_sleep(self):
def testfunc():
running_time = 0
sleeps = [0] * 7 + [.2] * 3 + [0] * 30
for i in sleeps:
running_time = utils.ratelimit_sleep(running_time, 40,
rate_buffer=1)
time.sleep(i)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=900)
def test_urlparse(self):
parsed = utils.urlparse('http://127.0.0.1/')
self.assertEqual(parsed.scheme, 'http')
self.assertEqual(parsed.hostname, '127.0.0.1')
self.assertEqual(parsed.path, '/')
parsed = utils.urlparse('http://127.0.0.1:8080/')
self.assertEqual(parsed.port, 8080)
parsed = utils.urlparse('https://127.0.0.1/')
self.assertEqual(parsed.scheme, 'https')
parsed = utils.urlparse('http://[::1]/')
self.assertEqual(parsed.hostname, '::1')
parsed = utils.urlparse('http://[::1]:8080/')
self.assertEqual(parsed.hostname, '::1')
self.assertEqual(parsed.port, 8080)
parsed = utils.urlparse('www.example.com')
self.assertEqual(parsed.hostname, '')
def test_search_tree(self):
# file match & ext miss
with temptree(['asdf.conf', 'blarg.conf', 'asdf.cfg']) as t:
asdf = utils.search_tree(t, 'a*', '.conf')
self.assertEqual(len(asdf), 1)
self.assertEqual(asdf[0],
os.path.join(t, 'asdf.conf'))
# multi-file match & glob miss & sort
with temptree(['application.bin', 'apple.bin', 'apropos.bin']) as t:
app_bins = utils.search_tree(t, 'app*', 'bin')
self.assertEqual(len(app_bins), 2)
self.assertEqual(app_bins[0],
os.path.join(t, 'apple.bin'))
self.assertEqual(app_bins[1],
os.path.join(t, 'application.bin'))
# test file in folder & ext miss & glob miss
files = (
'sub/file1.ini',
'sub/file2.conf',
'sub.bin',
'bus.ini',
'bus/file3.ini',
)
with temptree(files) as t:
sub_ini = utils.search_tree(t, 'sub*', '.ini')
self.assertEqual(len(sub_ini), 1)
self.assertEqual(sub_ini[0],
os.path.join(t, 'sub/file1.ini'))
# test multi-file in folder & sub-folder & ext miss & glob miss
files = (
'folder_file.txt',
'folder/1.txt',
'folder/sub/2.txt',
'folder2/3.txt',
            'Folder3/4.txt',
'folder.rc',
)
with temptree(files) as t:
folder_texts = utils.search_tree(t, 'folder*', '.txt')
self.assertEqual(len(folder_texts), 4)
f1 = os.path.join(t, 'folder_file.txt')
f2 = os.path.join(t, 'folder/1.txt')
f3 = os.path.join(t, 'folder/sub/2.txt')
f4 = os.path.join(t, 'folder2/3.txt')
for f in [f1, f2, f3, f4]:
self.assertTrue(f in folder_texts)
def test_search_tree_with_directory_ext_match(self):
files = (
'object-server/object-server.conf-base',
'object-server/1.conf.d/base.conf',
'object-server/1.conf.d/1.conf',
'object-server/2.conf.d/base.conf',
'object-server/2.conf.d/2.conf',
'object-server/3.conf.d/base.conf',
'object-server/3.conf.d/3.conf',
'object-server/4.conf.d/base.conf',
'object-server/4.conf.d/4.conf',
)
with temptree(files) as t:
conf_dirs = utils.search_tree(t, 'object-server', '.conf',
dir_ext='conf.d')
self.assertEqual(len(conf_dirs), 4)
for i in range(4):
conf_dir = os.path.join(t, 'object-server/%d.conf.d' % (i + 1))
self.assertTrue(conf_dir in conf_dirs)
def test_search_tree_conf_dir_with_named_conf_match(self):
files = (
'proxy-server/proxy-server.conf.d/base.conf',
'proxy-server/proxy-server.conf.d/pipeline.conf',
'proxy-server/proxy-noauth.conf.d/base.conf',
'proxy-server/proxy-noauth.conf.d/pipeline.conf',
)
with temptree(files) as t:
conf_dirs = utils.search_tree(t, 'proxy-server', 'noauth.conf',
dir_ext='noauth.conf.d')
self.assertEqual(len(conf_dirs), 1)
conf_dir = conf_dirs[0]
expected = os.path.join(t, 'proxy-server/proxy-noauth.conf.d')
self.assertEqual(conf_dir, expected)
def test_search_tree_conf_dir_pid_with_named_conf_match(self):
files = (
'proxy-server/proxy-server.pid.d',
'proxy-server/proxy-noauth.pid.d',
)
with temptree(files) as t:
pid_files = utils.search_tree(t, 'proxy-server',
exts=['noauth.pid', 'noauth.pid.d'])
self.assertEqual(len(pid_files), 1)
pid_file = pid_files[0]
expected = os.path.join(t, 'proxy-server/proxy-noauth.pid.d')
self.assertEqual(pid_file, expected)
def test_write_file(self):
with temptree([]) as t:
file_name = os.path.join(t, 'test')
utils.write_file(file_name, 'test')
with open(file_name, 'r') as f:
contents = f.read()
self.assertEqual(contents, 'test')
# and also subdirs
file_name = os.path.join(t, 'subdir/test2')
utils.write_file(file_name, 'test2')
with open(file_name, 'r') as f:
contents = f.read()
self.assertEqual(contents, 'test2')
# but can't over-write files
file_name = os.path.join(t, 'subdir/test2/test3')
self.assertRaises(IOError, utils.write_file, file_name,
'test3')
def test_remove_file(self):
with temptree([]) as t:
file_name = os.path.join(t, 'blah.pid')
# assert no raise
self.assertEqual(os.path.exists(file_name), False)
self.assertEqual(utils.remove_file(file_name), None)
with open(file_name, 'w') as f:
f.write('1')
self.assertTrue(os.path.exists(file_name))
self.assertEqual(utils.remove_file(file_name), None)
self.assertFalse(os.path.exists(file_name))
def test_human_readable(self):
self.assertEqual(utils.human_readable(0), '0')
self.assertEqual(utils.human_readable(1), '1')
self.assertEqual(utils.human_readable(10), '10')
self.assertEqual(utils.human_readable(100), '100')
self.assertEqual(utils.human_readable(999), '999')
self.assertEqual(utils.human_readable(1024), '1Ki')
self.assertEqual(utils.human_readable(1535), '1Ki')
self.assertEqual(utils.human_readable(1536), '2Ki')
self.assertEqual(utils.human_readable(1047552), '1023Ki')
self.assertEqual(utils.human_readable(1048063), '1023Ki')
self.assertEqual(utils.human_readable(1048064), '1Mi')
self.assertEqual(utils.human_readable(1048576), '1Mi')
self.assertEqual(utils.human_readable(1073741824), '1Gi')
self.assertEqual(utils.human_readable(1099511627776), '1Ti')
self.assertEqual(utils.human_readable(1125899906842624), '1Pi')
self.assertEqual(utils.human_readable(1152921504606846976), '1Ei')
self.assertEqual(utils.human_readable(1180591620717411303424), '1Zi')
self.assertEqual(utils.human_readable(1208925819614629174706176),
'1Yi')
self.assertEqual(utils.human_readable(1237940039285380274899124224),
'1024Yi')
def test_validate_sync_to(self):
fname = 'container-sync-realms.conf'
fcontents = '''
[US]
key = 9ff3b71c849749dbaec4ccdd3cbab62b
cluster_dfw1 = http://dfw1.host/v1/
'''
with temptree([fname], [fcontents]) as tempdir:
logger = FakeLogger()
fpath = os.path.join(tempdir, fname)
csr = ContainerSyncRealms(fpath, logger)
for realms_conf in (None, csr):
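                # expected results are (error, validated_url, realm, realm_key)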
for goodurl, result in (
('http://1.1.1.1/v1/a/c',
(None, 'http://1.1.1.1/v1/a/c', None, None)),
('http://1.1.1.1:8080/a/c',
(None, 'http://1.1.1.1:8080/a/c', None, None)),
('http://2.2.2.2/a/c',
(None, 'http://2.2.2.2/a/c', None, None)),
('https://1.1.1.1/v1/a/c',
(None, 'https://1.1.1.1/v1/a/c', None, None)),
('//US/DFW1/a/c',
(None, 'http://dfw1.host/v1/a/c', 'US',
'9ff3b71c849749dbaec4ccdd3cbab62b')),
('//us/DFW1/a/c',
(None, 'http://dfw1.host/v1/a/c', 'US',
'9ff3b71c849749dbaec4ccdd3cbab62b')),
('//us/dfw1/a/c',
(None, 'http://dfw1.host/v1/a/c', 'US',
'9ff3b71c849749dbaec4ccdd3cbab62b')),
('//',
(None, None, None, None)),
('',
(None, None, None, None))):
if goodurl.startswith('//') and not realms_conf:
self.assertEqual(
utils.validate_sync_to(
goodurl, ['1.1.1.1', '2.2.2.2'], realms_conf),
(None, None, None, None))
else:
self.assertEqual(
utils.validate_sync_to(
goodurl, ['1.1.1.1', '2.2.2.2'], realms_conf),
result)
for badurl, result in (
('http://1.1.1.1',
('Path required in X-Container-Sync-To', None, None,
None)),
('httpq://1.1.1.1/v1/a/c',
('Invalid scheme \'httpq\' in X-Container-Sync-To, '
'must be "//", "http", or "https".', None, None,
None)),
('http://1.1.1.1/v1/a/c?query',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.1/v1/a/c#frag',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.1/v1/a/c?query#frag',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.1/v1/a/c?query=param',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.1/v1/a/c?query=param#frag',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.2/v1/a/c',
("Invalid host '1.1.1.2' in X-Container-Sync-To",
None, None, None)),
('//us/invalid/a/c',
("No cluster endpoint for 'us' 'invalid'", None,
None, None)),
('//invalid/dfw1/a/c',
("No realm key for 'invalid'", None, None, None)),
('//us/invalid1/a/',
("Invalid X-Container-Sync-To format "
"'//us/invalid1/a/'", None, None, None)),
('//us/invalid1/a',
("Invalid X-Container-Sync-To format "
"'//us/invalid1/a'", None, None, None)),
('//us/invalid1/',
("Invalid X-Container-Sync-To format "
"'//us/invalid1/'", None, None, None)),
('//us/invalid1',
("Invalid X-Container-Sync-To format "
"'//us/invalid1'", None, None, None)),
('//us/',
("Invalid X-Container-Sync-To format "
"'//us/'", None, None, None)),
('//us',
("Invalid X-Container-Sync-To format "
"'//us'", None, None, None))):
if badurl.startswith('//') and not realms_conf:
self.assertEqual(
utils.validate_sync_to(
badurl, ['1.1.1.1', '2.2.2.2'], realms_conf),
(None, None, None, None))
else:
self.assertEqual(
utils.validate_sync_to(
badurl, ['1.1.1.1', '2.2.2.2'], realms_conf),
result)
def test_TRUE_VALUES(self):
for v in utils.TRUE_VALUES:
self.assertEqual(v, v.lower())
def test_config_true_value(self):
orig_trues = utils.TRUE_VALUES
try:
utils.TRUE_VALUES = 'hello world'.split()
for val in 'hello world HELLO WORLD'.split():
self.assertTrue(utils.config_true_value(val) is True)
self.assertTrue(utils.config_true_value(True) is True)
self.assertTrue(utils.config_true_value('foo') is False)
self.assertTrue(utils.config_true_value(False) is False)
finally:
utils.TRUE_VALUES = orig_trues
def test_config_auto_int_value(self):
expectations = {
# (value, default) : expected,
('1', 0): 1,
(1, 0): 1,
('asdf', 0): ValueError,
('auto', 1): 1,
('AutO', 1): 1,
('Aut0', 1): ValueError,
(None, 1): 1,
}
for (value, default), expected in expectations.items():
try:
rv = utils.config_auto_int_value(value, default)
except Exception as e:
if e.__class__ is not expected:
raise
else:
self.assertEqual(expected, rv)
def test_streq_const_time(self):
self.assertTrue(utils.streq_const_time('abc123', 'abc123'))
self.assertFalse(utils.streq_const_time('a', 'aaaaa'))
self.assertFalse(utils.streq_const_time('ABC123', 'abc123'))
def test_replication_quorum_size(self):
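        # replication quorum is a simple majority: n // 2 + 1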
expected_sizes = {1: 1,
2: 2,
3: 2,
4: 3,
5: 3}
got_sizes = dict([(n, utils.quorum_size(n))
for n in expected_sizes])
self.assertEqual(expected_sizes, got_sizes)
def test_rsync_ip_ipv4_localhost(self):
self.assertEqual(utils.rsync_ip('127.0.0.1'), '127.0.0.1')
def test_rsync_ip_ipv6_random_ip(self):
self.assertEqual(
utils.rsync_ip('fe80:0000:0000:0000:0202:b3ff:fe1e:8329'),
'[fe80:0000:0000:0000:0202:b3ff:fe1e:8329]')
def test_rsync_ip_ipv6_ipv4_compatible(self):
self.assertEqual(
utils.rsync_ip('::ffff:192.0.2.128'), '[::ffff:192.0.2.128]')
def test_rsync_module_interpolation(self):
fake_device = {'ip': '127.0.0.1', 'port': 11,
'replication_ip': '127.0.0.2', 'replication_port': 12,
'region': '1', 'zone': '2', 'device': 'sda1',
'meta': 'just_a_string'}
self.assertEqual(
utils.rsync_module_interpolation('{ip}', fake_device),
'127.0.0.1')
self.assertEqual(
utils.rsync_module_interpolation('{port}', fake_device),
'11')
self.assertEqual(
utils.rsync_module_interpolation('{replication_ip}', fake_device),
'127.0.0.2')
self.assertEqual(
utils.rsync_module_interpolation('{replication_port}',
fake_device),
'12')
self.assertEqual(
utils.rsync_module_interpolation('{region}', fake_device),
'1')
self.assertEqual(
utils.rsync_module_interpolation('{zone}', fake_device),
'2')
self.assertEqual(
utils.rsync_module_interpolation('{device}', fake_device),
'sda1')
self.assertEqual(
utils.rsync_module_interpolation('{meta}', fake_device),
'just_a_string')
self.assertEqual(
utils.rsync_module_interpolation('{replication_ip}::object',
fake_device),
'127.0.0.2::object')
self.assertEqual(
utils.rsync_module_interpolation('{ip}::container{port}',
fake_device),
'127.0.0.1::container11')
self.assertEqual(
utils.rsync_module_interpolation(
'{replication_ip}::object_{device}', fake_device),
'127.0.0.2::object_sda1')
self.assertEqual(
utils.rsync_module_interpolation(
'127.0.0.3::object_{replication_port}', fake_device),
'127.0.0.3::object_12')
self.assertRaises(ValueError, utils.rsync_module_interpolation,
'{replication_ip}::object_{deivce}', fake_device)
def test_fallocate_reserve(self):
class StatVFS(object):
f_frsize = 1024
f_bavail = 1
def fstatvfs(fd):
return StatVFS()
orig_FALLOCATE_RESERVE = utils.FALLOCATE_RESERVE
orig_fstatvfs = utils.os.fstatvfs
try:
fallocate = utils.FallocateWrapper(noop=True)
utils.os.fstatvfs = fstatvfs
# Want 1023 reserved, have 1024 * 1 free, so succeeds
utils.FALLOCATE_RESERVE = 1023
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(0)), 0)
# Want 1023 reserved, have 512 * 2 free, so succeeds
utils.FALLOCATE_RESERVE = 1023
StatVFS.f_frsize = 512
StatVFS.f_bavail = 2
self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(0)), 0)
# Want 1024 reserved, have 1024 * 1 free, so fails
utils.FALLOCATE_RESERVE = 1024
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
exc = None
try:
fallocate(0, 1, 0, ctypes.c_uint64(0))
except OSError as err:
exc = err
self.assertEqual(str(exc),
'[Errno 28] FALLOCATE_RESERVE fail 1024 <= 1024')
            self.assertEqual(exc.errno, errno.ENOSPC)
# Want 1024 reserved, have 512 * 2 free, so fails
utils.FALLOCATE_RESERVE = 1024
StatVFS.f_frsize = 512
StatVFS.f_bavail = 2
exc = None
try:
fallocate(0, 1, 0, ctypes.c_uint64(0))
except OSError as err:
exc = err
self.assertEqual(str(exc),
'[Errno 28] FALLOCATE_RESERVE fail 1024 <= 1024')
            self.assertEqual(exc.errno, errno.ENOSPC)
# Want 2048 reserved, have 1024 * 1 free, so fails
utils.FALLOCATE_RESERVE = 2048
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
exc = None
try:
fallocate(0, 1, 0, ctypes.c_uint64(0))
except OSError as err:
exc = err
self.assertEqual(str(exc),
'[Errno 28] FALLOCATE_RESERVE fail 1024 <= 2048')
            self.assertEqual(exc.errno, errno.ENOSPC)
# Want 2048 reserved, have 512 * 2 free, so fails
utils.FALLOCATE_RESERVE = 2048
StatVFS.f_frsize = 512
StatVFS.f_bavail = 2
exc = None
try:
fallocate(0, 1, 0, ctypes.c_uint64(0))
except OSError as err:
exc = err
self.assertEqual(str(exc),
'[Errno 28] FALLOCATE_RESERVE fail 1024 <= 2048')
            self.assertEqual(exc.errno, errno.ENOSPC)
# Want 1023 reserved, have 1024 * 1 free, but file size is 1, so
# fails
utils.FALLOCATE_RESERVE = 1023
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
exc = None
try:
fallocate(0, 1, 0, ctypes.c_uint64(1))
except OSError as err:
exc = err
self.assertEqual(str(exc),
'[Errno 28] FALLOCATE_RESERVE fail 1023 <= 1023')
            self.assertEqual(exc.errno, errno.ENOSPC)
# Want 1022 reserved, have 1024 * 1 free, and file size is 1, so
# succeeds
utils.FALLOCATE_RESERVE = 1022
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(1)), 0)
# Want 1023 reserved, have 1024 * 1 free, and file size is 0, so
# succeeds
utils.FALLOCATE_RESERVE = 1023
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(0)), 0)
# Want 1024 reserved, have 1024 * 1 free, and even though
# file size is 0, since we're under the reserve, fails
utils.FALLOCATE_RESERVE = 1024
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
exc = None
try:
fallocate(0, 1, 0, ctypes.c_uint64(0))
except OSError as err:
exc = err
self.assertEqual(str(exc),
'[Errno 28] FALLOCATE_RESERVE fail 1024 <= 1024')
            self.assertEqual(exc.errno, errno.ENOSPC)
finally:
utils.FALLOCATE_RESERVE = orig_FALLOCATE_RESERVE
utils.os.fstatvfs = orig_fstatvfs
def test_fallocate_func(self):
class FallocateWrapper(object):
def __init__(self):
self.last_call = None
def __call__(self, *args):
self.last_call = list(args)
self.last_call[-1] = self.last_call[-1].value
return 0
orig__sys_fallocate = utils._sys_fallocate
try:
utils._sys_fallocate = FallocateWrapper()
# Ensure fallocate calls _sys_fallocate even with 0 bytes
utils._sys_fallocate.last_call = None
utils.fallocate(1234, 0)
self.assertEqual(utils._sys_fallocate.last_call,
[1234, 1, 0, 0])
# Ensure fallocate calls _sys_fallocate even with negative bytes
utils._sys_fallocate.last_call = None
utils.fallocate(1234, -5678)
self.assertEqual(utils._sys_fallocate.last_call,
[1234, 1, 0, 0])
# Ensure fallocate calls _sys_fallocate properly with positive
# bytes
utils._sys_fallocate.last_call = None
utils.fallocate(1234, 1)
self.assertEqual(utils._sys_fallocate.last_call,
[1234, 1, 0, 1])
utils._sys_fallocate.last_call = None
utils.fallocate(1234, 10 * 1024 * 1024 * 1024)
self.assertEqual(utils._sys_fallocate.last_call,
[1234, 1, 0, 10 * 1024 * 1024 * 1024])
finally:
utils._sys_fallocate = orig__sys_fallocate
def test_generate_trans_id(self):
fake_time = 1366428370.5163341
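        # trans ids are 'tx' + 21 opaque characters + '-' + 10 hex digits of
        # the current time, plus any caller-supplied suffix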
with patch.object(utils.time, 'time', return_value=fake_time):
trans_id = utils.generate_trans_id('')
self.assertEqual(len(trans_id), 34)
self.assertEqual(trans_id[:2], 'tx')
self.assertEqual(trans_id[23], '-')
self.assertEqual(int(trans_id[24:], 16), int(fake_time))
with patch.object(utils.time, 'time', return_value=fake_time):
trans_id = utils.generate_trans_id('-suffix')
self.assertEqual(len(trans_id), 41)
self.assertEqual(trans_id[:2], 'tx')
self.assertEqual(trans_id[34:], '-suffix')
self.assertEqual(trans_id[23], '-')
self.assertEqual(int(trans_id[24:34], 16), int(fake_time))
def test_get_trans_id_time(self):
ts = utils.get_trans_id_time('tx8c8bc884cdaf499bb29429aa9c46946e')
self.assertEqual(ts, None)
ts = utils.get_trans_id_time('tx1df4ff4f55ea45f7b2ec2-0051720c06')
self.assertEqual(ts, 1366428678)
self.assertEqual(
time.asctime(time.gmtime(ts)) + ' UTC',
'Sat Apr 20 03:31:18 2013 UTC')
ts = utils.get_trans_id_time(
'tx1df4ff4f55ea45f7b2ec2-0051720c06-suffix')
self.assertEqual(ts, 1366428678)
self.assertEqual(
time.asctime(time.gmtime(ts)) + ' UTC',
'Sat Apr 20 03:31:18 2013 UTC')
ts = utils.get_trans_id_time('')
self.assertEqual(ts, None)
ts = utils.get_trans_id_time('garbage')
self.assertEqual(ts, None)
ts = utils.get_trans_id_time('tx1df4ff4f55ea45f7b2ec2-almostright')
self.assertEqual(ts, None)
def test_tpool_reraise(self):
with patch.object(utils.tpool, 'execute', lambda f: f()):
self.assertTrue(
utils.tpool_reraise(MagicMock(return_value='test1')), 'test1')
self.assertRaises(
Exception,
utils.tpool_reraise, MagicMock(side_effect=Exception('test2')))
self.assertRaises(
BaseException,
utils.tpool_reraise,
MagicMock(side_effect=BaseException('test3')))
def test_lock_file(self):
flags = os.O_CREAT | os.O_RDWR
with NamedTemporaryFile(delete=False) as nt:
nt.write("test string")
nt.flush()
nt.close()
with utils.lock_file(nt.name, unlink=False) as f:
self.assertEqual(f.read(), "test string")
# we have a lock, now let's try to get a newer one
fd = os.open(nt.name, flags)
self.assertRaises(IOError, fcntl.flock, fd,
fcntl.LOCK_EX | fcntl.LOCK_NB)
with utils.lock_file(nt.name, unlink=False, append=True) as f:
f.seek(0)
self.assertEqual(f.read(), "test string")
f.seek(0)
f.write("\nanother string")
f.flush()
f.seek(0)
self.assertEqual(f.read(), "test string\nanother string")
# we have a lock, now let's try to get a newer one
fd = os.open(nt.name, flags)
self.assertRaises(IOError, fcntl.flock, fd,
fcntl.LOCK_EX | fcntl.LOCK_NB)
with utils.lock_file(nt.name, timeout=3, unlink=False) as f:
try:
with utils.lock_file(
nt.name, timeout=1, unlink=False) as f:
self.assertTrue(
False, "Expected LockTimeout exception")
except LockTimeout:
pass
with utils.lock_file(nt.name, unlink=True) as f:
self.assertEqual(f.read(), "test string\nanother string")
# we have a lock, now let's try to get a newer one
fd = os.open(nt.name, flags)
self.assertRaises(
IOError, fcntl.flock, fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
self.assertRaises(OSError, os.remove, nt.name)
def test_lock_file_unlinked_after_open(self):
os_open = os.open
first_pass = [True]
def deleting_open(filename, flags):
            # unlink the file after it's opened, but only the first time
fd = os_open(filename, flags)
if first_pass[0]:
os.unlink(filename)
first_pass[0] = False
return fd
with NamedTemporaryFile(delete=False) as nt:
with mock.patch('os.open', deleting_open):
with utils.lock_file(nt.name, unlink=True) as f:
self.assertNotEqual(os.fstat(nt.fileno()).st_ino,
os.fstat(f.fileno()).st_ino)
first_pass = [True]
def recreating_open(filename, flags):
# unlink and recreate the file after it's opened
fd = os_open(filename, flags)
if first_pass[0]:
os.unlink(filename)
os.close(os_open(filename, os.O_CREAT | os.O_RDWR))
first_pass[0] = False
return fd
with NamedTemporaryFile(delete=False) as nt:
with mock.patch('os.open', recreating_open):
with utils.lock_file(nt.name, unlink=True) as f:
self.assertNotEqual(os.fstat(nt.fileno()).st_ino,
os.fstat(f.fileno()).st_ino)
def test_lock_file_held_on_unlink(self):
os_unlink = os.unlink
def flocking_unlink(filename):
# make sure the lock is held when we unlink
fd = os.open(filename, os.O_RDWR)
self.assertRaises(
IOError, fcntl.flock, fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
os.close(fd)
os_unlink(filename)
with NamedTemporaryFile(delete=False) as nt:
with mock.patch('os.unlink', flocking_unlink):
with utils.lock_file(nt.name, unlink=True):
pass
def test_lock_file_no_unlink_if_fail(self):
os_open = os.open
with NamedTemporaryFile(delete=True) as nt:
def lock_on_open(filename, flags):
# lock the file on another fd after it's opened.
fd = os_open(filename, flags)
fd2 = os_open(filename, flags)
fcntl.flock(fd2, fcntl.LOCK_EX | fcntl.LOCK_NB)
return fd
try:
timedout = False
with mock.patch('os.open', lock_on_open):
with utils.lock_file(nt.name, unlink=False, timeout=0.01):
pass
except LockTimeout:
timedout = True
self.assertTrue(timedout)
self.assertTrue(os.path.exists(nt.name))
def test_ismount_path_does_not_exist(self):
tmpdir = mkdtemp()
try:
self.assertFalse(utils.ismount(os.path.join(tmpdir, 'bar')))
finally:
shutil.rmtree(tmpdir)
def test_ismount_path_not_mount(self):
tmpdir = mkdtemp()
try:
self.assertFalse(utils.ismount(tmpdir))
finally:
shutil.rmtree(tmpdir)
def test_ismount_path_error(self):
def _mock_os_lstat(path):
raise OSError(13, "foo")
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
# Raises exception with _raw -- see next test.
utils.ismount(tmpdir)
finally:
shutil.rmtree(tmpdir)
def test_ismount_raw_path_error(self):
def _mock_os_lstat(path):
raise OSError(13, "foo")
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
self.assertRaises(OSError, utils.ismount_raw, tmpdir)
finally:
shutil.rmtree(tmpdir)
def test_ismount_path_is_symlink(self):
tmpdir = mkdtemp()
try:
link = os.path.join(tmpdir, "tmp")
os.symlink("/tmp", link)
self.assertFalse(utils.ismount(link))
finally:
shutil.rmtree(tmpdir)
def test_ismount_path_is_root(self):
self.assertTrue(utils.ismount('/'))
def test_ismount_parent_path_error(self):
_os_lstat = os.lstat
def _mock_os_lstat(path):
if path.endswith(".."):
raise OSError(13, "foo")
else:
return _os_lstat(path)
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
# Raises exception with _raw -- see next test.
utils.ismount(tmpdir)
finally:
shutil.rmtree(tmpdir)
def test_ismount_raw_parent_path_error(self):
_os_lstat = os.lstat
def _mock_os_lstat(path):
if path.endswith(".."):
raise OSError(13, "foo")
else:
return _os_lstat(path)
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
self.assertRaises(OSError, utils.ismount_raw, tmpdir)
finally:
shutil.rmtree(tmpdir)
def test_ismount_successes_dev(self):
_os_lstat = os.lstat
class MockStat(object):
def __init__(self, mode, dev, ino):
self.st_mode = mode
self.st_dev = dev
self.st_ino = ino
def _mock_os_lstat(path):
if path.endswith(".."):
parent = _os_lstat(path)
return MockStat(parent.st_mode, parent.st_dev + 1,
parent.st_ino)
else:
return _os_lstat(path)
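        # the parent directory appears to be on a different device, which is
        # one of the conditions ismount() treats as a mount point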
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
self.assertTrue(utils.ismount(tmpdir))
finally:
shutil.rmtree(tmpdir)
def test_ismount_successes_ino(self):
_os_lstat = os.lstat
class MockStat(object):
def __init__(self, mode, dev, ino):
self.st_mode = mode
self.st_dev = dev
self.st_ino = ino
def _mock_os_lstat(path):
if path.endswith(".."):
return _os_lstat(path)
else:
parent_path = os.path.join(path, "..")
child = _os_lstat(path)
parent = _os_lstat(parent_path)
return MockStat(child.st_mode, parent.st_ino,
child.st_dev)
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
self.assertTrue(utils.ismount(tmpdir))
finally:
shutil.rmtree(tmpdir)
def test_parse_content_type(self):
self.assertEqual(utils.parse_content_type('text/plain'),
('text/plain', []))
self.assertEqual(utils.parse_content_type('text/plain;charset=utf-8'),
('text/plain', [('charset', 'utf-8')]))
self.assertEqual(
utils.parse_content_type('text/plain;hello="world";charset=utf-8'),
('text/plain', [('hello', '"world"'), ('charset', 'utf-8')]))
self.assertEqual(
utils.parse_content_type('text/plain; hello="world"; a=b'),
('text/plain', [('hello', '"world"'), ('a', 'b')]))
self.assertEqual(
utils.parse_content_type(r'text/plain; x="\""; a=b'),
('text/plain', [('x', r'"\""'), ('a', 'b')]))
self.assertEqual(
utils.parse_content_type(r'text/plain; x; a=b'),
('text/plain', [('x', ''), ('a', 'b')]))
self.assertEqual(
utils.parse_content_type(r'text/plain; x="\""; a'),
('text/plain', [('x', r'"\""'), ('a', '')]))
def test_override_bytes_from_content_type(self):
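        # a trailing swift_bytes parameter overrides the listing's byte count
        # and is stripped from the content type; a non-integer value is
        # ignored for the byte count but still stripped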
listing_dict = {
'bytes': 1234, 'hash': 'asdf', 'name': 'zxcv',
'content_type': 'text/plain; hello="world"; swift_bytes=15'}
utils.override_bytes_from_content_type(listing_dict,
logger=FakeLogger())
self.assertEqual(listing_dict['bytes'], 15)
self.assertEqual(listing_dict['content_type'],
'text/plain;hello="world"')
listing_dict = {
'bytes': 1234, 'hash': 'asdf', 'name': 'zxcv',
'content_type': 'text/plain; hello="world"; swift_bytes=hey'}
utils.override_bytes_from_content_type(listing_dict,
logger=FakeLogger())
self.assertEqual(listing_dict['bytes'], 1234)
self.assertEqual(listing_dict['content_type'],
'text/plain;hello="world"')
def test_clean_content_type(self):
subtests = {
'': '', 'text/plain': 'text/plain',
'text/plain; someother=thing': 'text/plain; someother=thing',
'text/plain; swift_bytes=123': 'text/plain',
'text/plain; someother=thing; swift_bytes=123':
'text/plain; someother=thing',
# Since Swift always tacks on the swift_bytes, clean_content_type()
# only strips swift_bytes if it's last. The next item simply shows
# that if for some other odd reason it's not last,
# clean_content_type() will not remove it from the header.
'text/plain; swift_bytes=123; someother=thing':
'text/plain; swift_bytes=123; someother=thing'}
for before, after in subtests.items():
self.assertEqual(utils.clean_content_type(before), after)
def test_quote(self):
res = utils.quote('/v1/a/c3/subdirx/')
assert res == '/v1/a/c3/subdirx/'
res = utils.quote('/v1/a&b/c3/subdirx/')
assert res == '/v1/a%26b/c3/subdirx/'
res = utils.quote('/v1/a&b/c3/subdirx/', safe='&')
assert res == '%2Fv1%2Fa&b%2Fc3%2Fsubdirx%2F'
unicode_sample = u'\uc77c\uc601'
account = 'abc_' + unicode_sample
valid_utf8_str = utils.get_valid_utf8_str(account)
account = 'abc_' + unicode_sample.encode('utf-8')[::-1]
invalid_utf8_str = utils.get_valid_utf8_str(account)
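        # invalid UTF-8 sequences are replaced with U+FFFD, which quotes as
        # %EF%BF%BD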
self.assertEqual('abc_%EC%9D%BC%EC%98%81',
utils.quote(valid_utf8_str))
self.assertEqual('abc_%EF%BF%BD%EF%BF%BD%EC%BC%9D%EF%BF%BD',
utils.quote(invalid_utf8_str))
def test_get_hmac(self):
self.assertEqual(
utils.get_hmac('GET', '/path', 1, 'abc'),
'b17f6ff8da0e251737aa9e3ee69a881e3e092e2f')
def test_get_policy_index(self):
# Account has no information about a policy
req = Request.blank(
'/sda1/p/a',
environ={'REQUEST_METHOD': 'GET'})
res = Response()
self.assertIsNone(utils.get_policy_index(req.headers,
res.headers))
# The policy of a container can be specified by the response header
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'GET'})
res = Response(headers={'X-Backend-Storage-Policy-Index': '1'})
self.assertEqual('1', utils.get_policy_index(req.headers,
res.headers))
# The policy of an object to be created can be specified by the request
# header
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Backend-Storage-Policy-Index': '2'})
res = Response()
self.assertEqual('2', utils.get_policy_index(req.headers,
res.headers))
def test_get_log_line(self):
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD', 'REMOTE_ADDR': '1.2.3.4'})
res = Response()
trans_time = 1.2
additional_info = 'some information'
server_pid = 1234
exp_line = '1.2.3.4 - - [01/Jan/1970:02:46:41 +0000] "HEAD ' \
'/sda1/p/a/c/o" 200 - "-" "-" "-" 1.2000 "some information" 1234 -'
with mock.patch(
'time.gmtime',
mock.MagicMock(side_effect=[time.gmtime(10001.0)])):
with mock.patch(
'os.getpid', mock.MagicMock(return_value=server_pid)):
self.assertEqual(
exp_line,
utils.get_log_line(req, res, trans_time, additional_info))
def test_cache_from_env(self):
# should never get logging when swift.cache is found
env = {'swift.cache': 42}
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertEqual(42, utils.cache_from_env(env))
self.assertEqual(0, len(logger.get_lines_for_level('error')))
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertEqual(42, utils.cache_from_env(env, False))
self.assertEqual(0, len(logger.get_lines_for_level('error')))
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertEqual(42, utils.cache_from_env(env, True))
self.assertEqual(0, len(logger.get_lines_for_level('error')))
# check allow_none controls logging when swift.cache is not found
err_msg = 'ERROR: swift.cache could not be found in env!'
env = {}
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertIsNone(utils.cache_from_env(env))
self.assertTrue(err_msg in logger.get_lines_for_level('error'))
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertIsNone(utils.cache_from_env(env, False))
self.assertTrue(err_msg in logger.get_lines_for_level('error'))
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertIsNone(utils.cache_from_env(env, True))
self.assertEqual(0, len(logger.get_lines_for_level('error')))
def test_fsync_dir(self):
tempdir = None
fd = None
try:
tempdir = mkdtemp(dir='/tmp')
fd, temppath = tempfile.mkstemp(dir=tempdir)
_mock_fsync = mock.Mock()
_mock_close = mock.Mock()
with patch('swift.common.utils.fsync', _mock_fsync):
with patch('os.close', _mock_close):
utils.fsync_dir(tempdir)
self.assertTrue(_mock_fsync.called)
self.assertTrue(_mock_close.called)
self.assertTrue(isinstance(_mock_fsync.call_args[0][0], int))
self.assertEqual(_mock_fsync.call_args[0][0],
_mock_close.call_args[0][0])
# Not a directory - arg is file path
self.assertRaises(OSError, utils.fsync_dir, temppath)
logger = FakeLogger()
def _mock_fsync(fd):
raise OSError(errno.EBADF, os.strerror(errno.EBADF))
with patch('swift.common.utils.fsync', _mock_fsync):
with mock.patch('swift.common.utils.logging', logger):
utils.fsync_dir(tempdir)
self.assertEqual(1, len(logger.get_lines_for_level('warning')))
finally:
if fd is not None:
os.close(fd)
os.unlink(temppath)
if tempdir:
os.rmdir(tempdir)
def test_renamer_with_fsync_dir(self):
tempdir = None
try:
tempdir = mkdtemp(dir='/tmp')
# Simulate part of object path already existing
part_dir = os.path.join(tempdir, 'objects/1234/')
os.makedirs(part_dir)
obj_dir = os.path.join(part_dir, 'aaa', 'a' * 32)
obj_path = os.path.join(obj_dir, '1425276031.12345.data')
# Object dir had to be created
_m_os_rename = mock.Mock()
_m_fsync_dir = mock.Mock()
with patch('os.rename', _m_os_rename):
with patch('swift.common.utils.fsync_dir', _m_fsync_dir):
utils.renamer("fake_path", obj_path)
_m_os_rename.assert_called_once_with('fake_path', obj_path)
            # fsync_dir on parents of all newly created dirs
self.assertEqual(_m_fsync_dir.call_count, 3)
# Object dir existed
_m_os_rename.reset_mock()
_m_fsync_dir.reset_mock()
with patch('os.rename', _m_os_rename):
with patch('swift.common.utils.fsync_dir', _m_fsync_dir):
utils.renamer("fake_path", obj_path)
_m_os_rename.assert_called_once_with('fake_path', obj_path)
# fsync_dir only on the leaf dir
self.assertEqual(_m_fsync_dir.call_count, 1)
finally:
if tempdir:
shutil.rmtree(tempdir)
def test_renamer_when_fsync_is_false(self):
_m_os_rename = mock.Mock()
_m_fsync_dir = mock.Mock()
_m_makedirs_count = mock.Mock(return_value=2)
with patch('os.rename', _m_os_rename):
with patch('swift.common.utils.fsync_dir', _m_fsync_dir):
with patch('swift.common.utils.makedirs_count',
_m_makedirs_count):
utils.renamer("fake_path", "/a/b/c.data", fsync=False)
_m_makedirs_count.assert_called_once_with("/a/b")
_m_os_rename.assert_called_once_with('fake_path', "/a/b/c.data")
self.assertFalse(_m_fsync_dir.called)
def test_makedirs_count(self):
tempdir = None
fd = None
try:
tempdir = mkdtemp(dir='/tmp')
os.makedirs(os.path.join(tempdir, 'a/b'))
# 4 new dirs created
dirpath = os.path.join(tempdir, 'a/b/1/2/3/4')
ret = utils.makedirs_count(dirpath)
self.assertEqual(ret, 4)
# no new dirs created - dir already exists
ret = utils.makedirs_count(dirpath)
self.assertEqual(ret, 0)
# path exists and is a file
fd, temppath = tempfile.mkstemp(dir=dirpath)
os.close(fd)
self.assertRaises(OSError, utils.makedirs_count, temppath)
finally:
if tempdir:
shutil.rmtree(tempdir)
class ResellerConfReader(unittest.TestCase):
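    """Tests for utils.config_read_reseller_options()."""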
def setUp(self):
self.default_rules = {'operator_roles': ['admin', 'swiftoperator'],
'service_roles': [],
'require_group': ''}
def test_defaults(self):
conf = {}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_'])
self.assertEqual(options['AUTH_'], self.default_rules)
def test_same_as_default(self):
conf = {'reseller_prefix': 'AUTH',
'operator_roles': 'admin, swiftoperator'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_'])
self.assertEqual(options['AUTH_'], self.default_rules)
def test_single_blank_reseller(self):
conf = {'reseller_prefix': ''}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, [''])
self.assertEqual(options[''], self.default_rules)
def test_single_blank_reseller_with_conf(self):
conf = {'reseller_prefix': '',
"''operator_roles": 'role1, role2'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, [''])
self.assertEqual(options[''].get('operator_roles'),
['role1', 'role2'])
self.assertEqual(options[''].get('service_roles'),
self.default_rules.get('service_roles'))
self.assertEqual(options[''].get('require_group'),
self.default_rules.get('require_group'))
def test_multiple_same_resellers(self):
conf = {'reseller_prefix': " '' , '' "}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, [''])
conf = {'reseller_prefix': '_, _'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['_'])
conf = {'reseller_prefix': 'AUTH, PRE2, AUTH, PRE2'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_', 'PRE2_'])
def test_several_resellers_with_conf(self):
conf = {'reseller_prefix': 'PRE1, PRE2',
'PRE1_operator_roles': 'role1, role2',
'PRE1_service_roles': 'role3, role4',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['PRE1_', 'PRE2_'])
self.assertEqual(set(['role1', 'role2']),
set(options['PRE1_'].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual(set(['role3', 'role4']),
set(options['PRE1_'].get('service_roles')))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('', options['PRE1_'].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
def test_several_resellers_first_blank(self):
conf = {'reseller_prefix': " '' , PRE2",
"''operator_roles": 'role1, role2',
"''service_roles": 'role3, role4',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['', 'PRE2_'])
self.assertEqual(set(['role1', 'role2']),
set(options[''].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual(set(['role3', 'role4']),
set(options[''].get('service_roles')))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('', options[''].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
def test_several_resellers_with_blank_comma(self):
conf = {'reseller_prefix': "AUTH , '', PRE2",
"''operator_roles": 'role1, role2',
"''service_roles": 'role3, role4',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_', '', 'PRE2_'])
self.assertEqual(set(['admin', 'swiftoperator']),
set(options['AUTH_'].get('operator_roles')))
self.assertEqual(set(['role1', 'role2']),
set(options[''].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual([],
options['AUTH_'].get('service_roles'))
self.assertEqual(set(['role3', 'role4']),
set(options[''].get('service_roles')))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('', options['AUTH_'].get('require_group'))
self.assertEqual('', options[''].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
def test_stray_comma(self):
conf = {'reseller_prefix': "AUTH ,, PRE2",
"''operator_roles": 'role1, role2',
"''service_roles": 'role3, role4',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_', 'PRE2_'])
self.assertEqual(set(['admin', 'swiftoperator']),
set(options['AUTH_'].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual([],
options['AUTH_'].get('service_roles'))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('', options['AUTH_'].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
def test_multiple_stray_commas_resellers(self):
conf = {'reseller_prefix': ' , , ,'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, [''])
self.assertEqual(options[''], self.default_rules)
def test_unprefixed_options(self):
conf = {'reseller_prefix': "AUTH , '', PRE2",
"operator_roles": 'role1, role2',
"service_roles": 'role3, role4',
'require_group': 'auth_blank_group',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_', '', 'PRE2_'])
self.assertEqual(set(['role1', 'role2']),
set(options['AUTH_'].get('operator_roles')))
self.assertEqual(set(['role1', 'role2']),
set(options[''].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual(set(['role3', 'role4']),
set(options['AUTH_'].get('service_roles')))
self.assertEqual(set(['role3', 'role4']),
set(options[''].get('service_roles')))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('auth_blank_group',
options['AUTH_'].get('require_group'))
self.assertEqual('auth_blank_group', options[''].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
class TestUnlinkOlder(unittest.TestCase):
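    """Tests for utils.unlink_older_than() and unlink_paths_older_than()."""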
def setUp(self):
self.tempdir = mkdtemp()
self.mtime = {}
def tearDown(self):
rmtree(self.tempdir, ignore_errors=True)
def touch(self, fpath, mtime=None):
self.mtime[fpath] = mtime or time.time()
        with open(fpath, 'w'):
            pass
@contextlib.contextmanager
def high_resolution_getmtime(self):
orig_getmtime = os.path.getmtime
def mock_getmtime(fpath):
mtime = self.mtime.get(fpath)
if mtime is None:
mtime = orig_getmtime(fpath)
return mtime
with mock.patch('os.path.getmtime', mock_getmtime):
yield
def test_unlink_older_than_path_not_exists(self):
path = os.path.join(self.tempdir, 'does-not-exist')
# just make sure it doesn't blow up
utils.unlink_older_than(path, time.time())
def test_unlink_older_than_file(self):
path = os.path.join(self.tempdir, 'some-file')
self.touch(path)
with self.assertRaises(OSError) as ctx:
utils.unlink_older_than(path, time.time())
self.assertEqual(ctx.exception.errno, errno.ENOTDIR)
def test_unlink_older_than_now(self):
self.touch(os.path.join(self.tempdir, 'test'))
with self.high_resolution_getmtime():
utils.unlink_older_than(self.tempdir, time.time())
self.assertEqual([], os.listdir(self.tempdir))
def test_unlink_not_old_enough(self):
start = time.time()
self.touch(os.path.join(self.tempdir, 'test'))
with self.high_resolution_getmtime():
utils.unlink_older_than(self.tempdir, start)
self.assertEqual(['test'], os.listdir(self.tempdir))
def test_unlink_mixed(self):
self.touch(os.path.join(self.tempdir, 'first'))
cutoff = time.time()
self.touch(os.path.join(self.tempdir, 'second'))
with self.high_resolution_getmtime():
utils.unlink_older_than(self.tempdir, cutoff)
self.assertEqual(['second'], os.listdir(self.tempdir))
def test_unlink_paths(self):
paths = []
for item in ('first', 'second', 'third'):
path = os.path.join(self.tempdir, item)
self.touch(path)
paths.append(path)
# don't unlink everyone
with self.high_resolution_getmtime():
utils.unlink_paths_older_than(paths[:2], time.time())
self.assertEqual(['third'], os.listdir(self.tempdir))
def test_unlink_empty_paths(self):
# just make sure it doesn't blow up
utils.unlink_paths_older_than([], time.time())
def test_unlink_not_exists_paths(self):
path = os.path.join(self.tempdir, 'does-not-exist')
# just make sure it doesn't blow up
utils.unlink_paths_older_than([path], time.time())
class TestSwiftInfo(unittest.TestCase):
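    """Tests for utils.register_swift_info() and utils.get_swift_info()."""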
def tearDown(self):
utils._swift_info = {}
utils._swift_admin_info = {}
def test_register_swift_info(self):
utils.register_swift_info(foo='bar')
utils.register_swift_info(lorem='ipsum')
utils.register_swift_info('cap1', cap1_foo='cap1_bar')
utils.register_swift_info('cap1', cap1_lorem='cap1_ipsum')
self.assertTrue('swift' in utils._swift_info)
self.assertTrue('foo' in utils._swift_info['swift'])
self.assertEqual(utils._swift_info['swift']['foo'], 'bar')
self.assertTrue('lorem' in utils._swift_info['swift'])
self.assertEqual(utils._swift_info['swift']['lorem'], 'ipsum')
self.assertTrue('cap1' in utils._swift_info)
self.assertTrue('cap1_foo' in utils._swift_info['cap1'])
self.assertEqual(utils._swift_info['cap1']['cap1_foo'], 'cap1_bar')
self.assertTrue('cap1_lorem' in utils._swift_info['cap1'])
self.assertEqual(utils._swift_info['cap1']['cap1_lorem'], 'cap1_ipsum')
self.assertRaises(ValueError,
utils.register_swift_info, 'admin', foo='bar')
self.assertRaises(ValueError,
utils.register_swift_info, 'disallowed_sections',
disallowed_sections=None)
utils.register_swift_info('goodkey', foo='5.6')
self.assertRaises(ValueError,
utils.register_swift_info, 'bad.key', foo='5.6')
data = {'bad.key': '5.6'}
self.assertRaises(ValueError,
utils.register_swift_info, 'goodkey', **data)
def test_get_swift_info(self):
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar'}}
utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = utils.get_swift_info()
self.assertTrue('admin' not in info)
self.assertTrue('swift' in info)
self.assertTrue('foo' in info['swift'])
self.assertEqual(utils._swift_info['swift']['foo'], 'bar')
self.assertTrue('cap1' in info)
self.assertTrue('cap1_foo' in info['cap1'])
self.assertEqual(utils._swift_info['cap1']['cap1_foo'], 'cap1_bar')
def test_get_swift_info_with_disallowed_sections(self):
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar'},
'cap2': {'cap2_foo': 'cap2_bar'},
'cap3': {'cap3_foo': 'cap3_bar'}}
utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = utils.get_swift_info(disallowed_sections=['cap1', 'cap3'])
self.assertTrue('admin' not in info)
self.assertTrue('swift' in info)
self.assertTrue('foo' in info['swift'])
self.assertEqual(info['swift']['foo'], 'bar')
self.assertTrue('cap1' not in info)
self.assertTrue('cap2' in info)
self.assertTrue('cap2_foo' in info['cap2'])
self.assertEqual(info['cap2']['cap2_foo'], 'cap2_bar')
self.assertTrue('cap3' not in info)
def test_register_swift_admin_info(self):
utils.register_swift_info(admin=True, admin_foo='admin_bar')
utils.register_swift_info(admin=True, admin_lorem='admin_ipsum')
utils.register_swift_info('cap1', admin=True, ac1_foo='ac1_bar')
utils.register_swift_info('cap1', admin=True, ac1_lorem='ac1_ipsum')
self.assertTrue('swift' in utils._swift_admin_info)
self.assertTrue('admin_foo' in utils._swift_admin_info['swift'])
self.assertEqual(
utils._swift_admin_info['swift']['admin_foo'], 'admin_bar')
self.assertTrue('admin_lorem' in utils._swift_admin_info['swift'])
self.assertEqual(
utils._swift_admin_info['swift']['admin_lorem'], 'admin_ipsum')
self.assertTrue('cap1' in utils._swift_admin_info)
self.assertTrue('ac1_foo' in utils._swift_admin_info['cap1'])
self.assertEqual(
utils._swift_admin_info['cap1']['ac1_foo'], 'ac1_bar')
self.assertTrue('ac1_lorem' in utils._swift_admin_info['cap1'])
self.assertEqual(
utils._swift_admin_info['cap1']['ac1_lorem'], 'ac1_ipsum')
self.assertTrue('swift' not in utils._swift_info)
self.assertTrue('cap1' not in utils._swift_info)
def test_get_swift_admin_info(self):
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar'}}
utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = utils.get_swift_info(admin=True)
self.assertTrue('admin' in info)
self.assertTrue('admin_cap1' in info['admin'])
self.assertTrue('ac1_foo' in info['admin']['admin_cap1'])
self.assertEqual(info['admin']['admin_cap1']['ac1_foo'], 'ac1_bar')
self.assertTrue('swift' in info)
self.assertTrue('foo' in info['swift'])
self.assertEqual(utils._swift_info['swift']['foo'], 'bar')
self.assertTrue('cap1' in info)
self.assertTrue('cap1_foo' in info['cap1'])
self.assertEqual(utils._swift_info['cap1']['cap1_foo'], 'cap1_bar')
def test_get_swift_admin_info_with_disallowed_sections(self):
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar'},
'cap2': {'cap2_foo': 'cap2_bar'},
'cap3': {'cap3_foo': 'cap3_bar'}}
utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = utils.get_swift_info(
admin=True, disallowed_sections=['cap1', 'cap3'])
self.assertTrue('admin' in info)
self.assertTrue('admin_cap1' in info['admin'])
self.assertTrue('ac1_foo' in info['admin']['admin_cap1'])
self.assertEqual(info['admin']['admin_cap1']['ac1_foo'], 'ac1_bar')
self.assertTrue('disallowed_sections' in info['admin'])
self.assertTrue('cap1' in info['admin']['disallowed_sections'])
self.assertTrue('cap2' not in info['admin']['disallowed_sections'])
self.assertTrue('cap3' in info['admin']['disallowed_sections'])
self.assertTrue('swift' in info)
self.assertTrue('foo' in info['swift'])
self.assertEqual(info['swift']['foo'], 'bar')
self.assertTrue('cap1' not in info)
self.assertTrue('cap2' in info)
self.assertTrue('cap2_foo' in info['cap2'])
self.assertEqual(info['cap2']['cap2_foo'], 'cap2_bar')
self.assertTrue('cap3' not in info)
def test_get_swift_admin_info_with_disallowed_sub_sections(self):
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar',
'cap1_moo': 'cap1_baa'},
'cap2': {'cap2_foo': 'cap2_bar'},
'cap3': {'cap2_foo': 'cap2_bar'},
'cap4': {'a': {'b': {'c': 'c'},
'b.c': 'b.c'}}}
utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = utils.get_swift_info(
admin=True, disallowed_sections=['cap1.cap1_foo', 'cap3',
'cap4.a.b.c'])
self.assertTrue('cap3' not in info)
self.assertEqual(info['cap1']['cap1_moo'], 'cap1_baa')
self.assertTrue('cap1_foo' not in info['cap1'])
self.assertTrue('c' not in info['cap4']['a']['b'])
self.assertEqual(info['cap4']['a']['b.c'], 'b.c')
def test_get_swift_info_with_unmatched_disallowed_sections(self):
cap1 = {'cap1_foo': 'cap1_bar',
'cap1_moo': 'cap1_baa'}
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': cap1}
# expect no exceptions
info = utils.get_swift_info(
disallowed_sections=['cap2.cap1_foo', 'cap1.no_match',
'cap1.cap1_foo.no_match.no_match'])
self.assertEqual(info['cap1'], cap1)
class TestFileLikeIter(unittest.TestCase):
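    """Tests for the file-like wrapper utils.FileLikeIter."""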
def test_iter_file_iter(self):
in_iter = [b'abc', b'de', b'fghijk', b'l']
chunks = []
for chunk in utils.FileLikeIter(in_iter):
chunks.append(chunk)
self.assertEqual(chunks, in_iter)
def test_next(self):
in_iter = [b'abc', b'de', b'fghijk', b'l']
chunks = []
iter_file = utils.FileLikeIter(in_iter)
while True:
try:
chunk = next(iter_file)
except StopIteration:
break
chunks.append(chunk)
self.assertEqual(chunks, in_iter)
def test_read(self):
in_iter = [b'abc', b'de', b'fghijk', b'l']
iter_file = utils.FileLikeIter(in_iter)
self.assertEqual(iter_file.read(), b''.join(in_iter))
def test_read_with_size(self):
in_iter = [b'abc', b'de', b'fghijk', b'l']
chunks = []
iter_file = utils.FileLikeIter(in_iter)
while True:
chunk = iter_file.read(2)
if not chunk:
break
self.assertTrue(len(chunk) <= 2)
chunks.append(chunk)
self.assertEqual(b''.join(chunks), b''.join(in_iter))
def test_read_with_size_zero(self):
# makes little sense, but file supports it, so...
self.assertEqual(utils.FileLikeIter(b'abc').read(0), b'')
def test_readline(self):
in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n',
b'trailing.']
lines = []
iter_file = utils.FileLikeIter(in_iter)
while True:
line = iter_file.readline()
if not line:
break
lines.append(line)
self.assertEqual(
lines,
[v if v == b'trailing.' else v + b'\n'
for v in b''.join(in_iter).split(b'\n')])
def test_readline2(self):
self.assertEqual(
utils.FileLikeIter([b'abc', b'def\n']).readline(4),
b'abcd')
def test_readline3(self):
self.assertEqual(
utils.FileLikeIter([b'a' * 1111, b'bc\ndef']).readline(),
(b'a' * 1111) + b'bc\n')
def test_readline_with_size(self):
in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n',
b'trailing.']
lines = []
iter_file = utils.FileLikeIter(in_iter)
while True:
line = iter_file.readline(2)
if not line:
break
lines.append(line)
self.assertEqual(
lines,
[b'ab', b'c\n', b'd\n', b'ef', b'g\n', b'h\n', b'ij', b'\n', b'\n',
b'k\n', b'tr', b'ai', b'li', b'ng', b'.'])
def test_readlines(self):
in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n',
b'trailing.']
lines = utils.FileLikeIter(in_iter).readlines()
self.assertEqual(
lines,
[v if v == b'trailing.' else v + b'\n'
for v in b''.join(in_iter).split(b'\n')])
def test_readlines_with_size(self):
in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n',
b'trailing.']
iter_file = utils.FileLikeIter(in_iter)
lists_of_lines = []
while True:
lines = iter_file.readlines(2)
if not lines:
break
lists_of_lines.append(lines)
self.assertEqual(
lists_of_lines,
[[b'ab'], [b'c\n'], [b'd\n'], [b'ef'], [b'g\n'], [b'h\n'], [b'ij'],
[b'\n', b'\n'], [b'k\n'], [b'tr'], [b'ai'], [b'li'], [b'ng'],
[b'.']])
def test_close(self):
iter_file = utils.FileLikeIter([b'a', b'b', b'c'])
self.assertEqual(next(iter_file), b'a')
iter_file.close()
self.assertTrue(iter_file.closed)
self.assertRaises(ValueError, iter_file.next)
self.assertRaises(ValueError, iter_file.read)
self.assertRaises(ValueError, iter_file.readline)
self.assertRaises(ValueError, iter_file.readlines)
# Just make sure repeated close calls don't raise an Exception
iter_file.close()
self.assertTrue(iter_file.closed)
class TestStatsdLogging(unittest.TestCase):
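    """Tests for the StatsdClient that utils.get_logger() can attach."""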
def setUp(self):
def fake_getaddrinfo(host, port, *args):
# this is what a real getaddrinfo('localhost', port,
# socket.AF_INET) returned once
return [(socket.AF_INET, # address family
socket.SOCK_STREAM, # socket type
socket.IPPROTO_TCP, # socket protocol
'', # canonical name,
('127.0.0.1', port)), # socket address
(socket.AF_INET,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP,
'',
('127.0.0.1', port))]
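        # Save the real getaddrinfo and patch in the fake for every test in
        # this class so that no real DNS lookups happen.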
self.real_getaddrinfo = utils.socket.getaddrinfo
self.getaddrinfo_patcher = mock.patch.object(
utils.socket, 'getaddrinfo', fake_getaddrinfo)
self.mock_getaddrinfo = self.getaddrinfo_patcher.start()
self.addCleanup(self.getaddrinfo_patcher.stop)
def test_get_logger_statsd_client_not_specified(self):
logger = utils.get_logger({}, 'some-name', log_route='some-route')
# white-box construction validation
self.assertIsNone(logger.logger.statsd_client)
def test_get_logger_statsd_client_defaults(self):
logger = utils.get_logger({'log_statsd_host': 'some.host.com'},
'some-name', log_route='some-route')
# white-box construction validation
self.assertTrue(isinstance(logger.logger.statsd_client,
utils.StatsdClient))
self.assertEqual(logger.logger.statsd_client._host, 'some.host.com')
self.assertEqual(logger.logger.statsd_client._port, 8125)
self.assertEqual(logger.logger.statsd_client._prefix, 'some-name.')
self.assertEqual(logger.logger.statsd_client._default_sample_rate, 1)
logger.set_statsd_prefix('some-name.more-specific')
self.assertEqual(logger.logger.statsd_client._prefix,
'some-name.more-specific.')
logger.set_statsd_prefix('')
self.assertEqual(logger.logger.statsd_client._prefix, '')
def test_get_logger_statsd_client_non_defaults(self):
logger = utils.get_logger({
'log_statsd_host': 'another.host.com',
'log_statsd_port': '9876',
'log_statsd_default_sample_rate': '0.75',
'log_statsd_sample_rate_factor': '0.81',
'log_statsd_metric_prefix': 'tomato.sauce',
}, 'some-name', log_route='some-route')
self.assertEqual(logger.logger.statsd_client._prefix,
'tomato.sauce.some-name.')
logger.set_statsd_prefix('some-name.more-specific')
self.assertEqual(logger.logger.statsd_client._prefix,
'tomato.sauce.some-name.more-specific.')
logger.set_statsd_prefix('')
self.assertEqual(logger.logger.statsd_client._prefix, 'tomato.sauce.')
self.assertEqual(logger.logger.statsd_client._host, 'another.host.com')
self.assertEqual(logger.logger.statsd_client._port, 9876)
self.assertEqual(logger.logger.statsd_client._default_sample_rate,
0.75)
self.assertEqual(logger.logger.statsd_client._sample_rate_factor,
0.81)
def test_ipv4_or_ipv6_hostname_defaults_to_ipv4(self):
def stub_getaddrinfo_both_ipv4_and_ipv6(host, port, family, *rest):
if family == socket.AF_INET:
return [(socket.AF_INET, 'blah', 'blah', 'blah',
('127.0.0.1', int(port)))]
elif family == socket.AF_INET6:
# Implemented so an incorrectly ordered implementation (IPv6
# then IPv4) would realistically fail.
return [(socket.AF_INET6, 'blah', 'blah', 'blah',
('::1', int(port), 0, 0))]
with mock.patch.object(utils.socket, 'getaddrinfo',
new=stub_getaddrinfo_both_ipv4_and_ipv6):
logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': '9876',
}, 'some-name', log_route='some-route')
statsd_client = logger.logger.statsd_client
self.assertEqual(statsd_client._sock_family, socket.AF_INET)
self.assertEqual(statsd_client._target, ('localhost', 9876))
got_sock = statsd_client._open_socket()
self.assertEqual(got_sock.family, socket.AF_INET)
def test_ipv4_instantiation_and_socket_creation(self):
logger = utils.get_logger({
'log_statsd_host': '127.0.0.1',
'log_statsd_port': '9876',
}, 'some-name', log_route='some-route')
statsd_client = logger.logger.statsd_client
self.assertEqual(statsd_client._sock_family, socket.AF_INET)
self.assertEqual(statsd_client._target, ('127.0.0.1', 9876))
got_sock = statsd_client._open_socket()
self.assertEqual(got_sock.family, socket.AF_INET)
def test_ipv6_instantiation_and_socket_creation(self):
# We have to check the given hostname or IP for IPv4/IPv6 on logger
# instantiation so we don't call getaddrinfo() too often and don't have
# to call bind() on our socket to detect IPv4/IPv6 on every send.
#
# This test uses the real getaddrinfo, so we patch over the mock to
# put the real one back. If we just stop the mock, then
# unittest.exit() blows up, but stacking real-fake-real works okay.
with mock.patch.object(utils.socket, 'getaddrinfo',
self.real_getaddrinfo):
logger = utils.get_logger({
'log_statsd_host': '::1',
'log_statsd_port': '9876',
}, 'some-name', log_route='some-route')
statsd_client = logger.logger.statsd_client
self.assertEqual(statsd_client._sock_family, socket.AF_INET6)
self.assertEqual(statsd_client._target, ('::1', 9876, 0, 0))
got_sock = statsd_client._open_socket()
self.assertEqual(got_sock.family, socket.AF_INET6)
def test_bad_hostname_instantiation(self):
with mock.patch.object(utils.socket, 'getaddrinfo',
side_effect=utils.socket.gaierror("whoops")):
logger = utils.get_logger({
'log_statsd_host': 'i-am-not-a-hostname-or-ip',
'log_statsd_port': '9876',
}, 'some-name', log_route='some-route')
statsd_client = logger.logger.statsd_client
self.assertEqual(statsd_client._sock_family, socket.AF_INET)
self.assertEqual(statsd_client._target,
('i-am-not-a-hostname-or-ip', 9876))
got_sock = statsd_client._open_socket()
self.assertEqual(got_sock.family, socket.AF_INET)
# Maybe the DNS server gets fixed in a bit and it starts working... or
# maybe the DNS record hadn't propagated yet. In any case, failed
# statsd sends will warn in the logs until the DNS failure or invalid
# IP address in the configuration is fixed.
def test_sending_ipv6(self):
def fake_getaddrinfo(host, port, *args):
# this is what a real getaddrinfo('::1', port,
# socket.AF_INET6) returned once
return [(socket.AF_INET6,
socket.SOCK_STREAM,
socket.IPPROTO_TCP,
'', ('::1', port, 0, 0)),
(socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP,
'',
('::1', port, 0, 0))]
with mock.patch.object(utils.socket, 'getaddrinfo', fake_getaddrinfo):
logger = utils.get_logger({
'log_statsd_host': '::1',
'log_statsd_port': '9876',
}, 'some-name', log_route='some-route')
statsd_client = logger.logger.statsd_client
fl = FakeLogger()
statsd_client.logger = fl
mock_socket = MockUdpSocket()
statsd_client._open_socket = lambda *_: mock_socket
logger.increment('tunafish')
self.assertEqual(fl.get_lines_for_level('warning'), [])
self.assertEqual(mock_socket.sent,
[(b'some-name.tunafish:1|c', ('::1', 9876, 0, 0))])
def test_no_exception_when_cant_send_udp_packet(self):
logger = utils.get_logger({'log_statsd_host': 'some.host.com'})
statsd_client = logger.logger.statsd_client
fl = FakeLogger()
statsd_client.logger = fl
mock_socket = MockUdpSocket(sendto_errno=errno.EPERM)
statsd_client._open_socket = lambda *_: mock_socket
logger.increment('tunafish')
expected = ["Error sending UDP message to ('some.host.com', 8125): "
"[Errno 1] test errno 1"]
self.assertEqual(fl.get_lines_for_level('warning'), expected)
def test_sample_rates(self):
logger = utils.get_logger({'log_statsd_host': 'some.host.com'})
mock_socket = MockUdpSocket()
# encapsulation? what's that?
statsd_client = logger.logger.statsd_client
self.assertTrue(statsd_client.random is random.random)
statsd_client._open_socket = lambda *_: mock_socket
statsd_client.random = lambda: 0.50001
logger.increment('tribbles', sample_rate=0.5)
self.assertEqual(len(mock_socket.sent), 0)
statsd_client.random = lambda: 0.49999
logger.increment('tribbles', sample_rate=0.5)
self.assertEqual(len(mock_socket.sent), 1)
payload = mock_socket.sent[0][0]
self.assertTrue(payload.endswith(b"|@0.5"))
def test_sample_rates_with_sample_rate_factor(self):
logger = utils.get_logger({
'log_statsd_host': 'some.host.com',
'log_statsd_default_sample_rate': '0.82',
'log_statsd_sample_rate_factor': '0.91',
})
effective_sample_rate = 0.82 * 0.91
mock_socket = MockUdpSocket()
# encapsulation? what's that?
statsd_client = logger.logger.statsd_client
self.assertTrue(statsd_client.random is random.random)
statsd_client._open_socket = lambda *_: mock_socket
statsd_client.random = lambda: effective_sample_rate + 0.001
logger.increment('tribbles')
self.assertEqual(len(mock_socket.sent), 0)
statsd_client.random = lambda: effective_sample_rate - 0.001
logger.increment('tribbles')
self.assertEqual(len(mock_socket.sent), 1)
payload = mock_socket.sent[0][0]
suffix = "|@%s" % effective_sample_rate
if six.PY3:
suffix = suffix.encode('utf-8')
self.assertTrue(payload.endswith(suffix), payload)
effective_sample_rate = 0.587 * 0.91
statsd_client.random = lambda: effective_sample_rate - 0.001
logger.increment('tribbles', sample_rate=0.587)
self.assertEqual(len(mock_socket.sent), 2)
payload = mock_socket.sent[1][0]
suffix = "|@%s" % effective_sample_rate
if six.PY3:
suffix = suffix.encode('utf-8')
self.assertTrue(payload.endswith(suffix), payload)
def test_timing_stats(self):
class MockController(object):
def __init__(self, status):
self.status = status
self.logger = self
self.args = ()
self.called = 'UNKNOWN'
def timing_since(self, *args):
self.called = 'timing'
self.args = args
@utils.timing_stats()
def METHOD(controller):
return Response(status=controller.status)
mock_controller = MockController(200)
METHOD(mock_controller)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(404)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(412)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(416)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(401)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.errors.timing')
self.assertTrue(mock_controller.args[1] > 0)
class UnsafeXrange(object):
"""
Like xrange(limit), but with extra context switching to screw things up.
"""
def __init__(self, upper_bound):
self.current = 0
self.concurrent_calls = 0
self.upper_bound = upper_bound
self.concurrent_call = False
def __iter__(self):
return self
def next(self):
if self.concurrent_calls > 0:
self.concurrent_call = True
self.concurrent_calls += 1
try:
if self.current >= self.upper_bound:
raise StopIteration
else:
val = self.current
self.current += 1
eventlet.sleep() # yield control
return val
finally:
self.concurrent_calls -= 1
__next__ = next
class TestAffinityKeyFunction(unittest.TestCase):
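    """Tests for utils.affinity_key_function()."""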
def setUp(self):
self.nodes = [dict(id=0, region=1, zone=1),
dict(id=1, region=1, zone=2),
dict(id=2, region=2, zone=1),
dict(id=3, region=2, zone=2),
dict(id=4, region=3, zone=1),
dict(id=5, region=3, zone=2),
dict(id=6, region=4, zone=0),
dict(id=7, region=4, zone=1)]
def test_single_region(self):
keyfn = utils.affinity_key_function("r3=1")
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([4, 5, 0, 1, 2, 3, 6, 7], ids)
def test_bogus_value(self):
self.assertRaises(ValueError,
utils.affinity_key_function, "r3")
self.assertRaises(ValueError,
utils.affinity_key_function, "r3=elephant")
def test_empty_value(self):
# Empty's okay, it just means no preference
keyfn = utils.affinity_key_function("")
self.assertTrue(callable(keyfn))
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7], ids)
def test_all_whitespace_value(self):
# Empty's okay, it just means no preference
keyfn = utils.affinity_key_function(" \n")
self.assertTrue(callable(keyfn))
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7], ids)
def test_with_zone_zero(self):
keyfn = utils.affinity_key_function("r4z0=1")
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([6, 0, 1, 2, 3, 4, 5, 7], ids)
def test_multiple(self):
keyfn = utils.affinity_key_function("r1=100, r4=200, r3z1=1")
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([4, 0, 1, 6, 7, 2, 3, 5], ids)
def test_more_specific_after_less_specific(self):
keyfn = utils.affinity_key_function("r2=100, r2z2=50")
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([3, 2, 0, 1, 4, 5, 6, 7], ids)
class TestAffinityLocalityPredicate(unittest.TestCase):
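    """Tests for utils.affinity_locality_predicate()."""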
def setUp(self):
self.nodes = [dict(id=0, region=1, zone=1),
dict(id=1, region=1, zone=2),
dict(id=2, region=2, zone=1),
dict(id=3, region=2, zone=2),
dict(id=4, region=3, zone=1),
dict(id=5, region=3, zone=2),
dict(id=6, region=4, zone=0),
dict(id=7, region=4, zone=1)]
def test_empty(self):
pred = utils.affinity_locality_predicate('')
self.assertTrue(pred is None)
def test_region(self):
pred = utils.affinity_locality_predicate('r1')
self.assertTrue(callable(pred))
ids = [n['id'] for n in self.nodes if pred(n)]
self.assertEqual([0, 1], ids)
def test_zone(self):
pred = utils.affinity_locality_predicate('r1z1')
self.assertTrue(callable(pred))
ids = [n['id'] for n in self.nodes if pred(n)]
self.assertEqual([0], ids)
def test_multiple(self):
pred = utils.affinity_locality_predicate('r1, r3, r4z0')
self.assertTrue(callable(pred))
ids = [n['id'] for n in self.nodes if pred(n)]
self.assertEqual([0, 1, 4, 5, 6], ids)
def test_invalid(self):
self.assertRaises(ValueError,
utils.affinity_locality_predicate, 'falafel')
self.assertRaises(ValueError,
utils.affinity_locality_predicate, 'r8zQ')
self.assertRaises(ValueError,
utils.affinity_locality_predicate, 'r2d2')
self.assertRaises(ValueError,
utils.affinity_locality_predicate, 'r1z1=1')
class TestRateLimitedIterator(unittest.TestCase):
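    """Tests for utils.RateLimitedIterator, run against a fake clock."""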
def run_under_pseudo_time(
self, func, *args, **kwargs):
curr_time = [42.0]
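        # one-element list so the nested closures below can advance the
        # shared fake clock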
def my_time():
curr_time[0] += 0.001
return curr_time[0]
def my_sleep(duration):
curr_time[0] += 0.001
curr_time[0] += duration
with patch('time.time', my_time), \
patch('eventlet.sleep', my_sleep):
return func(*args, **kwargs)
def test_rate_limiting(self):
def testfunc():
limited_iterator = utils.RateLimitedIterator(range(9999), 100)
got = []
started_at = time.time()
try:
while time.time() - started_at < 0.1:
got.append(next(limited_iterator))
except StopIteration:
pass
return got
got = self.run_under_pseudo_time(testfunc)
# it's 11, not 10, because ratelimiting doesn't apply to the very
# first element.
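        # (at 100 items/sec each ratelimited item costs 0.01s, so the 0.1s
        # window admits ten of them on top of that free first one)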
self.assertEqual(len(got), 11)
def test_rate_limiting_sometimes(self):
def testfunc():
limited_iterator = utils.RateLimitedIterator(
range(9999), 100,
ratelimit_if=lambda item: item % 23 != 0)
got = []
started_at = time.time()
try:
while time.time() - started_at < 0.5:
got.append(next(limited_iterator))
except StopIteration:
pass
return got
got = self.run_under_pseudo_time(testfunc)
# we'd get 51 without the ratelimit_if, but because 0, 23 and 46
# weren't subject to ratelimiting, we get 54 instead
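        # (0.5s at 100 items/sec is 50 ratelimited items plus the free first
        # element, which is where the 51 baseline comes from)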
self.assertEqual(len(got), 54)
def test_limit_after(self):
def testfunc():
limited_iterator = utils.RateLimitedIterator(
range(9999), 100, limit_after=5)
got = []
started_at = time.time()
try:
while time.time() - started_at < 0.1:
got.append(next(limited_iterator))
except StopIteration:
pass
return got
got = self.run_under_pseudo_time(testfunc)
# it's 16, not 15, because ratelimiting doesn't apply to the very
# first element.
self.assertEqual(len(got), 16)
class TestGreenthreadSafeIterator(unittest.TestCase):
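    """Tests for utils.GreenthreadSafeIterator."""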
def increment(self, iterable):
plus_ones = []
for n in iterable:
plus_ones.append(n + 1)
return plus_ones
def test_setup_works(self):
# it should work without concurrent access
self.assertEqual([0, 1, 2, 3], list(UnsafeXrange(4)))
iterable = UnsafeXrange(10)
pile = eventlet.GreenPile(2)
for _ in range(2):
pile.spawn(self.increment, iterable)
sorted([resp for resp in pile])
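        # the sorted() result is discarded; the point is to drain the pile
        # from two greenthreads at once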
self.assertTrue(
iterable.concurrent_call, 'test setup is insufficiently crazy')
def test_access_is_serialized(self):
pile = eventlet.GreenPile(2)
unsafe_iterable = UnsafeXrange(10)
iterable = utils.GreenthreadSafeIterator(unsafe_iterable)
for _ in range(2):
pile.spawn(self.increment, iterable)
response = sorted(sum([resp for resp in pile], []))
self.assertEqual(list(range(1, 11)), response)
self.assertTrue(
not unsafe_iterable.concurrent_call, 'concurrent call occurred')
class TestStatsdLoggingDelegation(unittest.TestCase):
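    """Statsd tests that read metrics back off a real local UDP socket."""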
def setUp(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.bind(('localhost', 0))
self.port = self.sock.getsockname()[1]
self.queue = Queue()
self.reader_thread = threading.Thread(target=self.statsd_reader)
        self.reader_thread.daemon = True
self.reader_thread.start()
def tearDown(self):
# The "no-op when disabled" test doesn't set up a real logger, so
# create one here so we can tell the reader thread to stop.
if not getattr(self, 'logger', None):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
}, 'some-name')
self.logger.increment('STOP')
self.reader_thread.join(timeout=4)
self.sock.close()
del self.logger
def statsd_reader(self):
while True:
try:
payload = self.sock.recv(4096)
if payload and b'STOP' in payload:
return 42
self.queue.put(payload)
except Exception as e:
sys.stderr.write('statsd_reader thread: %r' % (e,))
break
def _send_and_get(self, sender_fn, *args, **kwargs):
"""
Because the client library may not actually send a packet with
sample_rate < 1, we keep trying until we get one through.
"""
got = None
while not got:
sender_fn(*args, **kwargs)
try:
got = self.queue.get(timeout=0.5)
except Empty:
pass
return got
def assertStat(self, expected, sender_fn, *args, **kwargs):
got = self._send_and_get(sender_fn, *args, **kwargs)
if six.PY3:
got = got.decode('utf-8')
return self.assertEqual(expected, got)
def assertStatMatches(self, expected_regexp, sender_fn, *args, **kwargs):
got = self._send_and_get(sender_fn, *args, **kwargs)
if six.PY3:
got = got.decode('utf-8')
return self.assertTrue(re.search(expected_regexp, got),
[got, expected_regexp])
def test_methods_are_no_ops_when_not_enabled(self):
logger = utils.get_logger({
# No "log_statsd_host" means "disabled"
'log_statsd_port': str(self.port),
}, 'some-name')
# Delegate methods are no-ops
self.assertIsNone(logger.update_stats('foo', 88))
self.assertIsNone(logger.update_stats('foo', 88, 0.57))
self.assertIsNone(logger.update_stats('foo', 88,
sample_rate=0.61))
self.assertIsNone(logger.increment('foo'))
self.assertIsNone(logger.increment('foo', 0.57))
self.assertIsNone(logger.increment('foo', sample_rate=0.61))
self.assertIsNone(logger.decrement('foo'))
self.assertIsNone(logger.decrement('foo', 0.57))
self.assertIsNone(logger.decrement('foo', sample_rate=0.61))
self.assertIsNone(logger.timing('foo', 88.048))
self.assertIsNone(logger.timing('foo', 88.57, 0.34))
self.assertIsNone(logger.timing('foo', 88.998, sample_rate=0.82))
self.assertIsNone(logger.timing_since('foo', 8938))
self.assertIsNone(logger.timing_since('foo', 8948, 0.57))
self.assertIsNone(logger.timing_since('foo', 849398,
sample_rate=0.61))
# Now, the queue should be empty (no UDP packets sent)
self.assertRaises(Empty, self.queue.get_nowait)
def test_delegate_methods_with_no_default_sample_rate(self):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
}, 'some-name')
self.assertStat('some-name.some.counter:1|c', self.logger.increment,
'some.counter')
self.assertStat('some-name.some.counter:-1|c', self.logger.decrement,
'some.counter')
self.assertStat('some-name.some.operation:4900.0|ms',
self.logger.timing, 'some.operation', 4.9 * 1000)
        self.assertStatMatches(r'some-name\.another\.operation:\d+\.\d+\|ms',
self.logger.timing_since, 'another.operation',
time.time())
self.assertStat('some-name.another.counter:42|c',
self.logger.update_stats, 'another.counter', 42)
# Each call can override the sample_rate (also, bonus prefix test)
self.logger.set_statsd_prefix('pfx')
self.assertStat('pfx.some.counter:1|c|@0.972', self.logger.increment,
'some.counter', sample_rate=0.972)
self.assertStat('pfx.some.counter:-1|c|@0.972', self.logger.decrement,
'some.counter', sample_rate=0.972)
self.assertStat('pfx.some.operation:4900.0|ms|@0.972',
self.logger.timing, 'some.operation', 4.9 * 1000,
sample_rate=0.972)
        self.assertStatMatches(r'pfx\.another\.op:\d+\.\d+\|ms|@0.972',
self.logger.timing_since, 'another.op',
time.time(), sample_rate=0.972)
self.assertStat('pfx.another.counter:3|c|@0.972',
self.logger.update_stats, 'another.counter', 3,
sample_rate=0.972)
# Can override sample_rate with non-keyword arg
self.logger.set_statsd_prefix('')
self.assertStat('some.counter:1|c|@0.939', self.logger.increment,
'some.counter', 0.939)
self.assertStat('some.counter:-1|c|@0.939', self.logger.decrement,
'some.counter', 0.939)
self.assertStat('some.operation:4900.0|ms|@0.939',
self.logger.timing, 'some.operation',
4.9 * 1000, 0.939)
        self.assertStatMatches(r'another\.op:\d+\.\d+\|ms|@0.939',
self.logger.timing_since, 'another.op',
time.time(), 0.939)
self.assertStat('another.counter:3|c|@0.939',
self.logger.update_stats, 'another.counter', 3, 0.939)
def test_delegate_methods_with_default_sample_rate(self):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
'log_statsd_default_sample_rate': '0.93',
}, 'pfx')
self.assertStat('pfx.some.counter:1|c|@0.93', self.logger.increment,
'some.counter')
self.assertStat('pfx.some.counter:-1|c|@0.93', self.logger.decrement,
'some.counter')
self.assertStat('pfx.some.operation:4760.0|ms|@0.93',
self.logger.timing, 'some.operation', 4.76 * 1000)
        self.assertStatMatches(r'pfx\.another\.op:\d+\.\d+\|ms|@0.93',
self.logger.timing_since, 'another.op',
time.time())
self.assertStat('pfx.another.counter:3|c|@0.93',
self.logger.update_stats, 'another.counter', 3)
# Each call can override the sample_rate
self.assertStat('pfx.some.counter:1|c|@0.9912', self.logger.increment,
'some.counter', sample_rate=0.9912)
self.assertStat('pfx.some.counter:-1|c|@0.9912', self.logger.decrement,
'some.counter', sample_rate=0.9912)
self.assertStat('pfx.some.operation:4900.0|ms|@0.9912',
self.logger.timing, 'some.operation', 4.9 * 1000,
sample_rate=0.9912)
        self.assertStatMatches(r'pfx\.another\.op:\d+\.\d+\|ms|@0.9912',
self.logger.timing_since, 'another.op',
time.time(), sample_rate=0.9912)
self.assertStat('pfx.another.counter:3|c|@0.9912',
self.logger.update_stats, 'another.counter', 3,
sample_rate=0.9912)
# Can override sample_rate with non-keyword arg
self.logger.set_statsd_prefix('')
self.assertStat('some.counter:1|c|@0.987654', self.logger.increment,
'some.counter', 0.987654)
self.assertStat('some.counter:-1|c|@0.987654', self.logger.decrement,
'some.counter', 0.987654)
self.assertStat('some.operation:4900.0|ms|@0.987654',
self.logger.timing, 'some.operation',
4.9 * 1000, 0.987654)
        self.assertStatMatches(r'another\.op:\d+\.\d+\|ms|@0.987654',
self.logger.timing_since, 'another.op',
time.time(), 0.987654)
self.assertStat('another.counter:3|c|@0.987654',
self.logger.update_stats, 'another.counter',
3, 0.987654)
def test_delegate_methods_with_metric_prefix(self):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
'log_statsd_metric_prefix': 'alpha.beta',
}, 'pfx')
self.assertStat('alpha.beta.pfx.some.counter:1|c',
self.logger.increment, 'some.counter')
self.assertStat('alpha.beta.pfx.some.counter:-1|c',
self.logger.decrement, 'some.counter')
self.assertStat('alpha.beta.pfx.some.operation:4760.0|ms',
self.logger.timing, 'some.operation', 4.76 * 1000)
self.assertStatMatches(
            r'alpha\.beta\.pfx\.another\.op:\d+\.\d+\|ms',
self.logger.timing_since, 'another.op', time.time())
self.assertStat('alpha.beta.pfx.another.counter:3|c',
self.logger.update_stats, 'another.counter', 3)
self.logger.set_statsd_prefix('')
self.assertStat('alpha.beta.some.counter:1|c|@0.9912',
self.logger.increment, 'some.counter',
sample_rate=0.9912)
self.assertStat('alpha.beta.some.counter:-1|c|@0.9912',
self.logger.decrement, 'some.counter', 0.9912)
self.assertStat('alpha.beta.some.operation:4900.0|ms|@0.9912',
self.logger.timing, 'some.operation', 4.9 * 1000,
sample_rate=0.9912)
        self.assertStatMatches(
            r'alpha\.beta\.another\.op:\d+\.\d+\|ms|@0.9912',
            self.logger.timing_since, 'another.op',
            time.time(), sample_rate=0.9912)
self.assertStat('alpha.beta.another.counter:3|c|@0.9912',
self.logger.update_stats, 'another.counter', 3,
sample_rate=0.9912)
def test_get_valid_utf8_str(self):
unicode_sample = u'\uc77c\uc601'
valid_utf8_str = unicode_sample.encode('utf-8')
invalid_utf8_str = unicode_sample.encode('utf-8')[::-1]
self.assertEqual(valid_utf8_str,
utils.get_valid_utf8_str(valid_utf8_str))
self.assertEqual(valid_utf8_str,
utils.get_valid_utf8_str(unicode_sample))
self.assertEqual(b'\xef\xbf\xbd\xef\xbf\xbd\xec\xbc\x9d\xef\xbf\xbd',
utils.get_valid_utf8_str(invalid_utf8_str))
@reset_logger_state
def test_thread_locals(self):
logger = utils.get_logger(None)
# test the setter
logger.thread_locals = ('id', 'ip')
self.assertEqual(logger.thread_locals, ('id', 'ip'))
# reset
logger.thread_locals = (None, None)
self.assertEqual(logger.thread_locals, (None, None))
logger.txn_id = '1234'
logger.client_ip = '1.2.3.4'
self.assertEqual(logger.thread_locals, ('1234', '1.2.3.4'))
logger.txn_id = '5678'
logger.client_ip = '5.6.7.8'
self.assertEqual(logger.thread_locals, ('5678', '5.6.7.8'))
def test_no_fdatasync(self):
called = []
class NoFdatasync(object):
pass
def fsync(fd):
called.append(fd)
with patch('swift.common.utils.os', NoFdatasync()):
with patch('swift.common.utils.fsync', fsync):
utils.fdatasync(12345)
self.assertEqual(called, [12345])
def test_yes_fdatasync(self):
called = []
class YesFdatasync(object):
def fdatasync(self, fd):
called.append(fd)
with patch('swift.common.utils.os', YesFdatasync()):
utils.fdatasync(12345)
self.assertEqual(called, [12345])
def test_fsync_bad_fullsync(self):
class FCNTL(object):
F_FULLSYNC = 123
def fcntl(self, fd, op):
raise IOError(18)
with patch('swift.common.utils.fcntl', FCNTL()):
self.assertRaises(OSError, lambda: utils.fsync(12345))
def test_fsync_f_fullsync(self):
called = []
class FCNTL(object):
F_FULLSYNC = 123
def fcntl(self, fd, op):
called[:] = [fd, op]
return 0
with patch('swift.common.utils.fcntl', FCNTL()):
utils.fsync(12345)
self.assertEqual(called, [12345, 123])
def test_fsync_no_fullsync(self):
called = []
class FCNTL(object):
pass
def fsync(fd):
called.append(fd)
with patch('swift.common.utils.fcntl', FCNTL()):
with patch('os.fsync', fsync):
utils.fsync(12345)
self.assertEqual(called, [12345])
class TestThreadPool(unittest.TestCase):
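    """Tests for utils.ThreadPool."""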
def setUp(self):
self.tp = None
def tearDown(self):
if self.tp:
self.tp.terminate()
def _pipe_count(self):
# Counts the number of pipes that this process owns.
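        # (relies on the Linux-specific /proc/<pid>/fd listing)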
fd_dir = "/proc/%d/fd" % os.getpid()
def is_pipe(path):
try:
stat_result = os.stat(path)
return stat.S_ISFIFO(stat_result.st_mode)
except OSError:
return False
return len([fd for fd in os.listdir(fd_dir)
if is_pipe(os.path.join(fd_dir, fd))])
def _thread_id(self):
return threading.current_thread().ident
def _capture_args(self, *args, **kwargs):
return {'args': args, 'kwargs': kwargs}
def _raise_valueerror(self):
return int('fishcakes')
def test_run_in_thread_with_threads(self):
tp = self.tp = utils.ThreadPool(1)
my_id = self._thread_id()
other_id = tp.run_in_thread(self._thread_id)
self.assertNotEqual(my_id, other_id)
result = tp.run_in_thread(self._capture_args, 1, 2, bert='ernie')
self.assertEqual(result, {'args': (1, 2),
'kwargs': {'bert': 'ernie'}})
caught = False
try:
tp.run_in_thread(self._raise_valueerror)
except ValueError:
caught = True
self.assertTrue(caught)
def test_force_run_in_thread_with_threads(self):
# with nthreads > 0, force_run_in_thread looks just like run_in_thread
tp = self.tp = utils.ThreadPool(1)
my_id = self._thread_id()
other_id = tp.force_run_in_thread(self._thread_id)
self.assertNotEqual(my_id, other_id)
result = tp.force_run_in_thread(self._capture_args, 1, 2, bert='ernie')
self.assertEqual(result, {'args': (1, 2),
'kwargs': {'bert': 'ernie'}})
self.assertRaises(ValueError, tp.force_run_in_thread,
self._raise_valueerror)
def test_run_in_thread_without_threads(self):
# with zero threads, run_in_thread doesn't actually do so
tp = utils.ThreadPool(0)
my_id = self._thread_id()
other_id = tp.run_in_thread(self._thread_id)
self.assertEqual(my_id, other_id)
result = tp.run_in_thread(self._capture_args, 1, 2, bert='ernie')
self.assertEqual(result, {'args': (1, 2),
'kwargs': {'bert': 'ernie'}})
self.assertRaises(ValueError, tp.run_in_thread,
self._raise_valueerror)
def test_force_run_in_thread_without_threads(self):
# with zero threads, force_run_in_thread uses eventlet.tpool
tp = utils.ThreadPool(0)
my_id = self._thread_id()
other_id = tp.force_run_in_thread(self._thread_id)
self.assertNotEqual(my_id, other_id)
result = tp.force_run_in_thread(self._capture_args, 1, 2, bert='ernie')
self.assertEqual(result, {'args': (1, 2),
'kwargs': {'bert': 'ernie'}})
self.assertRaises(ValueError, tp.force_run_in_thread,
self._raise_valueerror)
def test_preserving_stack_trace_from_thread(self):
def gamma():
return 1 / 0 # ZeroDivisionError
def beta():
return gamma()
def alpha():
return beta()
tp = self.tp = utils.ThreadPool(1)
try:
tp.run_in_thread(alpha)
except ZeroDivisionError:
# NB: format is (filename, line number, function name, text)
tb_func = [elem[2] for elem
in traceback.extract_tb(sys.exc_info()[2])]
else:
self.fail("Expected ZeroDivisionError")
self.assertEqual(tb_func[-1], "gamma")
self.assertEqual(tb_func[-2], "beta")
self.assertEqual(tb_func[-3], "alpha")
# omit the middle; what's important is that the start and end are
# included, not the exact names of helper methods
self.assertEqual(tb_func[1], "run_in_thread")
self.assertEqual(tb_func[0], "test_preserving_stack_trace_from_thread")
def test_terminate(self):
        initial_thread_count = threading.active_count()
initial_pipe_count = self._pipe_count()
tp = utils.ThreadPool(4)
# do some work to ensure any lazy initialization happens
tp.run_in_thread(os.path.join, 'foo', 'bar')
tp.run_in_thread(os.path.join, 'baz', 'quux')
# 4 threads in the ThreadPool, plus one pipe for IPC; this also
# serves as a sanity check that we're actually allocating some
# resources to free later
        self.assertEqual(initial_thread_count, threading.active_count() - 4)
self.assertEqual(initial_pipe_count, self._pipe_count() - 2)
tp.terminate()
        self.assertEqual(initial_thread_count, threading.active_count())
self.assertEqual(initial_pipe_count, self._pipe_count())
def test_cant_run_after_terminate(self):
tp = utils.ThreadPool(0)
tp.terminate()
self.assertRaises(ThreadPoolDead, tp.run_in_thread, lambda: 1)
self.assertRaises(ThreadPoolDead, tp.force_run_in_thread, lambda: 1)
def test_double_terminate_doesnt_crash(self):
tp = utils.ThreadPool(0)
tp.terminate()
tp.terminate()
tp = utils.ThreadPool(1)
tp.terminate()
tp.terminate()
def test_terminate_no_threads_doesnt_crash(self):
tp = utils.ThreadPool(0)
tp.terminate()
class TestAuditLocationGenerator(unittest.TestCase):
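    """Tests for utils.audit_location_generator()."""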
def test_drive_tree_access(self):
orig_listdir = utils.listdir
def _mock_utils_listdir(path):
if 'bad_part' in path:
raise OSError(errno.EACCES)
elif 'bad_suffix' in path:
raise OSError(errno.EACCES)
elif 'bad_hash' in path:
raise OSError(errno.EACCES)
else:
return orig_listdir(path)
# Check Raise on Bad partition
tmpdir = mkdtemp()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
obj_path = os.path.join(data, "bad_part")
with open(obj_path, "w"):
pass
part1 = os.path.join(data, "partition1")
os.makedirs(part1)
part2 = os.path.join(data, "partition2")
os.makedirs(part2)
with patch('swift.common.utils.listdir', _mock_utils_listdir):
audit = lambda: list(utils.audit_location_generator(
tmpdir, "data", mount_check=False))
self.assertRaises(OSError, audit)
rmtree(tmpdir)
# Check Raise on Bad Suffix
tmpdir = mkdtemp()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
part1 = os.path.join(data, "partition1")
os.makedirs(part1)
part2 = os.path.join(data, "partition2")
os.makedirs(part2)
obj_path = os.path.join(part1, "bad_suffix")
with open(obj_path, 'w'):
pass
suffix = os.path.join(part2, "suffix")
os.makedirs(suffix)
with patch('swift.common.utils.listdir', _mock_utils_listdir):
audit = lambda: list(utils.audit_location_generator(
tmpdir, "data", mount_check=False))
self.assertRaises(OSError, audit)
rmtree(tmpdir)
# Check Raise on Bad Hash
tmpdir = mkdtemp()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
part1 = os.path.join(data, "partition1")
os.makedirs(part1)
suffix = os.path.join(part1, "suffix")
os.makedirs(suffix)
hash1 = os.path.join(suffix, "hash1")
os.makedirs(hash1)
obj_path = os.path.join(suffix, "bad_hash")
with open(obj_path, 'w'):
pass
with patch('swift.common.utils.listdir', _mock_utils_listdir):
audit = lambda: list(utils.audit_location_generator(
tmpdir, "data", mount_check=False))
self.assertRaises(OSError, audit)
rmtree(tmpdir)
def test_non_dir_drive(self):
with temptree([]) as tmpdir:
logger = FakeLogger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
            # Create a file that represents a non-dir drive
            with open(os.path.join(tmpdir, 'asdf'), 'w'):
                pass
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=False, logger=logger
)
self.assertEqual(list(locations), [])
self.assertEqual(1, len(logger.get_lines_for_level('warning')))
# Test without the logger
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=False
)
self.assertEqual(list(locations), [])
def test_mount_check_drive(self):
with temptree([]) as tmpdir:
logger = FakeLogger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
            # Create a file that represents a non-dir drive
            with open(os.path.join(tmpdir, 'asdf'), 'w'):
                pass
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=True, logger=logger
)
self.assertEqual(list(locations), [])
self.assertEqual(2, len(logger.get_lines_for_level('warning')))
# Test without the logger
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=True
)
self.assertEqual(list(locations), [])
def test_non_dir_contents(self):
with temptree([]) as tmpdir:
logger = FakeLogger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
with open(os.path.join(data, "partition1"), "w"):
pass
partition = os.path.join(data, "partition2")
os.makedirs(partition)
with open(os.path.join(partition, "suffix1"), "w"):
pass
suffix = os.path.join(partition, "suffix2")
os.makedirs(suffix)
with open(os.path.join(suffix, "hash1"), "w"):
pass
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=False, logger=logger
)
self.assertEqual(list(locations), [])
def test_find_objects(self):
with temptree([]) as tmpdir:
expected_objs = list()
logger = FakeLogger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
            # Create a file that represents a non-dir drive
            with open(os.path.join(tmpdir, 'asdf'), 'w'):
                pass
partition = os.path.join(data, "partition1")
os.makedirs(partition)
suffix = os.path.join(partition, "suffix")
os.makedirs(suffix)
hash_path = os.path.join(suffix, "hash")
os.makedirs(hash_path)
obj_path = os.path.join(hash_path, "obj1.db")
with open(obj_path, "w"):
pass
expected_objs.append((obj_path, 'drive', 'partition1'))
partition = os.path.join(data, "partition2")
os.makedirs(partition)
suffix = os.path.join(partition, "suffix2")
os.makedirs(suffix)
hash_path = os.path.join(suffix, "hash2")
os.makedirs(hash_path)
obj_path = os.path.join(hash_path, "obj2.db")
with open(obj_path, "w"):
pass
expected_objs.append((obj_path, 'drive', 'partition2'))
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=False, logger=logger
)
got_objs = list(locations)
self.assertEqual(len(got_objs), len(expected_objs))
self.assertEqual(sorted(got_objs), sorted(expected_objs))
self.assertEqual(1, len(logger.get_lines_for_level('warning')))
def test_ignore_metadata(self):
with temptree([]) as tmpdir:
logger = FakeLogger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
partition = os.path.join(data, "partition2")
os.makedirs(partition)
suffix = os.path.join(partition, "suffix2")
os.makedirs(suffix)
hash_path = os.path.join(suffix, "hash2")
os.makedirs(hash_path)
obj_path = os.path.join(hash_path, "obj1.dat")
with open(obj_path, "w"):
pass
meta_path = os.path.join(hash_path, "obj1.meta")
with open(meta_path, "w"):
pass
locations = utils.audit_location_generator(
tmpdir, "data", ".dat", mount_check=False, logger=logger
)
self.assertEqual(list(locations),
[(obj_path, "drive", "partition2")])
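
# Added illustration (not part of the original tests).  The tests above build
# the on-disk layout that audit_location_generator walks:
#     <root>/<drive>/<datadir>/<partition>/<suffix>/<hash>/<object file>
# and expect it to yield (object_path, device, partition) tuples.  A minimal
# self-contained sketch of that contract, mirroring test_find_objects:
def _audit_layout_sketch(tmpdir):
    hash_dir = os.path.join(tmpdir, "drive", "data", "p1", "suf", "hash")
    os.makedirs(hash_dir)
    obj_path = os.path.join(hash_dir, "obj1.db")
    with open(obj_path, "w"):
        pass
    locations = utils.audit_location_generator(tmpdir, "data", mount_check=False)
    assert list(locations) == [(obj_path, "drive", "p1")]
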
class TestGreenAsyncPile(unittest.TestCase):
def test_runs_everything(self):
def run_test():
tests_ran[0] += 1
return tests_ran[0]
tests_ran = [0]
pile = utils.GreenAsyncPile(3)
for x in range(3):
pile.spawn(run_test)
self.assertEqual(sorted(x for x in pile), [1, 2, 3])
def test_is_asynchronous(self):
def run_test(index):
events[index].wait()
return index
pile = utils.GreenAsyncPile(3)
for order in ((1, 2, 0), (0, 1, 2), (2, 1, 0), (0, 2, 1)):
events = [eventlet.event.Event(), eventlet.event.Event(),
eventlet.event.Event()]
for x in range(3):
pile.spawn(run_test, x)
for x in order:
events[x].send()
self.assertEqual(next(pile), x)
def test_next_when_empty(self):
def run_test():
pass
pile = utils.GreenAsyncPile(3)
pile.spawn(run_test)
self.assertEqual(next(pile), None)
self.assertRaises(StopIteration, lambda: next(pile))
def test_waitall_timeout_timesout(self):
def run_test(sleep_duration):
eventlet.sleep(sleep_duration)
completed[0] += 1
return sleep_duration
completed = [0]
pile = utils.GreenAsyncPile(3)
pile.spawn(run_test, 0.1)
pile.spawn(run_test, 1.0)
self.assertEqual(pile.waitall(0.5), [0.1])
self.assertEqual(completed[0], 1)
def test_waitall_timeout_completes(self):
def run_test(sleep_duration):
eventlet.sleep(sleep_duration)
completed[0] += 1
return sleep_duration
completed = [0]
pile = utils.GreenAsyncPile(3)
pile.spawn(run_test, 0.1)
pile.spawn(run_test, 0.1)
self.assertEqual(pile.waitall(0.5), [0.1, 0.1])
self.assertEqual(completed[0], 2)
def test_waitfirst_only_returns_first(self):
def run_test(name):
eventlet.sleep(0)
completed.append(name)
return name
completed = []
pile = utils.GreenAsyncPile(3)
pile.spawn(run_test, 'first')
pile.spawn(run_test, 'second')
pile.spawn(run_test, 'third')
self.assertEqual(pile.waitfirst(0.5), completed[0])
# 3 still completed, but only the first was returned.
self.assertEqual(3, len(completed))
def test_wait_with_firstn(self):
def run_test(name):
eventlet.sleep(0)
completed.append(name)
return name
for first_n in [None] + list(range(6)):
completed = []
pile = utils.GreenAsyncPile(10)
for i in range(10):
pile.spawn(run_test, i)
actual = pile._wait(1, first_n)
expected_n = first_n if first_n else 10
self.assertEqual(completed[:expected_n], actual)
self.assertEqual(10, len(completed))
def test_pending(self):
pile = utils.GreenAsyncPile(3)
self.assertEqual(0, pile._pending)
for repeats in range(2):
            # repeat to verify that pending goes back up after going down
for i in range(4):
pile.spawn(lambda: i)
self.assertEqual(4, pile._pending)
for i in range(3, -1, -1):
next(pile)
self.assertEqual(i, pile._pending)
# sanity check - the pile is empty
self.assertRaises(StopIteration, pile.next)
# pending remains 0
self.assertEqual(0, pile._pending)
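
# Added usage sketch (not part of the original tests), based only on the
# behaviour exercised above: GreenAsyncPile(n) runs spawned callables on up to
# n concurrent greenthreads and hands back results as they complete.  `urls`
# and `fetch` below are hypothetical placeholders.
def _green_async_pile_sketch(urls, fetch):
    pile = utils.GreenAsyncPile(5)
    for url in urls:
        pile.spawn(fetch, url)          # fetch(url) runs concurrently
    # waitall(0.5) returns whatever results completed within ~0.5 seconds
    return pile.waitall(0.5)
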
class TestLRUCache(unittest.TestCase):
def test_maxsize(self):
@utils.LRUCache(maxsize=10)
def f(*args):
return math.sqrt(*args)
_orig_math_sqrt = math.sqrt
# setup cache [0-10)
for i in range(10):
self.assertEqual(math.sqrt(i), f(i))
self.assertEqual(f.size(), 10)
# validate cache [0-10)
with patch('math.sqrt'):
for i in range(10):
self.assertEqual(_orig_math_sqrt(i), f(i))
self.assertEqual(f.size(), 10)
# update cache [10-20)
for i in range(10, 20):
self.assertEqual(math.sqrt(i), f(i))
# cache size is fixed
self.assertEqual(f.size(), 10)
# validate cache [10-20)
with patch('math.sqrt'):
for i in range(10, 20):
self.assertEqual(_orig_math_sqrt(i), f(i))
# validate un-cached [0-10)
with patch('math.sqrt', new=None):
for i in range(10):
self.assertRaises(TypeError, f, i)
# cache unchanged
self.assertEqual(f.size(), 10)
with patch('math.sqrt'):
for i in range(10, 20):
self.assertEqual(_orig_math_sqrt(i), f(i))
self.assertEqual(f.size(), 10)
def test_maxtime(self):
@utils.LRUCache(maxtime=30)
def f(*args):
return math.sqrt(*args)
self.assertEqual(30, f.maxtime)
_orig_math_sqrt = math.sqrt
now = time.time()
the_future = now + 31
# setup cache [0-10)
with patch('time.time', lambda: now):
for i in range(10):
self.assertEqual(math.sqrt(i), f(i))
self.assertEqual(f.size(), 10)
# validate cache [0-10)
with patch('math.sqrt'):
for i in range(10):
self.assertEqual(_orig_math_sqrt(i), f(i))
self.assertEqual(f.size(), 10)
# validate expired [0-10)
with patch('math.sqrt', new=None):
with patch('time.time', lambda: the_future):
for i in range(10):
self.assertRaises(TypeError, f, i)
# validate repopulates [0-10)
with patch('time.time', lambda: the_future):
for i in range(10):
self.assertEqual(math.sqrt(i), f(i))
# reuses cache space
self.assertEqual(f.size(), 10)
def test_set_maxtime(self):
@utils.LRUCache(maxtime=30)
def f(*args):
return math.sqrt(*args)
self.assertEqual(30, f.maxtime)
self.assertEqual(2, f(4))
self.assertEqual(1, f.size())
# expire everything
f.maxtime = -1
# validate un-cached [0-10)
with patch('math.sqrt', new=None):
self.assertRaises(TypeError, f, 4)
def test_set_maxsize(self):
@utils.LRUCache(maxsize=10)
def f(*args):
return math.sqrt(*args)
for i in range(12):
f(i)
self.assertEqual(f.size(), 10)
f.maxsize = 4
for i in range(12):
f(i)
self.assertEqual(f.size(), 4)
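
# Added usage sketch (not part of the original tests), based only on the
# behaviour exercised above: utils.LRUCache memoizes a function and keeps at
# most `maxsize` entries (test_maxtime above shows entries also expire after
# `maxtime` seconds).
def _lru_cache_usage_sketch():
    @utils.LRUCache(maxsize=2)
    def double(x):
        return 2 * x

    assert double(2) == 4 and double(3) == 6
    assert double.size() == 2    # two distinct arguments cached
    double(4)                    # a third argument evicts the oldest entry
    assert double.size() == 2
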
class TestParseContentRange(unittest.TestCase):
def test_good(self):
start, end, total = utils.parse_content_range("bytes 100-200/300")
self.assertEqual(start, 100)
self.assertEqual(end, 200)
self.assertEqual(total, 300)
def test_bad(self):
self.assertRaises(ValueError, utils.parse_content_range,
"100-300/500")
self.assertRaises(ValueError, utils.parse_content_range,
"bytes 100-200/aardvark")
self.assertRaises(ValueError, utils.parse_content_range,
"bytes bulbous-bouffant/4994801")
class TestParseContentDisposition(unittest.TestCase):
def test_basic_content_type(self):
name, attrs = utils.parse_content_disposition('text/plain')
self.assertEqual(name, 'text/plain')
self.assertEqual(attrs, {})
def test_content_type_with_charset(self):
name, attrs = utils.parse_content_disposition(
'text/plain; charset=UTF8')
self.assertEqual(name, 'text/plain')
self.assertEqual(attrs, {'charset': 'UTF8'})
def test_content_disposition(self):
name, attrs = utils.parse_content_disposition(
'form-data; name="somefile"; filename="test.html"')
self.assertEqual(name, 'form-data')
self.assertEqual(attrs, {'name': 'somefile', 'filename': 'test.html'})
def test_content_disposition_without_white_space(self):
name, attrs = utils.parse_content_disposition(
'form-data;name="somefile";filename="test.html"')
self.assertEqual(name, 'form-data')
self.assertEqual(attrs, {'name': 'somefile', 'filename': 'test.html'})
class TestIterMultipartMimeDocuments(unittest.TestCase):
def test_bad_start(self):
it = utils.iter_multipart_mime_documents(StringIO('blah'), 'unique')
exc = None
try:
next(it)
except MimeInvalid as err:
exc = err
self.assertTrue('invalid starting boundary' in str(exc))
self.assertTrue('--unique' in str(exc))
def test_empty(self):
it = utils.iter_multipart_mime_documents(StringIO('--unique'),
'unique')
fp = next(it)
self.assertEqual(fp.read(), '')
exc = None
try:
next(it)
except StopIteration as err:
exc = err
self.assertTrue(exc is not None)
def test_basic(self):
it = utils.iter_multipart_mime_documents(
StringIO('--unique\r\nabcdefg\r\n--unique--'), 'unique')
fp = next(it)
self.assertEqual(fp.read(), 'abcdefg')
exc = None
try:
next(it)
except StopIteration as err:
exc = err
self.assertTrue(exc is not None)
def test_basic2(self):
it = utils.iter_multipart_mime_documents(
StringIO('--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
'unique')
fp = next(it)
self.assertEqual(fp.read(), 'abcdefg')
fp = next(it)
self.assertEqual(fp.read(), 'hijkl')
exc = None
try:
next(it)
except StopIteration as err:
exc = err
self.assertTrue(exc is not None)
def test_tiny_reads(self):
it = utils.iter_multipart_mime_documents(
StringIO('--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
'unique')
fp = next(it)
self.assertEqual(fp.read(2), 'ab')
self.assertEqual(fp.read(2), 'cd')
self.assertEqual(fp.read(2), 'ef')
self.assertEqual(fp.read(2), 'g')
self.assertEqual(fp.read(2), '')
fp = next(it)
self.assertEqual(fp.read(), 'hijkl')
exc = None
try:
next(it)
except StopIteration as err:
exc = err
self.assertTrue(exc is not None)
def test_big_reads(self):
it = utils.iter_multipart_mime_documents(
StringIO('--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
'unique')
fp = next(it)
self.assertEqual(fp.read(65536), 'abcdefg')
self.assertEqual(fp.read(), '')
fp = next(it)
self.assertEqual(fp.read(), 'hijkl')
exc = None
try:
next(it)
except StopIteration as err:
exc = err
self.assertTrue(exc is not None)
def test_leading_crlfs(self):
it = utils.iter_multipart_mime_documents(
StringIO('\r\n\r\n\r\n--unique\r\nabcdefg\r\n'
'--unique\r\nhijkl\r\n--unique--'),
'unique')
fp = next(it)
self.assertEqual(fp.read(65536), 'abcdefg')
self.assertEqual(fp.read(), '')
fp = next(it)
self.assertEqual(fp.read(), 'hijkl')
        self.assertRaises(StopIteration, lambda: next(it))
def test_broken_mid_stream(self):
# We go ahead and accept whatever is sent instead of rejecting the
# whole request, in case the partial form is still useful.
it = utils.iter_multipart_mime_documents(
StringIO('--unique\r\nabc'), 'unique')
fp = next(it)
self.assertEqual(fp.read(), 'abc')
exc = None
try:
next(it)
except StopIteration as err:
exc = err
self.assertTrue(exc is not None)
def test_readline(self):
it = utils.iter_multipart_mime_documents(
StringIO('--unique\r\nab\r\ncd\ref\ng\r\n--unique\r\nhi\r\n\r\n'
'jkl\r\n\r\n--unique--'), 'unique')
fp = next(it)
self.assertEqual(fp.readline(), 'ab\r\n')
self.assertEqual(fp.readline(), 'cd\ref\ng')
self.assertEqual(fp.readline(), '')
fp = next(it)
self.assertEqual(fp.readline(), 'hi\r\n')
self.assertEqual(fp.readline(), '\r\n')
self.assertEqual(fp.readline(), 'jkl\r\n')
exc = None
try:
next(it)
except StopIteration as err:
exc = err
self.assertTrue(exc is not None)
def test_readline_with_tiny_chunks(self):
it = utils.iter_multipart_mime_documents(
StringIO('--unique\r\nab\r\ncd\ref\ng\r\n--unique\r\nhi\r\n'
'\r\njkl\r\n\r\n--unique--'),
'unique',
read_chunk_size=2)
fp = next(it)
self.assertEqual(fp.readline(), 'ab\r\n')
self.assertEqual(fp.readline(), 'cd\ref\ng')
self.assertEqual(fp.readline(), '')
fp = next(it)
self.assertEqual(fp.readline(), 'hi\r\n')
self.assertEqual(fp.readline(), '\r\n')
self.assertEqual(fp.readline(), 'jkl\r\n')
exc = None
try:
next(it)
except StopIteration as err:
exc = err
self.assertTrue(exc is not None)
class TestParseMimeHeaders(unittest.TestCase):
def test_parse_mime_headers(self):
doc_file = BytesIO(b"""Content-Disposition: form-data; name="file_size"
Foo: Bar
NOT-title-cAsED: quux
Connexion: =?iso8859-1?q?r=E9initialis=E9e_par_l=27homologue?=
Status: =?utf-8?b?5byA5aeL6YCa6L+H5a+56LGh5aSN5Yi2?=
Latin-1: Resincronizaci\xf3n realizada con \xe9xito
Utf-8: \xd0\xba\xd0\xbe\xd0\xbd\xd1\x82\xd0\xb5\xd0\xb9\xd0\xbd\xd0\xb5\xd1\x80
This is the body
""")
headers = utils.parse_mime_headers(doc_file)
utf8 = u'\u043a\u043e\u043d\u0442\u0435\u0439\u043d\u0435\u0440'
if six.PY2:
utf8 = utf8.encode('utf-8')
expected_headers = {
'Content-Disposition': 'form-data; name="file_size"',
'Foo': "Bar",
'Not-Title-Cased': "quux",
# Encoded-word or non-ASCII values are treated just like any other
# bytestring (at least for now)
'Connexion': "=?iso8859-1?q?r=E9initialis=E9e_par_l=27homologue?=",
'Status': "=?utf-8?b?5byA5aeL6YCa6L+H5a+56LGh5aSN5Yi2?=",
'Latin-1': "Resincronizaci\xf3n realizada con \xe9xito",
'Utf-8': utf8,
}
self.assertEqual(expected_headers, headers)
self.assertEqual(b"This is the body\n", doc_file.read())
class FakeResponse(object):
def __init__(self, status, headers, body):
self.status = status
self.headers = HeaderKeyDict(headers)
self.body = StringIO(body)
def getheader(self, header_name):
return str(self.headers.get(header_name, ''))
def getheaders(self):
return self.headers.items()
def read(self, length=None):
return self.body.read(length)
def readline(self, length=None):
return self.body.readline(length)
class TestDocumentItersToHTTPResponseBody(unittest.TestCase):
def test_no_parts(self):
body = utils.document_iters_to_http_response_body(
iter([]), 'dontcare',
multipart=False, logger=FakeLogger())
self.assertEqual(body, '')
def test_single_part(self):
body = "time flies like an arrow; fruit flies like a banana"
doc_iters = [{'part_iter': iter(StringIO(body).read, '')}]
resp_body = ''.join(
utils.document_iters_to_http_response_body(
iter(doc_iters), 'dontcare',
multipart=False, logger=FakeLogger()))
self.assertEqual(resp_body, body)
def test_multiple_parts(self):
part1 = "two peanuts were walking down a railroad track"
part2 = "and one was a salted. ... peanut."
doc_iters = [{
'start_byte': 88,
'end_byte': 133,
'content_type': 'application/peanut',
'entity_length': 1024,
'part_iter': iter(StringIO(part1).read, ''),
}, {
'start_byte': 500,
'end_byte': 532,
'content_type': 'application/salted',
'entity_length': 1024,
'part_iter': iter(StringIO(part2).read, ''),
}]
resp_body = ''.join(
utils.document_iters_to_http_response_body(
iter(doc_iters), 'boundaryboundary',
multipart=True, logger=FakeLogger()))
self.assertEqual(resp_body, (
"--boundaryboundary\r\n" +
# This is a little too strict; we don't actually care that the
# headers are in this order, but the test is much more legible
# this way.
"Content-Type: application/peanut\r\n" +
"Content-Range: bytes 88-133/1024\r\n" +
"\r\n" +
part1 + "\r\n" +
"--boundaryboundary\r\n"
"Content-Type: application/salted\r\n" +
"Content-Range: bytes 500-532/1024\r\n" +
"\r\n" +
part2 + "\r\n" +
"--boundaryboundary--"))
class TestPairs(unittest.TestCase):
def test_pairs(self):
items = [10, 20, 30, 40, 50, 60]
got_pairs = set(utils.pairs(items))
self.assertEqual(got_pairs,
set([(10, 20), (10, 30), (10, 40), (10, 50), (10, 60),
(20, 30), (20, 40), (20, 50), (20, 60),
(30, 40), (30, 50), (30, 60),
(40, 50), (40, 60),
(50, 60)]))
class TestSocketStringParser(unittest.TestCase):
def test_socket_string_parser(self):
default = 1337
addrs = [('1.2.3.4', '1.2.3.4', default),
('1.2.3.4:5000', '1.2.3.4', 5000),
('[dead:beef::1]', 'dead:beef::1', default),
('[dead:beef::1]:5000', 'dead:beef::1', 5000),
('example.com', 'example.com', default),
('example.com:5000', 'example.com', 5000),
('foo.1-2-3.bar.com:5000', 'foo.1-2-3.bar.com', 5000),
('1.2.3.4:10:20', None, None),
('dead:beef::1:5000', None, None)]
for addr, expected_host, expected_port in addrs:
if expected_host:
host, port = utils.parse_socket_string(addr, default)
self.assertEqual(expected_host, host)
self.assertEqual(expected_port, int(port))
else:
with self.assertRaises(ValueError):
utils.parse_socket_string(addr, default)
if __name__ == '__main__':
unittest.main()
|
openvino_yolov3_MultiStick_test.py
|
import sys, os, cv2, time, heapq, argparse
import numpy as np, math
from openvino.inference_engine import IENetwork, IEPlugin
import multiprocessing as mp
from time import sleep
import threading
yolo_scale_13 = 13
yolo_scale_26 = 26
yolo_scale_52 = 52
classes = 80
coords = 4
num = 3
anchors = [10,13,16,30,33,23,30,61,62,45,59,119,116,90,156,198,373,326]
LABELS = ("person", "bicycle", "car", "motorbike", "aeroplane",
"bus", "train", "truck", "boat", "traffic light",
"fire hydrant", "stop sign", "parking meter", "bench", "bird",
"cat", "dog", "horse", "sheep", "cow",
"elephant", "bear", "zebra", "giraffe", "backpack",
"umbrella", "handbag", "tie", "suitcase", "frisbee",
"skis", "snowboard", "sports ball", "kite", "baseball bat",
"baseball glove", "skateboard", "surfboard","tennis racket", "bottle",
"wine glass", "cup", "fork", "knife", "spoon",
"bowl", "banana", "apple", "sandwich", "orange",
"broccoli", "carrot", "hot dog", "pizza", "donut",
"cake", "chair", "sofa", "pottedplant", "bed",
"diningtable", "toilet", "tvmonitor", "laptop", "mouse",
"remote", "keyboard", "cell phone", "microwave", "oven",
"toaster", "sink", "refrigerator", "book", "clock",
"vase", "scissors", "teddy bear", "hair drier", "toothbrush")
label_text_color = (255, 255, 255)
label_background_color = (125, 175, 75)
box_color = (255, 128, 0)
box_thickness = 1
processes = []
fps = ""
detectfps = ""
framecount = 0
detectframecount = 0
time1 = 0
time2 = 0
lastresults = None
def EntryIndex(side, lcoords, lclasses, location, entry):
n = int(location / (side * side))
loc = location % (side * side)
return int(n * side * side * (lcoords + lclasses + 1) + entry * side * side + loc)
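
# Added illustration (not part of the original script): EntryIndex addresses a
# flattened YOLO output blob laid out as (num_anchors, coords + classes + 1,
# side, side); `location` encodes (anchor, row, col) and `entry` selects the
# channel.  The check below verifies that mapping on a small dummy tensor.
def _entry_index_layout_demo(side=2, lcoords=4, lclasses=3, lnum=2):
    blob = np.arange(lnum * (lcoords + lclasses + 1) * side * side).reshape(
        lnum, lcoords + lclasses + 1, side, side)
    flat = blob.flatten()
    for n in range(lnum):
        for entry in range(lcoords + lclasses + 1):
            for row in range(side):
                for col in range(side):
                    location = n * side * side + row * side + col
                    idx = EntryIndex(side, lcoords, lclasses, location, entry)
                    assert flat[idx] == blob[n, entry, row, col]
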
class DetectionObject():
xmin = 0
ymin = 0
xmax = 0
ymax = 0
class_id = 0
confidence = 0.0
def __init__(self, x, y, h, w, class_id, confidence, h_scale, w_scale):
self.xmin = int((x - w / 2) * w_scale)
self.ymin = int((y - h / 2) * h_scale)
self.xmax = int(self.xmin + w * w_scale)
self.ymax = int(self.ymin + h * h_scale)
self.class_id = class_id
self.confidence = confidence
def IntersectionOverUnion(box_1, box_2):
width_of_overlap_area = min(box_1.xmax, box_2.xmax) - max(box_1.xmin, box_2.xmin)
height_of_overlap_area = min(box_1.ymax, box_2.ymax) - max(box_1.ymin, box_2.ymin)
area_of_overlap = 0.0
if (width_of_overlap_area < 0.0 or height_of_overlap_area < 0.0):
area_of_overlap = 0.0
else:
area_of_overlap = width_of_overlap_area * height_of_overlap_area
box_1_area = (box_1.ymax - box_1.ymin) * (box_1.xmax - box_1.xmin)
box_2_area = (box_2.ymax - box_2.ymin) * (box_2.xmax - box_2.xmin)
area_of_union = box_1_area + box_2_area - area_of_overlap
return (area_of_overlap / area_of_union)
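
# Added sanity-check example (not part of the original script): two 10x10 boxes
# offset by half their width overlap in 50 units of area out of a 150-unit
# union, so the IoU should be 1/3.  `_Box` is just a stand-in object with the
# attributes IntersectionOverUnion reads.
def _iou_demo():
    _Box = type("_Box", (), {})
    a, b = _Box(), _Box()
    a.xmin, a.ymin, a.xmax, a.ymax = 0, 0, 10, 10    # area 100
    b.xmin, b.ymin, b.xmax, b.ymax = 5, 0, 15, 10    # area 100, overlap 50
    assert abs(IntersectionOverUnion(a, b) - 50.0 / 150.0) < 1e-9
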
def ParseYOLOV3Output(blob, resized_im_h, resized_im_w, original_im_h, original_im_w, threshold, objects):
out_blob_h = blob.shape[2]
out_blob_w = blob.shape[3]
side = out_blob_h
anchor_offset = 0
if side == yolo_scale_13:
anchor_offset = 2 * 6
elif side == yolo_scale_26:
anchor_offset = 2 * 3
elif side == yolo_scale_52:
anchor_offset = 2 * 0
side_square = side * side
output_blob = blob.flatten()
for i in range(side_square):
row = int(i / side)
col = int(i % side)
for n in range(num):
obj_index = EntryIndex(side, coords, classes, n * side * side + i, coords)
box_index = EntryIndex(side, coords, classes, n * side * side + i, 0)
scale = output_blob[obj_index]
if (scale < threshold):
continue
x = (col + output_blob[box_index + 0 * side_square]) / side * resized_im_w
y = (row + output_blob[box_index + 1 * side_square]) / side * resized_im_h
height = math.exp(output_blob[box_index + 3 * side_square]) * anchors[anchor_offset + 2 * n + 1]
width = math.exp(output_blob[box_index + 2 * side_square]) * anchors[anchor_offset + 2 * n]
for j in range(classes):
class_index = EntryIndex(side, coords, classes, n * side_square + i, coords + 1 + j)
prob = scale * output_blob[class_index]
if prob < threshold:
continue
obj = DetectionObject(x, y, height, width, j, prob, (original_im_h / resized_im_h), (original_im_w / resized_im_w))
objects.append(obj)
return objects
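
# Added note (not part of the original script): anchor_offset above selects
# which three (width, height) anchor pairs apply at each output scale; per the
# usual YOLOv3 convention, the coarse 13x13 grid uses the largest anchors and
# the fine 52x52 grid the smallest.
def _anchor_offset_demo():
    assert anchors[2 * 6:2 * 6 + 6] == [116, 90, 156, 198, 373, 326]   # 13x13
    assert anchors[2 * 3:2 * 3 + 6] == [30, 61, 62, 45, 59, 119]       # 26x26
    assert anchors[2 * 0:2 * 0 + 6] == [10, 13, 16, 30, 33, 23]        # 52x52
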
def camThread(LABELS, results, frameBuffer, camera_width, camera_height, vidfps):
global fps
global detectfps
global lastresults
global framecount
global detectframecount
global time1
global time2
global cam
global window_name
#cam = cv2.VideoCapture(0)
#if cam.isOpened() != True:
# print("USB Camera Open Error!!!")
# sys.exit(0)
#cam.set(cv2.CAP_PROP_FPS, vidfps)
#cam.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width)
#cam.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_height)
#window_name = "USB Camera"
#wait_key_time = 1
cam = cv2.VideoCapture("data/input/testvideo4.mp4")
camera_width = int(cam.get(cv2.CAP_PROP_FRAME_WIDTH))
camera_height = int(cam.get(cv2.CAP_PROP_FRAME_HEIGHT))
frame_count = int(cam.get(cv2.CAP_PROP_FRAME_COUNT))
window_name = "Movie File"
wait_key_time = int(1000 / vidfps)
cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)
while True:
t1 = time.perf_counter()
        # Frame read (a movie file here; the USB camera block above is commented out)
s, color_image = cam.read()
if not s:
continue
if frameBuffer.full():
frameBuffer.get()
height = color_image.shape[0]
width = color_image.shape[1]
frameBuffer.put(color_image.copy())
if not results.empty():
objects = results.get(False)
detectframecount += 1
for obj in objects:
if obj.confidence < 0.2:
continue
label = obj.class_id
confidence = obj.confidence
if confidence > 0.2:
label_text = LABELS[label] + " (" + "{:.1f}".format(confidence * 100) + "%)"
cv2.rectangle(color_image, (obj.xmin, obj.ymin), (obj.xmax, obj.ymax), box_color, box_thickness)
cv2.putText(color_image, label_text, (obj.xmin, obj.ymin - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.6, label_text_color, 1)
lastresults = objects
else:
if not isinstance(lastresults, type(None)):
for obj in lastresults:
if obj.confidence < 0.2:
continue
label = obj.class_id
confidence = obj.confidence
if confidence > 0.2:
label_text = LABELS[label] + " (" + "{:.1f}".format(confidence * 100) + "%)"
cv2.rectangle(color_image, (obj.xmin, obj.ymin), (obj.xmax, obj.ymax), box_color, box_thickness)
cv2.putText(color_image, label_text, (obj.xmin, obj.ymin - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.6, label_text_color, 1)
cv2.putText(color_image, fps, (width-170,15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (38,0,255), 1, cv2.LINE_AA)
cv2.putText(color_image, detectfps, (width-170,30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (38,0,255), 1, cv2.LINE_AA)
cv2.imshow(window_name, cv2.resize(color_image, (width, height)))
if cv2.waitKey(wait_key_time)&0xFF == ord('q'):
sys.exit(0)
## Print FPS
framecount += 1
if framecount >= 15:
fps = "(Playback) {:.1f} FPS".format(time1/15)
detectfps = "(Detection) {:.1f} FPS".format(detectframecount/time2)
framecount = 0
detectframecount = 0
time1 = 0
time2 = 0
t2 = time.perf_counter()
elapsedTime = t2-t1
time1 += 1/elapsedTime
time2 += elapsedTime
# l = Search list
# x = Search target value
def searchlist(l, x, notfoundvalue=-1):
if x in l:
return l.index(x)
else:
return notfoundvalue
def async_infer(ncsworker):
ncsworker.skip_frame_measurement()
while True:
ncsworker.predict_async()
class NcsWorker(object):
def __init__(self, devid, frameBuffer, results, camera_width, camera_height, number_of_ncs, vidfps):
self.devid = devid
self.frameBuffer = frameBuffer
self.model_xml = "./lrmodels/YoloV3/FP16/frozen_yolo_v3.xml"
self.model_bin = "./lrmodels/YoloV3/FP16/frozen_yolo_v3.bin"
self.camera_width = camera_width
self.camera_height = camera_height
self.m_input_size = 416
self.threshould = 0.7
self.num_requests = 4
self.inferred_request = [0] * self.num_requests
self.heap_request = []
self.inferred_cnt = 0
self.plugin = IEPlugin(device="MYRIAD")
self.net = IENetwork(model=self.model_xml, weights=self.model_bin)
self.input_blob = next(iter(self.net.inputs))
self.exec_net = self.plugin.load(network=self.net, num_requests=self.num_requests)
self.results = results
self.number_of_ncs = number_of_ncs
self.predict_async_time = 800
self.skip_frame = 0
self.roop_frame = 0
self.vidfps = vidfps
def image_preprocessing(self, color_image):
prepimg = cv2.resize(color_image, (self.m_input_size, self.m_input_size))
prepimg = prepimg[np.newaxis, :, :, :] # Batch size axis add
prepimg = prepimg.transpose((0, 3, 1, 2)) # NHWC to NCHW
return prepimg
    def skip_frame_measurement(self):
        # (see _skip_frame_demo after this class for a worked example of this math)
surplustime_per_second = (1000 - self.predict_async_time)
if surplustime_per_second > 0.0:
frame_per_millisecond = (1000 / self.vidfps)
total_skip_frame = surplustime_per_second / frame_per_millisecond
self.skip_frame = int(total_skip_frame / self.num_requests)
else:
self.skip_frame = 0
def predict_async(self):
try:
if self.frameBuffer.empty():
return
self.roop_frame += 1
if self.roop_frame <= self.skip_frame:
self.frameBuffer.get()
return
self.roop_frame = 0
prepimg = self.image_preprocessing(self.frameBuffer.get())
reqnum = searchlist(self.inferred_request, 0)
if reqnum > -1:
self.exec_net.start_async(request_id=reqnum, inputs={self.input_blob: prepimg})
self.inferred_request[reqnum] = 1
self.inferred_cnt += 1
if self.inferred_cnt == sys.maxsize:
self.inferred_request = [0] * self.num_requests
self.heap_request = []
self.inferred_cnt = 0
heapq.heappush(self.heap_request, (self.inferred_cnt, reqnum))
cnt, dev = heapq.heappop(self.heap_request)
if self.exec_net.requests[dev].wait(0) == 0:
self.exec_net.requests[dev].wait(-1)
objects = []
outputs = self.exec_net.requests[dev].outputs
for output in outputs.values():
objects = ParseYOLOV3Output(output, self.m_input_size, self.m_input_size, self.camera_height, self.camera_width, self.threshould, objects)
objlen = len(objects)
for i in range(objlen):
if (objects[i].confidence == 0.0):
continue
for j in range(i + 1, objlen):
if (IntersectionOverUnion(objects[i], objects[j]) >= 0.4):
objects[j].confidence = 0
self.results.put(objects)
self.inferred_request[dev] = 0
else:
heapq.heappush(self.heap_request, (cnt, dev))
except:
import traceback
traceback.print_exc()
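
# Added worked example (not part of the original script) of the arithmetic in
# NcsWorker.skip_frame_measurement: with ~800 ms of async inference budget per
# second there are ~200 ms of surplus, i.e. about 6 frames at 30 fps, or 1
# frame to skip per in-flight request when num_requests is 4.
def _skip_frame_demo(predict_async_time=800, vidfps=30, num_requests=4):
    surplus_ms = 1000 - predict_async_time
    if surplus_ms <= 0:
        return 0
    frame_ms = 1000 / vidfps
    return int((surplus_ms / frame_ms) / num_requests)   # -> 1 for the defaults
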
def inferencer(results, frameBuffer, number_of_ncs, camera_width, camera_height, vidfps):
# Init infer threads
threads = []
for devid in range(number_of_ncs):
thworker = threading.Thread(target=async_infer, args=(NcsWorker(devid, frameBuffer, results, camera_width, camera_height, number_of_ncs, vidfps),))
thworker.start()
threads.append(thworker)
for th in threads:
th.join()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-numncs','--numberofncs',dest='number_of_ncs',type=int,default=1,help='Number of NCS. (Default=1)')
args = parser.parse_args()
number_of_ncs = args.number_of_ncs
camera_width = 320
camera_height = 240
vidfps = 30
try:
mp.set_start_method('forkserver')
frameBuffer = mp.Queue(10)
results = mp.Queue()
# Start detection MultiStick
# Activation of inferencer
p = mp.Process(target=inferencer, args=(results, frameBuffer, number_of_ncs, camera_width, camera_height, vidfps), daemon=True)
p.start()
processes.append(p)
sleep(number_of_ncs * 7)
# Start streaming
p = mp.Process(target=camThread, args=(LABELS, results, frameBuffer, camera_width, camera_height, vidfps), daemon=True)
p.start()
processes.append(p)
while True:
sleep(1)
except:
import traceback
traceback.print_exc()
finally:
for p in range(len(processes)):
processes[p].terminate()
print("\n\nFinished\n\n")
|
test_generator_mt19937.py
|
import sys
import pytest
import numpy as np
from numpy.dual import cholesky, eigh, svd
from numpy.linalg import LinAlgError
from numpy.testing import (
assert_, assert_raises, assert_equal, assert_allclose,
assert_warns, assert_no_warnings, assert_array_equal,
assert_array_almost_equal, suppress_warnings)
from numpy.random import Generator, MT19937, SeedSequence
random = Generator(MT19937())
@pytest.fixture(scope='module', params=[True, False])
def endpoint(request):
return request.param
class TestSeed(object):
def test_scalar(self):
s = Generator(MT19937(0))
assert_equal(s.integers(1000), 479)
s = Generator(MT19937(4294967295))
assert_equal(s.integers(1000), 324)
def test_array(self):
s = Generator(MT19937(range(10)))
assert_equal(s.integers(1000), 465)
s = Generator(MT19937(np.arange(10)))
assert_equal(s.integers(1000), 465)
s = Generator(MT19937([0]))
assert_equal(s.integers(1000), 479)
s = Generator(MT19937([4294967295]))
assert_equal(s.integers(1000), 324)
def test_seedsequence(self):
s = MT19937(SeedSequence(0))
assert_equal(s.random_raw(1), 2058676884)
def test_invalid_scalar(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, MT19937, -0.5)
assert_raises(ValueError, MT19937, -1)
def test_invalid_array(self):
# seed must be an unsigned integer
assert_raises(TypeError, MT19937, [-0.5])
assert_raises(ValueError, MT19937, [-1])
assert_raises(ValueError, MT19937, [1, -2, 4294967296])
def test_noninstantized_bitgen(self):
assert_raises(ValueError, Generator, MT19937)
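
# Added sketch (not part of the original tests): the same SeedSequence entropy
# always reproduces the same stream, which is the property test_seedsequence
# relies on when it pins a raw output value.
def _seedsequence_repeatability_sketch():
    g1 = Generator(MT19937(SeedSequence(12345)))
    g2 = Generator(MT19937(SeedSequence(12345)))
    assert_array_equal(g1.integers(0, 100, size=5), g2.integers(0, 100, size=5))
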
class TestBinomial(object):
def test_n_zero(self):
# Tests the corner case of n == 0 for the binomial distribution.
# binomial(0, p) should be zero for any p in [0, 1].
# This test addresses issue #3480.
zeros = np.zeros(2, dtype='int')
for p in [0, .5, 1]:
assert_(random.binomial(0, p) == 0)
assert_array_equal(random.binomial(zeros, p), zeros)
def test_p_is_nan(self):
# Issue #4571.
assert_raises(ValueError, random.binomial, 1, np.nan)
class TestMultinomial(object):
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
def test_zero_probability(self):
random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
def test_int_negative_interval(self):
assert_(-5 <= random.integers(-5, -1) < -1)
x = random.integers(-5, -1, 5)
assert_(np.all(-5 <= x))
assert_(np.all(x < -1))
def test_size(self):
# gh-3173
p = [0.5, 0.5]
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
assert_equal(random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
assert_equal(random.multinomial(1, p, np.array((2, 2))).shape,
(2, 2, 2))
assert_raises(TypeError, random.multinomial, 1, p,
float(1))
def test_invalid_prob(self):
assert_raises(ValueError, random.multinomial, 100, [1.1, 0.2])
assert_raises(ValueError, random.multinomial, 100, [-.1, 0.9])
def test_invalid_n(self):
assert_raises(ValueError, random.multinomial, -1, [0.8, 0.2])
assert_raises(ValueError, random.multinomial, [-1] * 10, [0.8, 0.2])
def test_p_non_contiguous(self):
p = np.arange(15.)
p /= np.sum(p[1::3])
pvals = p[1::3]
random = Generator(MT19937(1432985819))
non_contig = random.multinomial(100, pvals=pvals)
random = Generator(MT19937(1432985819))
contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals))
assert_array_equal(non_contig, contig)
class TestMultivariateHypergeometric(object):
def setup(self):
self.seed = 8675309
def test_argument_validation(self):
# Error cases...
# `colors` must be a 1-d sequence
assert_raises(ValueError, random.multivariate_hypergeometric,
10, 4)
# Negative nsample
assert_raises(ValueError, random.multivariate_hypergeometric,
[2, 3, 4], -1)
# Negative color
assert_raises(ValueError, random.multivariate_hypergeometric,
[-1, 2, 3], 2)
# nsample exceeds sum(colors)
assert_raises(ValueError, random.multivariate_hypergeometric,
[2, 3, 4], 10)
# nsample exceeds sum(colors) (edge case of empty colors)
assert_raises(ValueError, random.multivariate_hypergeometric,
[], 1)
# Validation errors associated with very large values in colors.
assert_raises(ValueError, random.multivariate_hypergeometric,
[999999999, 101], 5, 1, 'marginals')
int64_info = np.iinfo(np.int64)
max_int64 = int64_info.max
max_int64_index = max_int64 // int64_info.dtype.itemsize
assert_raises(ValueError, random.multivariate_hypergeometric,
[max_int64_index - 100, 101], 5, 1, 'count')
@pytest.mark.parametrize('method', ['count', 'marginals'])
def test_edge_cases(self, method):
# Set the seed, but in fact, all the results in this test are
# deterministic, so we don't really need this.
random = Generator(MT19937(self.seed))
x = random.multivariate_hypergeometric([0, 0, 0], 0, method=method)
assert_array_equal(x, [0, 0, 0])
x = random.multivariate_hypergeometric([], 0, method=method)
assert_array_equal(x, [])
x = random.multivariate_hypergeometric([], 0, size=1, method=method)
assert_array_equal(x, np.empty((1, 0), dtype=np.int64))
x = random.multivariate_hypergeometric([1, 2, 3], 0, method=method)
assert_array_equal(x, [0, 0, 0])
x = random.multivariate_hypergeometric([9, 0, 0], 3, method=method)
assert_array_equal(x, [3, 0, 0])
colors = [1, 1, 0, 1, 1]
x = random.multivariate_hypergeometric(colors, sum(colors),
method=method)
assert_array_equal(x, colors)
x = random.multivariate_hypergeometric([3, 4, 5], 12, size=3,
method=method)
assert_array_equal(x, [[3, 4, 5]]*3)
# Cases for nsample:
# nsample < 10
# 10 <= nsample < colors.sum()/2
# colors.sum()/2 < nsample < colors.sum() - 10
# colors.sum() - 10 < nsample < colors.sum()
@pytest.mark.parametrize('nsample', [8, 25, 45, 55])
@pytest.mark.parametrize('method', ['count', 'marginals'])
@pytest.mark.parametrize('size', [5, (2, 3), 150000])
def test_typical_cases(self, nsample, method, size):
random = Generator(MT19937(self.seed))
colors = np.array([10, 5, 20, 25])
sample = random.multivariate_hypergeometric(colors, nsample, size,
method=method)
if isinstance(size, int):
expected_shape = (size,) + colors.shape
else:
expected_shape = size + colors.shape
assert_equal(sample.shape, expected_shape)
assert_((sample >= 0).all())
assert_((sample <= colors).all())
assert_array_equal(sample.sum(axis=-1),
np.full(size, fill_value=nsample, dtype=int))
if isinstance(size, int) and size >= 100000:
# This sample is large enough to compare its mean to
# the expected values.
assert_allclose(sample.mean(axis=0),
nsample * colors / colors.sum(),
rtol=1e-3, atol=0.005)
def test_repeatability1(self):
random = Generator(MT19937(self.seed))
sample = random.multivariate_hypergeometric([3, 4, 5], 5, size=5,
method='count')
expected = np.array([[2, 1, 2],
[2, 1, 2],
[1, 1, 3],
[2, 0, 3],
[2, 1, 2]])
assert_array_equal(sample, expected)
def test_repeatability2(self):
random = Generator(MT19937(self.seed))
sample = random.multivariate_hypergeometric([20, 30, 50], 50,
size=5,
method='marginals')
expected = np.array([[ 9, 17, 24],
[ 7, 13, 30],
[ 9, 15, 26],
[ 9, 17, 24],
[12, 14, 24]])
assert_array_equal(sample, expected)
def test_repeatability3(self):
random = Generator(MT19937(self.seed))
sample = random.multivariate_hypergeometric([20, 30, 50], 12,
size=5,
method='marginals')
expected = np.array([[2, 3, 7],
[5, 3, 4],
[2, 5, 5],
[5, 3, 4],
[1, 5, 6]])
assert_array_equal(sample, expected)
class TestSetState(object):
def setup(self):
self.seed = 1234567890
self.rg = Generator(MT19937(self.seed))
self.bit_generator = self.rg.bit_generator
self.state = self.bit_generator.state
self.legacy_state = (self.state['bit_generator'],
self.state['state']['key'],
self.state['state']['pos'])
def test_gaussian_reset(self):
# Make sure the cached every-other-Gaussian is reset.
old = self.rg.standard_normal(size=3)
self.bit_generator.state = self.state
new = self.rg.standard_normal(size=3)
assert_(np.all(old == new))
def test_gaussian_reset_in_media_res(self):
# When the state is saved with a cached Gaussian, make sure the
# cached Gaussian is restored.
self.rg.standard_normal()
state = self.bit_generator.state
old = self.rg.standard_normal(size=3)
self.bit_generator.state = state
new = self.rg.standard_normal(size=3)
assert_(np.all(old == new))
def test_negative_binomial(self):
# Ensure that the negative binomial results take floating point
# arguments without truncation.
self.rg.negative_binomial(0.5, 0.5)
class TestIntegers(object):
rfunc = random.integers
# valid integer/boolean types
itype = [bool, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
def test_unsupported_type(self, endpoint):
assert_raises(TypeError, self.rfunc, 1, endpoint=endpoint, dtype=float)
def test_bounds_checking(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, ubnd, lbnd,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, 1, 0, endpoint=endpoint,
dtype=dt)
assert_raises(ValueError, self.rfunc, [lbnd - 1], ubnd,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, [lbnd], [ubnd + 1],
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, [ubnd], [lbnd],
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, 1, [0],
endpoint=endpoint, dtype=dt)
def test_bounds_checking_array(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + (not endpoint)
assert_raises(ValueError, self.rfunc, [lbnd - 1] * 2, [ubnd] * 2,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, [lbnd] * 2,
[ubnd + 1] * 2, endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, ubnd, [lbnd] * 2,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, [1] * 2, 0,
endpoint=endpoint, dtype=dt)
def test_rng_zero_and_extremes(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
is_open = not endpoint
tgt = ubnd - 1
assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
endpoint=endpoint, dtype=dt), tgt)
assert_equal(self.rfunc([tgt], tgt + is_open, size=1000,
endpoint=endpoint, dtype=dt), tgt)
tgt = lbnd
assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
endpoint=endpoint, dtype=dt), tgt)
assert_equal(self.rfunc(tgt, [tgt + is_open], size=1000,
endpoint=endpoint, dtype=dt), tgt)
tgt = (lbnd + ubnd) // 2
assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
endpoint=endpoint, dtype=dt), tgt)
assert_equal(self.rfunc([tgt], [tgt + is_open],
size=1000, endpoint=endpoint, dtype=dt),
tgt)
def test_rng_zero_and_extremes_array(self, endpoint):
size = 1000
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
tgt = ubnd - 1
assert_equal(self.rfunc([tgt], [tgt + 1],
size=size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)
tgt = lbnd
assert_equal(self.rfunc([tgt], [tgt + 1],
size=size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)
tgt = (lbnd + ubnd) // 2
assert_equal(self.rfunc([tgt], [tgt + 1],
size=size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)
def test_full_range(self, endpoint):
# Test for ticket #1690
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
try:
self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
except Exception as e:
raise AssertionError("No error should have been raised, "
"but one was with the following "
"message:\n\n%s" % str(e))
def test_full_range_array(self, endpoint):
# Test for ticket #1690
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
try:
self.rfunc([lbnd] * 2, [ubnd], endpoint=endpoint, dtype=dt)
except Exception as e:
raise AssertionError("No error should have been raised, "
"but one was with the following "
"message:\n\n%s" % str(e))
def test_in_bounds_fuzz(self, endpoint):
# Don't use fixed seed
random = Generator(MT19937())
for dt in self.itype[1:]:
for ubnd in [4, 8, 16]:
vals = self.rfunc(2, ubnd - endpoint, size=2 ** 16,
endpoint=endpoint, dtype=dt)
assert_(vals.max() < ubnd)
assert_(vals.min() >= 2)
vals = self.rfunc(0, 2 - endpoint, size=2 ** 16, endpoint=endpoint,
dtype=bool)
assert_(vals.max() < 2)
assert_(vals.min() >= 0)
def test_scalar_array_equiv(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
size = 1000
random = Generator(MT19937(1234))
scalar = random.integers(lbnd, ubnd, size=size, endpoint=endpoint,
dtype=dt)
random = Generator(MT19937(1234))
scalar_array = random.integers([lbnd], [ubnd], size=size,
endpoint=endpoint, dtype=dt)
random = Generator(MT19937(1234))
array = random.integers([lbnd] * size, [ubnd] *
size, size=size, endpoint=endpoint, dtype=dt)
assert_array_equal(scalar, scalar_array)
assert_array_equal(scalar, array)
def test_repeatability(self, endpoint):
import hashlib
# We use a md5 hash of generated sequences of 1000 samples
# in the range [0, 6) for all but bool, where the range
# is [0, 2). Hashes are for little endian numbers.
tgt = {'bool': 'b3300e66d2bb59e493d255d47c3a6cbe',
'int16': '39624ead49ad67e37545744024d2648b',
'int32': '5c4810373f979336c6c0c999996e47a1',
'int64': 'ab126c15edff26f55c50d2b7e37391ac',
'int8': 'ba71ccaffeeeb9eeb1860f8075020b9c',
'uint16': '39624ead49ad67e37545744024d2648b',
'uint32': '5c4810373f979336c6c0c999996e47a1',
'uint64': 'ab126c15edff26f55c50d2b7e37391ac',
'uint8': 'ba71ccaffeeeb9eeb1860f8075020b9c'}
for dt in self.itype[1:]:
random = Generator(MT19937(1234))
# view as little endian for hash
if sys.byteorder == 'little':
val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint,
dtype=dt)
else:
val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint,
dtype=dt).byteswap()
res = hashlib.md5(val.view(np.int8)).hexdigest()
assert_(tgt[np.dtype(dt).name] == res)
# bools do not depend on endianness
random = Generator(MT19937(1234))
val = random.integers(0, 2 - endpoint, size=1000, endpoint=endpoint,
dtype=bool).view(np.int8)
res = hashlib.md5(val).hexdigest()
assert_(tgt[np.dtype(bool).name] == res)
def test_repeatability_broadcasting(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt in (np.bool, bool, np.bool_) else np.iinfo(dt).min
ubnd = 2 if dt in (
np.bool, bool, np.bool_) else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
# view as little endian for hash
random = Generator(MT19937(1234))
val = random.integers(lbnd, ubnd, size=1000, endpoint=endpoint,
dtype=dt)
random = Generator(MT19937(1234))
val_bc = random.integers([lbnd] * 1000, ubnd, endpoint=endpoint,
dtype=dt)
assert_array_equal(val, val_bc)
random = Generator(MT19937(1234))
val_bc = random.integers([lbnd] * 1000, [ubnd] * 1000,
endpoint=endpoint, dtype=dt)
assert_array_equal(val, val_bc)
def test_int64_uint64_broadcast_exceptions(self, endpoint):
configs = {np.uint64: ((0, 2**65), (-1, 2**62), (10, 9), (0, 0)),
np.int64: ((0, 2**64), (-(2**64), 2**62), (10, 9), (0, 0),
(-2**63-1, -2**63-1))}
for dtype in configs:
for config in configs[dtype]:
low, high = config
high = high - endpoint
low_a = np.array([[low]*10])
high_a = np.array([high] * 10)
assert_raises(ValueError, random.integers, low, high,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low_a, high,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low, high_a,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low_a, high_a,
endpoint=endpoint, dtype=dtype)
low_o = np.array([[low]*10], dtype=np.object)
high_o = np.array([high] * 10, dtype=np.object)
assert_raises(ValueError, random.integers, low_o, high,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low, high_o,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low_o, high_o,
endpoint=endpoint, dtype=dtype)
def test_int64_uint64_corner_case(self, endpoint):
        # When stored in Numpy arrays, `lbnd` is cast
        # as np.int64, and `ubnd` is cast as np.uint64.
        # Checking whether `lbnd` >= `ubnd` used to be
        # done solely via direct comparison, which is incorrect
        # because when Numpy tries to compare both numbers,
        # it casts both to np.float64 because there is
        # no integer superset of np.int64 and np.uint64. However,
        # `ubnd` is too large to be represented in np.float64,
        # causing it to be rounded down to np.iinfo(np.int64).max,
        # leading to a ValueError because `lbnd` now equals
        # the new `ubnd`.  (See _int64_uint64_promotion_demo after
        # this class for a small illustration of that float64 promotion.)
dt = np.int64
tgt = np.iinfo(np.int64).max
lbnd = np.int64(np.iinfo(np.int64).max)
ubnd = np.uint64(np.iinfo(np.int64).max + 1 - endpoint)
# None of these function calls should
# generate a ValueError now.
actual = random.integers(lbnd, ubnd, endpoint=endpoint, dtype=dt)
assert_equal(actual, tgt)
def test_respect_dtype_singleton(self, endpoint):
# See gh-7203
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
dt = np.bool_ if dt is bool else dt
sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
assert_equal(sample.dtype, dt)
for dt in (bool, int, np.long):
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
# gh-7284: Ensure that we get Python data types
sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
assert not hasattr(sample, 'dtype')
assert_equal(type(sample), dt)
def test_respect_dtype_array(self, endpoint):
# See gh-7203
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
dt = np.bool_ if dt is bool else dt
sample = self.rfunc([lbnd], [ubnd], endpoint=endpoint, dtype=dt)
assert_equal(sample.dtype, dt)
sample = self.rfunc([lbnd] * 2, [ubnd] * 2, endpoint=endpoint,
dtype=dt)
assert_equal(sample.dtype, dt)
def test_zero_size(self, endpoint):
# See gh-7203
for dt in self.itype:
sample = self.rfunc(0, 0, (3, 0, 4), endpoint=endpoint, dtype=dt)
assert sample.shape == (3, 0, 4)
assert sample.dtype == dt
assert self.rfunc(0, -10, 0, endpoint=endpoint,
dtype=dt).shape == (0,)
assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape,
(3, 0, 4))
assert_equal(random.integers(0, -10, size=0).shape, (0,))
assert_equal(random.integers(10, 10, size=0).shape, (0,))
def test_error_byteorder(self):
other_byteord_dt = '<i4' if sys.byteorder == 'big' else '>i4'
with pytest.raises(ValueError):
random.integers(0, 200, size=10, dtype=other_byteord_dt)
# chi2max is the maximum acceptable chi-squared value.
@pytest.mark.slow
@pytest.mark.parametrize('sample_size,high,dtype,chi2max',
[(5000000, 5, np.int8, 125.0), # p-value ~4.6e-25
(5000000, 7, np.uint8, 150.0), # p-value ~7.7e-30
(10000000, 2500, np.int16, 3300.0), # p-value ~3.0e-25
(50000000, 5000, np.uint16, 6500.0), # p-value ~3.5e-25
])
def test_integers_small_dtype_chisquared(self, sample_size, high,
dtype, chi2max):
# Regression test for gh-14774.
samples = random.integers(high, size=sample_size, dtype=dtype)
values, counts = np.unique(samples, return_counts=True)
expected = sample_size / high
chi2 = ((counts - expected)**2 / expected).sum()
assert chi2 < chi2max
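
# Added illustration of the pitfall described in
# TestIntegers.test_int64_uint64_corner_case: there is no common integer type
# for np.int64 and np.uint64, so a mixed comparison is performed in float64,
# where 2**63 - 1 and 2**63 are no longer distinguishable.
def _int64_uint64_promotion_demo():
    lbnd = np.int64(np.iinfo(np.int64).max)        # 2**63 - 1
    ubnd = np.uint64(np.iinfo(np.int64).max + 1)   # 2**63
    assert np.result_type(np.int64, np.uint64) == np.float64
    assert float(lbnd) == float(ubnd)              # equality appears in float64
    assert int(lbnd) < int(ubnd)                   # exact integers still differ
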
class TestRandomDist(object):
# Make sure the random distribution returns the correct value for a
# given seed
def setup(self):
self.seed = 1234567890
def test_integers(self):
random = Generator(MT19937(self.seed))
actual = random.integers(-99, 99, size=(3, 2))
desired = np.array([[-80, -56], [41, 37], [-83, -16]])
assert_array_equal(actual, desired)
def test_integers_masked(self):
# Test masked rejection sampling algorithm to generate array of
# uint32 in an interval.
random = Generator(MT19937(self.seed))
actual = random.integers(0, 99, size=(3, 2), dtype=np.uint32)
desired = np.array([[9, 21], [70, 68], [8, 41]], dtype=np.uint32)
assert_array_equal(actual, desired)
def test_integers_closed(self):
random = Generator(MT19937(self.seed))
actual = random.integers(-99, 99, size=(3, 2), endpoint=True)
desired = np.array([[-80, -56], [ 41, 38], [-83, -15]])
assert_array_equal(actual, desired)
def test_integers_max_int(self):
        # Tests whether integers with endpoint=True can generate the
# maximum allowed Python int that can be converted
# into a C long. Previous implementations of this
# method have thrown an OverflowError when attempting
# to generate this integer.
actual = random.integers(np.iinfo('l').max, np.iinfo('l').max,
endpoint=True)
desired = np.iinfo('l').max
assert_equal(actual, desired)
def test_random(self):
random = Generator(MT19937(self.seed))
actual = random.random((3, 2))
desired = np.array([[0.096999199829214, 0.707517457682192],
[0.084364834598269, 0.767731206553125],
[0.665069021359413, 0.715487190596693]])
assert_array_almost_equal(actual, desired, decimal=15)
random = Generator(MT19937(self.seed))
actual = random.random()
assert_array_almost_equal(actual, desired[0, 0], decimal=15)
def test_random_float(self):
random = Generator(MT19937(self.seed))
actual = random.random((3, 2))
desired = np.array([[0.0969992 , 0.70751746],
[0.08436483, 0.76773121],
[0.66506902, 0.71548719]])
assert_array_almost_equal(actual, desired, decimal=7)
def test_random_float_scalar(self):
random = Generator(MT19937(self.seed))
actual = random.random(dtype=np.float32)
desired = 0.0969992
assert_array_almost_equal(actual, desired, decimal=7)
def test_random_unsupported_type(self):
assert_raises(TypeError, random.random, dtype='int32')
def test_choice_uniform_replace(self):
random = Generator(MT19937(self.seed))
actual = random.choice(4, 4)
desired = np.array([0, 0, 2, 2], dtype=np.int64)
assert_array_equal(actual, desired)
def test_choice_nonuniform_replace(self):
random = Generator(MT19937(self.seed))
actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
desired = np.array([0, 1, 0, 1], dtype=np.int64)
assert_array_equal(actual, desired)
def test_choice_uniform_noreplace(self):
random = Generator(MT19937(self.seed))
actual = random.choice(4, 3, replace=False)
desired = np.array([2, 0, 3], dtype=np.int64)
assert_array_equal(actual, desired)
actual = random.choice(4, 4, replace=False, shuffle=False)
desired = np.arange(4, dtype=np.int64)
assert_array_equal(actual, desired)
def test_choice_nonuniform_noreplace(self):
random = Generator(MT19937(self.seed))
actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1])
desired = np.array([0, 2, 3], dtype=np.int64)
assert_array_equal(actual, desired)
def test_choice_noninteger(self):
random = Generator(MT19937(self.seed))
actual = random.choice(['a', 'b', 'c', 'd'], 4)
desired = np.array(['a', 'a', 'c', 'c'])
assert_array_equal(actual, desired)
def test_choice_multidimensional_default_axis(self):
random = Generator(MT19937(self.seed))
actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 3)
desired = np.array([[0, 1], [0, 1], [4, 5]])
assert_array_equal(actual, desired)
def test_choice_multidimensional_custom_axis(self):
random = Generator(MT19937(self.seed))
actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 1, axis=1)
desired = np.array([[0], [2], [4], [6]])
assert_array_equal(actual, desired)
def test_choice_exceptions(self):
sample = random.choice
assert_raises(ValueError, sample, -1, 3)
assert_raises(ValueError, sample, 3., 3)
assert_raises(ValueError, sample, [], 3)
assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
p=[[0.25, 0.25], [0.25, 0.25]])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
# gh-13087
assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], 2,
replace=False, p=[1, 0, 0])
def test_choice_return_shape(self):
p = [0.1, 0.9]
# Check scalar
assert_(np.isscalar(random.choice(2, replace=True)))
assert_(np.isscalar(random.choice(2, replace=False)))
assert_(np.isscalar(random.choice(2, replace=True, p=p)))
assert_(np.isscalar(random.choice(2, replace=False, p=p)))
assert_(np.isscalar(random.choice([1, 2], replace=True)))
assert_(random.choice([None], replace=True) is None)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(random.choice(arr, replace=True) is a)
# Check 0-d array
s = tuple()
assert_(not np.isscalar(random.choice(2, s, replace=True)))
assert_(not np.isscalar(random.choice(2, s, replace=False)))
assert_(not np.isscalar(random.choice(2, s, replace=True, p=p)))
assert_(not np.isscalar(random.choice(2, s, replace=False, p=p)))
assert_(not np.isscalar(random.choice([1, 2], s, replace=True)))
assert_(random.choice([None], s, replace=True).ndim == 0)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(random.choice(arr, s, replace=True).item() is a)
# Check multi dimensional array
s = (2, 3)
p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
assert_equal(random.choice(6, s, replace=True).shape, s)
assert_equal(random.choice(6, s, replace=False).shape, s)
assert_equal(random.choice(6, s, replace=True, p=p).shape, s)
assert_equal(random.choice(6, s, replace=False, p=p).shape, s)
assert_equal(random.choice(np.arange(6), s, replace=True).shape, s)
# Check zero-size
assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape, (3, 0, 4))
assert_equal(random.integers(0, -10, size=0).shape, (0,))
assert_equal(random.integers(10, 10, size=0).shape, (0,))
assert_equal(random.choice(0, size=0).shape, (0,))
assert_equal(random.choice([], size=(0,)).shape, (0,))
assert_equal(random.choice(['a', 'b'], size=(3, 0, 4)).shape,
(3, 0, 4))
assert_raises(ValueError, random.choice, [], 10)
def test_choice_nan_probabilities(self):
a = np.array([42, 1, 2])
p = [None, None, None]
assert_raises(ValueError, random.choice, a, p=p)
def test_choice_p_non_contiguous(self):
p = np.ones(10) / 5
p[1::2] = 3.0
random = Generator(MT19937(self.seed))
non_contig = random.choice(5, 3, p=p[::2])
random = Generator(MT19937(self.seed))
contig = random.choice(5, 3, p=np.ascontiguousarray(p[::2]))
assert_array_equal(non_contig, contig)
def test_choice_return_type(self):
# gh 9867
p = np.ones(4) / 4.
actual = random.choice(4, 2)
assert actual.dtype == np.int64
actual = random.choice(4, 2, replace=False)
assert actual.dtype == np.int64
actual = random.choice(4, 2, p=p)
assert actual.dtype == np.int64
actual = random.choice(4, 2, p=p, replace=False)
assert actual.dtype == np.int64
def test_choice_large_sample(self):
import hashlib
choice_hash = 'd44962a0b1e92f4a3373c23222244e21'
random = Generator(MT19937(self.seed))
actual = random.choice(10000, 5000, replace=False)
if sys.byteorder != 'little':
actual = actual.byteswap()
res = hashlib.md5(actual.view(np.int8)).hexdigest()
assert_(choice_hash == res)
def test_bytes(self):
random = Generator(MT19937(self.seed))
actual = random.bytes(10)
desired = b'\x86\xf0\xd4\x18\xe1\x81\t8%\xdd'
assert_equal(actual, desired)
def test_shuffle(self):
# Test lists, arrays (of various dtypes), and multidimensional versions
# of both, c-contiguous or not:
for conv in [lambda x: np.array([]),
lambda x: x,
lambda x: np.asarray(x).astype(np.int8),
lambda x: np.asarray(x).astype(np.float32),
lambda x: np.asarray(x).astype(np.complex64),
lambda x: np.asarray(x).astype(object),
lambda x: [(i, i) for i in x],
lambda x: np.asarray([[i, i] for i in x]),
lambda x: np.vstack([x, x]).T,
# gh-11442
lambda x: (np.asarray([(i, i) for i in x],
[("a", int), ("b", int)])
.view(np.recarray)),
# gh-4270
lambda x: np.asarray([(i, i) for i in x],
[("a", object, (1,)),
("b", np.int32, (1,))])]:
random = Generator(MT19937(self.seed))
alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
random.shuffle(alist)
actual = alist
desired = conv([4, 1, 9, 8, 0, 5, 3, 6, 2, 7])
assert_array_equal(actual, desired)
def test_shuffle_custom_axis(self):
random = Generator(MT19937(self.seed))
actual = np.arange(16).reshape((4, 4))
random.shuffle(actual, axis=1)
desired = np.array([[ 0, 3, 1, 2],
[ 4, 7, 5, 6],
[ 8, 11, 9, 10],
[12, 15, 13, 14]])
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed))
actual = np.arange(16).reshape((4, 4))
random.shuffle(actual, axis=-1)
assert_array_equal(actual, desired)
def test_shuffle_axis_nonsquare(self):
y1 = np.arange(20).reshape(2, 10)
y2 = y1.copy()
random = Generator(MT19937(self.seed))
random.shuffle(y1, axis=1)
random = Generator(MT19937(self.seed))
random.shuffle(y2.T)
assert_array_equal(y1, y2)
def test_shuffle_masked(self):
# gh-3263
a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
a_orig = a.copy()
b_orig = b.copy()
for i in range(50):
random.shuffle(a)
assert_equal(
sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
random.shuffle(b)
assert_equal(
sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
def test_shuffle_exceptions(self):
random = Generator(MT19937(self.seed))
arr = np.arange(10)
assert_raises(np.AxisError, random.shuffle, arr, 1)
arr = np.arange(9).reshape((3, 3))
assert_raises(np.AxisError, random.shuffle, arr, 3)
assert_raises(TypeError, random.shuffle, arr, slice(1, 2, None))
arr = [[1, 2, 3], [4, 5, 6]]
assert_raises(NotImplementedError, random.shuffle, arr, 1)
def test_permutation(self):
random = Generator(MT19937(self.seed))
alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
actual = random.permutation(alist)
desired = [4, 1, 9, 8, 0, 5, 3, 6, 2, 7]
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed))
arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T
actual = random.permutation(arr_2d)
assert_array_equal(actual, np.atleast_2d(desired).T)
bad_x_str = "abcd"
assert_raises(np.AxisError, random.permutation, bad_x_str)
bad_x_float = 1.2
assert_raises(np.AxisError, random.permutation, bad_x_float)
random = Generator(MT19937(self.seed))
integer_val = 10
desired = [3, 0, 8, 7, 9, 4, 2, 5, 1, 6]
actual = random.permutation(integer_val)
assert_array_equal(actual, desired)
def test_permutation_custom_axis(self):
a = np.arange(16).reshape((4, 4))
desired = np.array([[ 0, 3, 1, 2],
[ 4, 7, 5, 6],
[ 8, 11, 9, 10],
[12, 15, 13, 14]])
random = Generator(MT19937(self.seed))
actual = random.permutation(a, axis=1)
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed))
actual = random.permutation(a, axis=-1)
assert_array_equal(actual, desired)
def test_permutation_exceptions(self):
random = Generator(MT19937(self.seed))
arr = np.arange(10)
assert_raises(np.AxisError, random.permutation, arr, 1)
arr = np.arange(9).reshape((3, 3))
assert_raises(np.AxisError, random.permutation, arr, 3)
assert_raises(TypeError, random.permutation, arr, slice(1, 2, None))
def test_beta(self):
random = Generator(MT19937(self.seed))
actual = random.beta(.1, .9, size=(3, 2))
desired = np.array(
[[1.083029353267698e-10, 2.449965303168024e-11],
[2.397085162969853e-02, 3.590779671820755e-08],
[2.830254190078299e-04, 1.744709918330393e-01]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_binomial(self):
random = Generator(MT19937(self.seed))
actual = random.binomial(100.123, .456, size=(3, 2))
desired = np.array([[42, 41],
[42, 48],
[44, 50]])
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed))
actual = random.binomial(100.123, .456)
desired = 42
assert_array_equal(actual, desired)
def test_chisquare(self):
random = Generator(MT19937(self.seed))
actual = random.chisquare(50, size=(3, 2))
desired = np.array([[32.9850547060149, 39.0219480493301],
[56.2006134779419, 57.3474165711485],
[55.4243733880198, 55.4209797925213]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_dirichlet(self):
random = Generator(MT19937(self.seed))
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = random.dirichlet(alpha, size=(3, 2))
desired = np.array([[[0.5439892869558927, 0.45601071304410745],
[0.5588917345860708, 0.4411082654139292 ]],
[[0.5632074165063435, 0.43679258349365657],
[0.54862581112627, 0.45137418887373015]],
[[0.49961831357047226, 0.5003816864295278 ],
[0.52374806183482, 0.47625193816517997]]])
assert_array_almost_equal(actual, desired, decimal=15)
bad_alpha = np.array([5.4e-01, -1.0e-16])
assert_raises(ValueError, random.dirichlet, bad_alpha)
random = Generator(MT19937(self.seed))
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = random.dirichlet(alpha)
assert_array_almost_equal(actual, desired[0, 0], decimal=15)
def test_dirichlet_size(self):
# gh-3173
p = np.array([51.72840233779265162, 39.74494232180943953])
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
assert_equal(random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
assert_raises(TypeError, random.dirichlet, p, float(1))
def test_dirichlet_bad_alpha(self):
# gh-2089
alpha = np.array([5.4e-01, -1.0e-16])
assert_raises(ValueError, random.dirichlet, alpha)
def test_dirichlet_alpha_non_contiguous(self):
a = np.array([51.72840233779265162, -1.0, 39.74494232180943953])
alpha = a[::2]
random = Generator(MT19937(self.seed))
non_contig = random.dirichlet(alpha, size=(3, 2))
random = Generator(MT19937(self.seed))
contig = random.dirichlet(np.ascontiguousarray(alpha),
size=(3, 2))
assert_array_almost_equal(non_contig, contig)
def test_exponential(self):
random = Generator(MT19937(self.seed))
actual = random.exponential(1.1234, size=(3, 2))
desired = np.array([[0.098845481066258, 1.560752510746964],
[0.075730916041636, 1.769098974710777],
[1.488602544592235, 2.49684815275751 ]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_exponential_0(self):
assert_equal(random.exponential(scale=0), 0)
assert_raises(ValueError, random.exponential, scale=-0.)
def test_f(self):
random = Generator(MT19937(self.seed))
actual = random.f(12, 77, size=(3, 2))
desired = np.array([[0.461720027077085, 1.100441958872451],
[1.100337455217484, 0.91421736740018 ],
[0.500811891303113, 0.826802454552058]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gamma(self):
random = Generator(MT19937(self.seed))
actual = random.gamma(5, 3, size=(3, 2))
desired = np.array([[ 5.03850858902096, 7.9228656732049 ],
[18.73983605132985, 19.57961681699238],
[18.17897755150825, 18.17653912505234]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_gamma_0(self):
assert_equal(random.gamma(shape=0, scale=0), 0)
assert_raises(ValueError, random.gamma, shape=-0., scale=-0.)
def test_geometric(self):
random = Generator(MT19937(self.seed))
actual = random.geometric(.123456789, size=(3, 2))
desired = np.array([[ 1, 10],
[ 1, 12],
[ 9, 10]])
assert_array_equal(actual, desired)
def test_geometric_exceptions(self):
assert_raises(ValueError, random.geometric, 1.1)
assert_raises(ValueError, random.geometric, [1.1] * 10)
assert_raises(ValueError, random.geometric, -0.1)
assert_raises(ValueError, random.geometric, [-0.1] * 10)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, random.geometric, np.nan)
assert_raises(ValueError, random.geometric, [np.nan] * 10)
def test_gumbel(self):
random = Generator(MT19937(self.seed))
actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[ 4.688397515056245, -0.289514845417841],
[ 4.981176042584683, -0.633224272589149],
[-0.055915275687488, -0.333962478257953]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gumbel_0(self):
assert_equal(random.gumbel(scale=0), 0)
assert_raises(ValueError, random.gumbel, scale=-0.)
def test_hypergeometric(self):
random = Generator(MT19937(self.seed))
actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
desired = np.array([[ 9, 9],
[ 9, 9],
[10, 9]])
assert_array_equal(actual, desired)
# Test nbad = 0
actual = random.hypergeometric(5, 0, 3, size=4)
desired = np.array([3, 3, 3, 3])
assert_array_equal(actual, desired)
actual = random.hypergeometric(15, 0, 12, size=4)
desired = np.array([12, 12, 12, 12])
assert_array_equal(actual, desired)
# Test ngood = 0
actual = random.hypergeometric(0, 5, 3, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
actual = random.hypergeometric(0, 15, 12, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
def test_laplace(self):
random = Generator(MT19937(self.seed))
actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[-3.156353949272393, 1.195863024830054],
[-3.435458081645966, 1.656882398925444],
[ 0.924824032467446, 1.251116432209336]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_laplace_0(self):
assert_equal(random.laplace(scale=0), 0)
assert_raises(ValueError, random.laplace, scale=-0.)
def test_logistic(self):
random = Generator(MT19937(self.seed))
actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[-4.338584631510999, 1.890171436749954],
[-4.64547787337966 , 2.514545562919217],
[ 1.495389489198666, 1.967827627577474]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_lognormal(self):
random = Generator(MT19937(self.seed))
actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
desired = np.array([[ 0.0268252166335, 13.9534486483053],
[ 0.1204014788936, 2.2422077497792],
[ 4.2484199496128, 12.0093343977523]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_lognormal_0(self):
assert_equal(random.lognormal(sigma=0), 1)
assert_raises(ValueError, random.lognormal, sigma=-0.)
def test_logseries(self):
random = Generator(MT19937(self.seed))
actual = random.logseries(p=.923456789, size=(3, 2))
desired = np.array([[14, 17],
[3, 18],
[5, 1]])
assert_array_equal(actual, desired)
def test_logseries_exceptions(self):
with np.errstate(invalid='ignore'):
assert_raises(ValueError, random.logseries, np.nan)
assert_raises(ValueError, random.logseries, [np.nan] * 10)
def test_multinomial(self):
random = Generator(MT19937(self.seed))
actual = random.multinomial(20, [1 / 6.] * 6, size=(3, 2))
desired = np.array([[[1, 5, 1, 6, 4, 3],
[4, 2, 6, 2, 4, 2]],
[[5, 3, 2, 6, 3, 1],
[4, 4, 0, 2, 3, 7]],
[[6, 3, 1, 5, 3, 2],
[5, 5, 3, 1, 2, 4]]])
assert_array_equal(actual, desired)
@pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"])
def test_multivariate_normal(self, method):
random = Generator(MT19937(self.seed))
mean = (.123456789, 10)
cov = [[1, 0], [0, 1]]
size = (3, 2)
actual = random.multivariate_normal(mean, cov, size, method=method)
desired = np.array([[[-1.747478062846581, 11.25613495182354 ],
[-0.9967333370066214, 10.342002097029821 ]],
[[ 0.7850019631242964, 11.181113712443013 ],
[ 0.8901349653255224, 8.873825399642492 ]],
[[ 0.7130260107430003, 9.551628690083056 ],
[ 0.7127098726541128, 11.991709234143173 ]]])
assert_array_almost_equal(actual, desired, decimal=15)
# Check for default size, was raising deprecation warning
actual = random.multivariate_normal(mean, cov, method=method)
desired = np.array([0.233278563284287, 9.424140804347195])
assert_array_almost_equal(actual, desired, decimal=15)
        # Check that a non-symmetric covariance matrix raises an exception
        # when check_valid='raise' with the default svd method.
mean = [0, 0]
cov = [[1, 2], [1, 2]]
assert_raises(ValueError, random.multivariate_normal, mean, cov,
check_valid='raise')
# Check that non positive-semidefinite covariance warns with
# RuntimeWarning
cov = [[1, 2], [2, 1]]
assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov)
assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov,
method='eigh')
assert_raises(LinAlgError, random.multivariate_normal, mean, cov,
method='cholesky')
        # and that it does not warn when check_valid='ignore'
assert_no_warnings(random.multivariate_normal, mean, cov,
check_valid='ignore')
        # and that it raises ValueError when check_valid='raise'
assert_raises(ValueError, random.multivariate_normal, mean, cov,
check_valid='raise')
assert_raises(ValueError, random.multivariate_normal, mean, cov,
check_valid='raise', method='eigh')
cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
with suppress_warnings() as sup:
random.multivariate_normal(mean, cov, method=method)
w = sup.record(RuntimeWarning)
assert len(w) == 0
mu = np.zeros(2)
cov = np.eye(2)
assert_raises(ValueError, random.multivariate_normal, mean, cov,
check_valid='other')
assert_raises(ValueError, random.multivariate_normal,
np.zeros((2, 1, 1)), cov)
assert_raises(ValueError, random.multivariate_normal,
mu, np.empty((3, 2)))
assert_raises(ValueError, random.multivariate_normal,
mu, np.eye(3))
def test_negative_binomial(self):
random = Generator(MT19937(self.seed))
actual = random.negative_binomial(n=100, p=.12345, size=(3, 2))
desired = np.array([[543, 727],
[775, 760],
[600, 674]])
assert_array_equal(actual, desired)
def test_negative_binomial_exceptions(self):
with np.errstate(invalid='ignore'):
assert_raises(ValueError, random.negative_binomial, 100, np.nan)
assert_raises(ValueError, random.negative_binomial, 100,
[np.nan] * 10)
def test_noncentral_chisquare(self):
random = Generator(MT19937(self.seed))
actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
desired = np.array([[ 1.70561552362133, 15.97378184942111],
[13.71483425173724, 20.17859633310629],
[11.3615477156643 , 3.67891108738029]])
assert_array_almost_equal(actual, desired, decimal=14)
actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
desired = np.array([[9.41427665607629e-04, 1.70473157518850e-04],
[1.14554372041263e+00, 1.38187755933435e-03],
[1.90659181905387e+00, 1.21772577941822e+00]])
assert_array_almost_equal(actual, desired, decimal=14)
random = Generator(MT19937(self.seed))
actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
desired = np.array([[0.82947954590419, 1.80139670767078],
[6.58720057417794, 7.00491463609814],
[6.31101879073157, 6.30982307753005]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f(self):
random = Generator(MT19937(self.seed))
actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1,
size=(3, 2))
desired = np.array([[0.060310671139 , 0.23866058175939],
[0.86860246709073, 0.2668510459738 ],
[0.23375780078364, 1.88922102885943]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f_nan(self):
random = Generator(MT19937(self.seed))
actual = random.noncentral_f(dfnum=5, dfden=2, nonc=np.nan)
assert np.isnan(actual)
def test_normal(self):
random = Generator(MT19937(self.seed))
actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[-3.618412914693162, 2.635726692647081],
[-2.116923463013243, 0.807460983059643],
[ 1.446547137248593, 2.485684213886024]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_normal_0(self):
assert_equal(random.normal(scale=0), 0)
assert_raises(ValueError, random.normal, scale=-0.)
def test_pareto(self):
random = Generator(MT19937(self.seed))
actual = random.pareto(a=.123456789, size=(3, 2))
desired = np.array([[1.0394926776069018e+00, 7.7142534343505773e+04],
[7.2640150889064703e-01, 3.4650454783825594e+05],
[4.5852344481994740e+04, 6.5851383009539105e+07]])
# For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
# matrix differs by 24 nulps. Discussion:
# https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html
# Consensus is that this is probably some gcc quirk that affects
# rounding but not in any important way, so we just use a looser
# tolerance on this test:
np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
def test_poisson(self):
random = Generator(MT19937(self.seed))
actual = random.poisson(lam=.123456789, size=(3, 2))
desired = np.array([[0, 0],
[0, 0],
[0, 0]])
assert_array_equal(actual, desired)
def test_poisson_exceptions(self):
lambig = np.iinfo('int64').max
lamneg = -1
assert_raises(ValueError, random.poisson, lamneg)
assert_raises(ValueError, random.poisson, [lamneg] * 10)
assert_raises(ValueError, random.poisson, lambig)
assert_raises(ValueError, random.poisson, [lambig] * 10)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, random.poisson, np.nan)
assert_raises(ValueError, random.poisson, [np.nan] * 10)
def test_power(self):
random = Generator(MT19937(self.seed))
actual = random.power(a=.123456789, size=(3, 2))
desired = np.array([[1.977857368842754e-09, 9.806792196620341e-02],
[2.482442984543471e-10, 1.527108843266079e-01],
[8.188283434244285e-02, 3.950547209346948e-01]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_rayleigh(self):
random = Generator(MT19937(self.seed))
actual = random.rayleigh(scale=10, size=(3, 2))
desired = np.array([[ 4.51734079831581, 15.6802442485758 ],
[ 4.19850651287094, 17.08718809823704],
[14.7907457708776 , 15.85545333419775]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_rayleigh_0(self):
assert_equal(random.rayleigh(scale=0), 0)
assert_raises(ValueError, random.rayleigh, scale=-0.)
def test_standard_cauchy(self):
random = Generator(MT19937(self.seed))
actual = random.standard_cauchy(size=(3, 2))
desired = np.array([[-1.489437778266206, -3.275389641569784],
[ 0.560102864910406, -0.680780916282552],
[-1.314912905226277, 0.295852965660225]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_exponential(self):
random = Generator(MT19937(self.seed))
actual = random.standard_exponential(size=(3, 2), method='inv')
desired = np.array([[0.102031839440643, 1.229350298474972],
[0.088137284693098, 1.459859985522667],
[1.093830802293668, 1.256977002164613]])
assert_array_almost_equal(actual, desired, decimal=15)
    def test_standard_exponential_type_error(self):
assert_raises(TypeError, random.standard_exponential, dtype=np.int32)
def test_standard_gamma(self):
random = Generator(MT19937(self.seed))
actual = random.standard_gamma(shape=3, size=(3, 2))
desired = np.array([[0.62970724056362, 1.22379851271008],
[3.899412530884 , 4.12479964250139],
[3.74994102464584, 3.74929307690815]])
assert_array_almost_equal(actual, desired, decimal=14)
    def test_standard_gamma_scalar_float(self):
random = Generator(MT19937(self.seed))
actual = random.standard_gamma(3, dtype=np.float32)
desired = 2.9242148399353027
assert_array_almost_equal(actual, desired, decimal=6)
def test_standard_gamma_float(self):
random = Generator(MT19937(self.seed))
actual = random.standard_gamma(shape=3, size=(3, 2))
desired = np.array([[0.62971, 1.2238 ],
[3.89941, 4.1248 ],
[3.74994, 3.74929]])
assert_array_almost_equal(actual, desired, decimal=5)
    def test_standard_gamma_float_out(self):
actual = np.zeros((3, 2), dtype=np.float32)
random = Generator(MT19937(self.seed))
random.standard_gamma(10.0, out=actual, dtype=np.float32)
desired = np.array([[10.14987, 7.87012],
[ 9.46284, 12.56832],
[13.82495, 7.81533]], dtype=np.float32)
assert_array_almost_equal(actual, desired, decimal=5)
random = Generator(MT19937(self.seed))
random.standard_gamma(10.0, out=actual, size=(3, 2), dtype=np.float32)
assert_array_almost_equal(actual, desired, decimal=5)
def test_standard_gamma_unknown_type(self):
assert_raises(TypeError, random.standard_gamma, 1.,
dtype='int32')
def test_out_size_mismatch(self):
out = np.zeros(10)
assert_raises(ValueError, random.standard_gamma, 10.0, size=20,
out=out)
assert_raises(ValueError, random.standard_gamma, 10.0, size=(10, 1),
out=out)
def test_standard_gamma_0(self):
assert_equal(random.standard_gamma(shape=0), 0)
assert_raises(ValueError, random.standard_gamma, shape=-0.)
def test_standard_normal(self):
random = Generator(MT19937(self.seed))
actual = random.standard_normal(size=(3, 2))
desired = np.array([[-1.870934851846581, 1.25613495182354 ],
[-1.120190126006621, 0.342002097029821],
[ 0.661545174124296, 1.181113712443012]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_normal_unsupported_type(self):
assert_raises(TypeError, random.standard_normal, dtype=np.int32)
def test_standard_t(self):
random = Generator(MT19937(self.seed))
actual = random.standard_t(df=10, size=(3, 2))
desired = np.array([[-1.484666193042647, 0.30597891831161 ],
[ 1.056684299648085, -0.407312602088507],
[ 0.130704414281157, -2.038053410490321]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_triangular(self):
random = Generator(MT19937(self.seed))
actual = random.triangular(left=5.12, mode=10.23, right=20.34,
size=(3, 2))
desired = np.array([[ 7.86664070590917, 13.6313848513185 ],
[ 7.68152445215983, 14.36169131136546],
[13.16105603911429, 13.72341621856971]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_uniform(self):
random = Generator(MT19937(self.seed))
actual = random.uniform(low=1.23, high=10.54, size=(3, 2))
desired = np.array([[2.13306255040998 , 7.816987531021207],
[2.015436610109887, 8.377577533009589],
[7.421792588856135, 7.891185744455209]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_uniform_range_bounds(self):
fmin = np.finfo('float').min
fmax = np.finfo('float').max
func = random.uniform
assert_raises(OverflowError, func, -np.inf, 0)
assert_raises(OverflowError, func, 0, np.inf)
assert_raises(OverflowError, func, fmin, fmax)
assert_raises(OverflowError, func, [-np.inf], [0])
assert_raises(OverflowError, func, [0], [np.inf])
        # (fmax / 1e17) - fmin is within range, so this should not throw;
        # account for i386 extended precision, where DBL_MAX / 1e17 + DBL_MAX
        # can exceed DBL_MAX, by increasing fmin a bit
random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)
def test_scalar_exception_propagation(self):
# Tests that exceptions are correctly propagated in distributions
# when called with objects that throw exceptions when converted to
# scalars.
#
# Regression test for gh: 8865
class ThrowingFloat(np.ndarray):
def __float__(self):
raise TypeError
throwing_float = np.array(1.0).view(ThrowingFloat)
assert_raises(TypeError, random.uniform, throwing_float,
throwing_float)
class ThrowingInteger(np.ndarray):
def __int__(self):
raise TypeError
throwing_int = np.array(1).view(ThrowingInteger)
assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1)
def test_vonmises(self):
random = Generator(MT19937(self.seed))
actual = random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
desired = np.array([[ 1.107972248690106, 2.841536476232361],
[ 1.832602376042457, 1.945511926976032],
[-0.260147475776542, 2.058047492231698]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_vonmises_small(self):
# check infinite loop, gh-4720
random = Generator(MT19937(self.seed))
r = random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
assert_(np.isfinite(r).all())
def test_vonmises_nan(self):
random = Generator(MT19937(self.seed))
r = random.vonmises(mu=0., kappa=np.nan)
assert_(np.isnan(r))
def test_wald(self):
random = Generator(MT19937(self.seed))
actual = random.wald(mean=1.23, scale=1.54, size=(3, 2))
desired = np.array([[0.26871721804551, 3.2233942732115 ],
[2.20328374987066, 2.40958405189353],
[2.07093587449261, 0.73073890064369]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_weibull(self):
random = Generator(MT19937(self.seed))
actual = random.weibull(a=1.23, size=(3, 2))
desired = np.array([[0.138613914769468, 1.306463419753191],
[0.111623365934763, 1.446570494646721],
[1.257145775276011, 1.914247725027957]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_weibull_0(self):
random = Generator(MT19937(self.seed))
assert_equal(random.weibull(a=0, size=12), np.zeros(12))
assert_raises(ValueError, random.weibull, a=-0.)
def test_zipf(self):
random = Generator(MT19937(self.seed))
actual = random.zipf(a=1.23, size=(3, 2))
desired = np.array([[ 1, 1],
[ 10, 867],
[354, 2]])
assert_array_equal(actual, desired)
class TestBroadcast(object):
# tests that functions that broadcast behave
# correctly when presented with non-scalar arguments
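    # Each test recreates Generator(MT19937(self.seed)) before every call, so
    # broadcasting either the first or the second argument must yield the same
    # stream, which is checked against a single `desired` array.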
def setup(self):
self.seed = 123456789
def test_uniform(self):
random = Generator(MT19937(self.seed))
low = [0]
high = [1]
uniform = random.uniform
desired = np.array([0.16693771389729, 0.19635129550675, 0.75563050964095])
random = Generator(MT19937(self.seed))
actual = random.uniform(low * 3, high)
assert_array_almost_equal(actual, desired, decimal=14)
random = Generator(MT19937(self.seed))
actual = random.uniform(low, high * 3)
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
loc = [0]
scale = [1]
bad_scale = [-1]
random = Generator(MT19937(self.seed))
desired = np.array([-0.38736406738527, 0.79594375042255, 0.0197076236097])
random = Generator(MT19937(self.seed))
actual = random.normal(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.normal, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
normal = random.normal
actual = normal(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc, bad_scale * 3)
def test_beta(self):
a = [1]
b = [2]
bad_a = [-1]
bad_b = [-2]
desired = np.array([0.18719338682602, 0.73234824491364, 0.17928615186455])
random = Generator(MT19937(self.seed))
beta = random.beta
actual = beta(a * 3, b)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a * 3, b)
assert_raises(ValueError, beta, a * 3, bad_b)
random = Generator(MT19937(self.seed))
actual = random.beta(a, b * 3)
assert_array_almost_equal(actual, desired, decimal=14)
def test_exponential(self):
scale = [1]
bad_scale = [-1]
desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
random = Generator(MT19937(self.seed))
actual = random.exponential(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.exponential, bad_scale * 3)
def test_standard_gamma(self):
shape = [1]
bad_shape = [-1]
desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
random = Generator(MT19937(self.seed))
std_gamma = random.standard_gamma
actual = std_gamma(shape * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, std_gamma, bad_shape * 3)
def test_gamma(self):
shape = [1]
scale = [2]
bad_shape = [-1]
bad_scale = [-2]
desired = np.array([1.34491986425611, 0.42760990636187, 1.4355697857258])
random = Generator(MT19937(self.seed))
gamma = random.gamma
actual = gamma(shape * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape * 3, scale)
assert_raises(ValueError, gamma, shape * 3, bad_scale)
random = Generator(MT19937(self.seed))
gamma = random.gamma
actual = gamma(shape, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape, scale * 3)
assert_raises(ValueError, gamma, shape, bad_scale * 3)
def test_f(self):
dfnum = [1]
dfden = [2]
bad_dfnum = [-1]
bad_dfden = [-2]
desired = np.array([0.07765056244107, 7.72951397913186, 0.05786093891763])
random = Generator(MT19937(self.seed))
f = random.f
actual = f(dfnum * 3, dfden)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum * 3, dfden)
assert_raises(ValueError, f, dfnum * 3, bad_dfden)
random = Generator(MT19937(self.seed))
f = random.f
actual = f(dfnum, dfden * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum, dfden * 3)
assert_raises(ValueError, f, dfnum, bad_dfden * 3)
def test_noncentral_f(self):
dfnum = [2]
dfden = [3]
nonc = [4]
bad_dfnum = [0]
bad_dfden = [-1]
bad_nonc = [-2]
desired = np.array([2.02434240411421, 12.91838601070124, 1.24395160354629])
random = Generator(MT19937(self.seed))
nonc_f = random.noncentral_f
actual = nonc_f(dfnum * 3, dfden, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3)))
assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)
random = Generator(MT19937(self.seed))
nonc_f = random.noncentral_f
actual = nonc_f(dfnum, dfden * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)
random = Generator(MT19937(self.seed))
nonc_f = random.noncentral_f
actual = nonc_f(dfnum, dfden, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
def test_noncentral_f_small_df(self):
random = Generator(MT19937(self.seed))
desired = np.array([0.04714867120827, 0.1239390327694])
actual = random.noncentral_f(0.9, 0.9, 2, size=2)
assert_array_almost_equal(actual, desired, decimal=14)
def test_chisquare(self):
df = [1]
bad_df = [-1]
desired = np.array([0.05573640064251, 1.47220224353539, 2.9469379318589])
random = Generator(MT19937(self.seed))
actual = random.chisquare(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.chisquare, bad_df * 3)
def test_noncentral_chisquare(self):
df = [1]
nonc = [2]
bad_df = [-1]
bad_nonc = [-2]
desired = np.array([0.07710766249436, 5.27829115110304, 0.630732147399])
random = Generator(MT19937(self.seed))
nonc_chi = random.noncentral_chisquare
actual = nonc_chi(df * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)
random = Generator(MT19937(self.seed))
nonc_chi = random.noncentral_chisquare
actual = nonc_chi(df, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)
def test_standard_t(self):
df = [1]
bad_df = [-1]
desired = np.array([-1.39498829447098, -1.23058658835223, 0.17207021065983])
random = Generator(MT19937(self.seed))
actual = random.standard_t(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.standard_t, bad_df * 3)
def test_vonmises(self):
mu = [2]
kappa = [1]
bad_kappa = [-1]
desired = np.array([2.25935584988528, 2.23326261461399, -2.84152146503326])
random = Generator(MT19937(self.seed))
actual = random.vonmises(mu * 3, kappa)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.vonmises, mu * 3, bad_kappa)
random = Generator(MT19937(self.seed))
actual = random.vonmises(mu, kappa * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.vonmises, mu, bad_kappa * 3)
def test_pareto(self):
a = [1]
bad_a = [-1]
desired = np.array([0.95905052946317, 0.2383810889437 , 1.04988745750013])
random = Generator(MT19937(self.seed))
actual = random.pareto(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.pareto, bad_a * 3)
def test_weibull(self):
a = [1]
bad_a = [-1]
desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
random = Generator(MT19937(self.seed))
actual = random.weibull(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.weibull, bad_a * 3)
def test_power(self):
a = [1]
bad_a = [-1]
desired = np.array([0.48954864361052, 0.19249412888486, 0.51216834058807])
random = Generator(MT19937(self.seed))
actual = random.power(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.power, bad_a * 3)
def test_laplace(self):
loc = [0]
scale = [1]
bad_scale = [-1]
desired = np.array([-1.09698732625119, -0.93470271947368, 0.71592671378202])
random = Generator(MT19937(self.seed))
laplace = random.laplace
actual = laplace(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
laplace = random.laplace
actual = laplace(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc, bad_scale * 3)
def test_gumbel(self):
loc = [0]
scale = [1]
bad_scale = [-1]
desired = np.array([1.70020068231762, 1.52054354273631, -0.34293267607081])
random = Generator(MT19937(self.seed))
gumbel = random.gumbel
actual = gumbel(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
gumbel = random.gumbel
actual = gumbel(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc, bad_scale * 3)
def test_logistic(self):
loc = [0]
scale = [1]
bad_scale = [-1]
desired = np.array([-1.607487640433, -1.40925686003678, 1.12887112820397])
random = Generator(MT19937(self.seed))
actual = random.logistic(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.logistic, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
actual = random.logistic(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.logistic, loc, bad_scale * 3)
assert_equal(random.logistic(1.0, 0.0), 1.0)
def test_lognormal(self):
mean = [0]
sigma = [1]
bad_sigma = [-1]
desired = np.array([0.67884390500697, 2.21653186290321, 1.01990310084276])
random = Generator(MT19937(self.seed))
lognormal = random.lognormal
actual = lognormal(mean * 3, sigma)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
random = Generator(MT19937(self.seed))
        actual = random.lognormal(mean, sigma * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.lognormal, mean, bad_sigma * 3)
def test_rayleigh(self):
scale = [1]
bad_scale = [-1]
desired = np.array([0.60439534475066, 0.66120048396359, 1.67873398389499])
random = Generator(MT19937(self.seed))
actual = random.rayleigh(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.rayleigh, bad_scale * 3)
def test_wald(self):
mean = [0.5]
scale = [1]
bad_mean = [0]
bad_scale = [-2]
desired = np.array([0.38052407392905, 0.50701641508592, 0.484935249864])
random = Generator(MT19937(self.seed))
actual = random.wald(mean * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.wald, bad_mean * 3, scale)
assert_raises(ValueError, random.wald, mean * 3, bad_scale)
random = Generator(MT19937(self.seed))
actual = random.wald(mean, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.wald, bad_mean, scale * 3)
assert_raises(ValueError, random.wald, mean, bad_scale * 3)
def test_triangular(self):
left = [1]
right = [3]
mode = [2]
bad_left_one = [3]
bad_mode_one = [4]
bad_left_two, bad_mode_two = right * 2
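        # right * 2 is [3, 3], so both names unpack to the scalar 3 (the right
        # endpoint), giving deliberately invalid left/mode values for the checks below.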
desired = np.array([1.57781954604754, 1.62665986867957, 2.30090130831326])
random = Generator(MT19937(self.seed))
triangular = random.triangular
actual = triangular(left * 3, mode, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two,
right)
random = Generator(MT19937(self.seed))
triangular = random.triangular
actual = triangular(left, mode * 3, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3,
right)
random = Generator(MT19937(self.seed))
triangular = random.triangular
actual = triangular(left, mode, right * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two,
right * 3)
assert_raises(ValueError, triangular, 10., 0., 20.)
assert_raises(ValueError, triangular, 10., 25., 20.)
assert_raises(ValueError, triangular, 10., 10., 10.)
def test_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
desired = np.array([0, 0, 1])
random = Generator(MT19937(self.seed))
binom = random.binomial
actual = binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n * 3, p)
assert_raises(ValueError, binom, n * 3, bad_p_one)
assert_raises(ValueError, binom, n * 3, bad_p_two)
random = Generator(MT19937(self.seed))
actual = random.binomial(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n, p * 3)
assert_raises(ValueError, binom, n, bad_p_one * 3)
assert_raises(ValueError, binom, n, bad_p_two * 3)
def test_negative_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
desired = np.array([0, 2, 1], dtype=np.int64)
random = Generator(MT19937(self.seed))
neg_binom = random.negative_binomial
actual = neg_binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n * 3, p)
assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
assert_raises(ValueError, neg_binom, n * 3, bad_p_two)
random = Generator(MT19937(self.seed))
neg_binom = random.negative_binomial
actual = neg_binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n, p * 3)
assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
def test_poisson(self):
lam = [1]
bad_lam_one = [-1]
desired = np.array([0, 0, 3])
random = Generator(MT19937(self.seed))
max_lam = random._poisson_lam_max
bad_lam_two = [max_lam * 2]
poisson = random.poisson
actual = poisson(lam * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, poisson, bad_lam_one * 3)
assert_raises(ValueError, poisson, bad_lam_two * 3)
def test_zipf(self):
a = [2]
bad_a = [0]
desired = np.array([1, 8, 1])
random = Generator(MT19937(self.seed))
zipf = random.zipf
actual = zipf(a * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, zipf, bad_a * 3)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, zipf, np.nan)
assert_raises(ValueError, zipf, [0, 0, np.nan])
def test_geometric(self):
p = [0.5]
bad_p_one = [-1]
bad_p_two = [1.5]
desired = np.array([1, 1, 3])
random = Generator(MT19937(self.seed))
geometric = random.geometric
actual = geometric(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, geometric, bad_p_one * 3)
assert_raises(ValueError, geometric, bad_p_two * 3)
def test_hypergeometric(self):
ngood = [1]
nbad = [2]
nsample = [2]
bad_ngood = [-1]
bad_nbad = [-2]
bad_nsample_one = [-1]
bad_nsample_two = [4]
desired = np.array([0, 0, 1])
random = Generator(MT19937(self.seed))
actual = random.hypergeometric(ngood * 3, nbad, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, random.hypergeometric, bad_ngood * 3, nbad, nsample)
assert_raises(ValueError, random.hypergeometric, ngood * 3, bad_nbad, nsample)
assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_one)
assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_two)
random = Generator(MT19937(self.seed))
actual = random.hypergeometric(ngood, nbad * 3, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, random.hypergeometric, bad_ngood, nbad * 3, nsample)
assert_raises(ValueError, random.hypergeometric, ngood, bad_nbad * 3, nsample)
assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_one)
assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_two)
random = Generator(MT19937(self.seed))
hypergeom = random.hypergeometric
actual = hypergeom(ngood, nbad, nsample * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
assert_raises(ValueError, hypergeom, -1, 10, 20)
assert_raises(ValueError, hypergeom, 10, -1, 20)
assert_raises(ValueError, hypergeom, 10, 10, -1)
assert_raises(ValueError, hypergeom, 10, 10, 25)
# ValueError for arguments that are too big.
assert_raises(ValueError, hypergeom, 2**30, 10, 20)
assert_raises(ValueError, hypergeom, 999, 2**31, 50)
assert_raises(ValueError, hypergeom, 999, [2**29, 2**30], 1000)
def test_logseries(self):
p = [0.5]
bad_p_one = [2]
bad_p_two = [-1]
desired = np.array([1, 1, 1])
random = Generator(MT19937(self.seed))
logseries = random.logseries
actual = logseries(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, logseries, bad_p_one * 3)
assert_raises(ValueError, logseries, bad_p_two * 3)
def test_multinomial(self):
random = Generator(MT19937(self.seed))
actual = random.multinomial([5, 20], [1 / 6.] * 6, size=(3, 2))
desired = np.array([[[0, 0, 2, 1, 2, 0],
[2, 3, 6, 4, 2, 3]],
[[1, 0, 1, 0, 2, 1],
[7, 2, 2, 1, 4, 4]],
[[0, 2, 0, 1, 2, 0],
[3, 2, 3, 3, 4, 5]]], dtype=np.int64)
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed))
actual = random.multinomial([5, 20], [1 / 6.] * 6)
desired = np.array([[0, 0, 2, 1, 2, 0],
[2, 3, 6, 4, 2, 3]], dtype=np.int64)
assert_array_equal(actual, desired)
class TestThread(object):
# make sure each state produces the same sequence even in threads
def setup(self):
self.seeds = range(4)
def check_function(self, function, sz):
from threading import Thread
out1 = np.empty((len(self.seeds),) + sz)
out2 = np.empty((len(self.seeds),) + sz)
# threaded generation
t = [Thread(target=function, args=(Generator(MT19937(s)), o))
for s, o in zip(self.seeds, out1)]
[x.start() for x in t]
[x.join() for x in t]
        # the same generation, done serially
for s, o in zip(self.seeds, out2):
function(Generator(MT19937(s)), o)
# these platforms change x87 fpu precision mode in threads
if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
assert_array_almost_equal(out1, out2)
else:
assert_array_equal(out1, out2)
def test_normal(self):
def gen_random(state, out):
out[...] = state.normal(size=10000)
self.check_function(gen_random, sz=(10000,))
def test_exp(self):
def gen_random(state, out):
out[...] = state.exponential(scale=np.ones((100, 1000)))
self.check_function(gen_random, sz=(100, 1000))
def test_multinomial(self):
def gen_random(state, out):
out[...] = state.multinomial(10, [1 / 6.] * 6, size=10000)
self.check_function(gen_random, sz=(10000, 6))
# See Issue #4263
class TestSingleEltArrayInput(object):
def setup(self):
self.argOne = np.array([2])
self.argTwo = np.array([3])
self.argThree = np.array([4])
self.tgtShape = (1,)
def test_one_arg_funcs(self):
funcs = (random.exponential, random.standard_gamma,
random.chisquare, random.standard_t,
random.pareto, random.weibull,
random.power, random.rayleigh,
random.poisson, random.zipf,
random.geometric, random.logseries)
probfuncs = (random.geometric, random.logseries)
for func in funcs:
if func in probfuncs: # p < 1.0
out = func(np.array([0.5]))
else:
out = func(self.argOne)
assert_equal(out.shape, self.tgtShape)
def test_two_arg_funcs(self):
funcs = (random.uniform, random.normal,
random.beta, random.gamma,
random.f, random.noncentral_chisquare,
random.vonmises, random.laplace,
random.gumbel, random.logistic,
random.lognormal, random.wald,
random.binomial, random.negative_binomial)
probfuncs = (random.binomial, random.negative_binomial)
for func in funcs:
if func in probfuncs: # p <= 1
argTwo = np.array([0.5])
else:
argTwo = self.argTwo
out = func(self.argOne, argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, argTwo[0])
assert_equal(out.shape, self.tgtShape)
def test_integers(self, endpoint):
itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
func = random.integers
high = np.array([1])
low = np.array([0])
for dt in itype:
out = func(low, high, endpoint=endpoint, dtype=dt)
assert_equal(out.shape, self.tgtShape)
out = func(low[0], high, endpoint=endpoint, dtype=dt)
assert_equal(out.shape, self.tgtShape)
out = func(low, high[0], endpoint=endpoint, dtype=dt)
assert_equal(out.shape, self.tgtShape)
def test_three_arg_funcs(self):
funcs = [random.noncentral_f, random.triangular,
random.hypergeometric]
for func in funcs:
out = func(self.argOne, self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, self.argTwo[0], self.argThree)
assert_equal(out.shape, self.tgtShape)
|
translate.py
|
import argparse
import io
import itertools
import multiprocessing
import os
import subprocess
import sys
import threading
import time
import traceback
from collections import OrderedDict, deque
CMD = 'python translate.py --model {model} {extra}'
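# Command template for one worker subprocess; get_proc() fills in the model path
# and appends any extra command-line arguments unchanged.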
def count_lines(f):
i = 0
if not os.path.exists(f):
return i
with open(f) as r:
for _ in r:
i += 1
return i
def get_proc(model, device, extra_args, silent):
env = dict(os.environ) # Make a copy of the current environment
if isinstance(extra_args, (list, tuple)):
extra_args = " ".join(extra_args)
env['CUDA_VISIBLE_DEVICES'] = f'{device}'
cmd = CMD.format(
model=model,
extra=extra_args)
if not silent:
sys.stderr.write("translate cmd: {}\n".format(cmd))
p = subprocess.Popen(cmd.split(),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
                         stderr=subprocess.DEVNULL,
env=env)
return p
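# Each worker owns one translate subprocess: it writes every line pulled from the
# shared queue to the subprocess's stdin, closes stdin when it sees the (-1, None)
# sentinel, then reads back one output line per input line and puts
# (original_index, translation) onto the result queue.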
def worker(p, queue, rqueue):
stdin = io.TextIOWrapper(p.stdin, encoding='utf-8')
stdout = io.TextIOWrapper(p.stdout, encoding='utf-8')
indices = []
while True:
i, line = queue.get()
if i < 0: # end of queue
stdin.close()
break
stdin.write(f'{line.strip()}\n')
indices.append(i)
for i in indices:
out = stdout.readline()
rqueue.put((i, out))
def translate(model, pending, done, src2out, devices, entry, extra, silent, write_stdout):
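    # NOTE: `entry` is accepted (and exposed as --entry) but is currently unused;
    # CMD above hard-codes `translate.py` as the worker script.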
n_pending = list(map(count_lines, pending))
n_done = sum(map(count_lines, done)) if done else 0
n_total = sum(n_pending) + n_done
if sum(n_pending) == 0:
return
out_dir = os.path.dirname(next(iter(src2out.values())))
if not os.path.exists(out_dir):
os.makedirs(out_dir)
if pending:
tic = time.time()
fds = [open(f, encoding='utf-8') for f in pending]
reader = itertools.chain(*fds)
queue = multiprocessing.Queue()
rqueue = multiprocessing.Queue()
# feed
for i, line in enumerate(reader):
queue.put((i, line))
for _ in devices:
queue.put((-1, None)) # end of queue
processes = [get_proc(model, device, extra, silent) for device in devices]
threads = [threading.Thread(target=worker, args=(p, queue, rqueue)) for p in processes]
try:
for t in threads:
t.daemon = True
t.start()
hist = deque(maxlen=5) # average over 5 past records
# consume to prevent holding all translations in memory
buffer = []
i = 0
j = 0
writer = None
tic_speed = time.time()
while i < len(pending):
time.sleep(0.5)
for p in processes:
p.poll()
if p.returncode is not None and p.returncode > 0:
sys.stderr.write("Error occurs in worker")
raise RuntimeError()
added = False
while not rqueue.empty():
buffer.append(rqueue.get())
added = True
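                # Results arrive out of order (all workers share one result queue);
                # sort by global line index and flush every line that is next in
                # sequence for the current file, i.e. index == sum(n_pending[:i]) + j.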
if added:
buffer = sorted(buffer, key=lambda x: x[0])
while buffer and buffer[0][0] == sum(n_pending[:i]) + j:
idx, trans = buffer[0]
if writer is None:
if write_stdout:
writer = sys.stdout
else:
writer = open(src2out[pending[i]], 'w')
writer.write(trans)
j += 1
if not j < n_pending[i]:
if not write_stdout and writer is not None:
writer.close()
writer = None
i += 1
j = 0
buffer = buffer[1:] # remove processed output
if not silent:
n1 = n_done + sum(n_pending[:i]) + j + rqueue.qsize() + len(buffer)
hist.append(n1)
rate = 0.0
if len(hist) > 1:
rate = (hist[-1] - hist[0] + 0.0) / len(hist) / (time.time() - tic_speed)
tic_speed = time.time()
duration = time.strftime('%H:%M:%S', time.gmtime(time.time() - tic))
                    sys.stderr.write(f'\r{n1}/{n_total}, {rate:.2f} lines/sec, {duration}')
if not silent:
sys.stderr.write('\n')
except Exception as e:
traceback.print_exc()
for p in processes:
p.terminate()
sys.exit(1)
def main(args):
model = args.model
entry = args.entry
inputs = args.inputs
tag = args.tag
cuda = args.cuda
silent = args.silent
remains = args.remains
write_stdout = args.stdout
force = args.force
extra = " ".join(remains)
if cuda:
devices = cuda
else:
devices = ['cpu']
model_name = model.strip(os.path.sep).replace(os.path.sep, '.')
# skip translated
src2out = OrderedDict()
for name in inputs:
output = os.path.join('translations',
f'{tag + "-" if tag else ""}{model_name}-{os.path.basename(name)}')
src2out[name] = output
if args.list_outputs:
for output in src2out.values():
print(output)
sys.exit(0)
pending = []
done = []
if force:
pending = inputs
else:
for s, o in src2out.items():
if os.path.exists(o) and count_lines(s) == count_lines(o):
# skip translated
done.append(s)
else:
pending.append(s)
for f in done:
sys.stderr.write('skip {}\n'.format(f))
translate(model, pending, done, src2out, devices, entry, extra, silent, write_stdout)
def valid_file(parser, arg):
if arg and not os.path.exists(arg):
parser.error('The file doesn\'t exist: {}'.format(arg))
else:
return arg
def parse_args():
parser = argparse.ArgumentParser()
file_type = lambda arg: valid_file(parser, arg)
parser.add_argument('model')
parser.add_argument('--entry', '-e', default='translate.py')
parser.add_argument('--inputs', '-i', type=file_type, nargs='+')
parser.add_argument('--list-outputs', action='store_true',
help='list output names in correspondence with given model and input files, then exit')
parser.add_argument('--tag', '-t', type=str)
parser.add_argument('--cuda', nargs='+', type=str,
help='e.g. --cuda 0 0 2 or --cuda cpu')
    parser.add_argument('--silent', '-s', action="store_true", help='suppress progress output')
parser.add_argument('--stdout', action="store_true",
                        help='write translations to stdout instead of files; suppresses all unrelated stdout output')
parser.add_argument('--force', action='store_true', help='force overwrite existing translations')
args, remains = parser.parse_known_args()
args.remains = remains
return args
if __name__ == '__main__':
args = parse_args()
main(args)
|
clients.py
|
from . import AlertingClient
import re
import time
import requests
from threading import Thread
from typing import List, Union, Optional
from slackclient import SlackClient
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
from telegram import Bot
class AlertingSlackClient(AlertingClient):
def __init__(self, bot_user_oauth: str, target_channel: str, user_access_token: Optional[str] = None):
assert bot_user_oauth is not None, 'Null bot user oauth provided for slack client'
assert target_channel is not None, 'Null channel provided for slack client'
super().__init__()
self.user_access_token = user_access_token
self.bot_user_oauth = bot_user_oauth
self.target_channel = target_channel
self.bot_client = SlackClient(bot_user_oauth)
if self.user_access_token is not None:
self.user_client = SlackClient(user_access_token)
thread = Thread(target=AlertingSlackClient.start_watching, args=(self.bot_client, self.user_client, self.user_access_token, self.target_channel))
thread.start()
def send_alert(self, title: str, message: str):
self.bot_client.api_call(
"chat.postMessage",
channel=self.target_channel,
text='Title: '+ title + '\n'+message
)
@staticmethod
def start_watching(bot_client: SlackClient, user_client: SlackClient, user_access_token: str, target_channel: str):
RTM_READ_DELAY = 1
if bot_client.rtm_connect():
bot_auth_test = bot_client.api_call("auth.test")
bot_id = bot_auth_test["user_id"]
user_auth_test = user_client.api_call('auth.test')
bot_user_id = user_auth_test['user_id']
def parse_direct_mention(message_text):
"""
Finds a direct mention (a mention that is at the beginning) in message text
and returns the user ID which was mentioned. If there is no direct mention, returns None
"""
matches = re.search("^<@(|[WU].+?)>(.*)", message_text)
# the first group contains the username, the second group contains the remaining message
return (matches.group(1), matches.group(2).strip()) if matches else (None, None)
def parse_bot_commands(slack_events):
"""
Parses a list of events coming from the Slack RTM API to find bot commands.
If a bot command is found, this function returns a tuple of command and channel.
                If it's not found, then this function returns None, None.
"""
for event in slack_events:
if event["type"] == "message" and not "subtype" in event:
user_id, message = parse_direct_mention(event["text"])
if user_id == bot_id:
return message, event["channel"]
return None, None
def handle_command(command, channel):
if command in ['clear', 'clear_channel', 'clear channel']:
bot_client.api_call(
"chat.postMessage",
channel=channel,
text='Affirmative, cleaning channel messages...'
)
# fetch history of messages in channel
all_messages_ts = []
messages = user_client.api_call(
'channels.history',
token=user_access_token,
channel=channel,
count=1000
)
if messages['ok'] is True:
for message in messages['messages']:
all_messages_ts.append(message['ts'])
for ts in list(reversed(all_messages_ts)):
delete_resp = user_client.api_call(
'chat.delete',
token=user_access_token,
channel=channel,
ts=ts,
as_user=True
)
else:
# Sends the default response back to the channel
bot_client.api_call(
"chat.postMessage",
channel=channel,
text="Not sure what you mean. Try *clear*."
)
while True:
command, channel = parse_bot_commands(bot_client.rtm_read())
if command:
handle_command(command, channel)
time.sleep(RTM_READ_DELAY)
else:
print("Connection failed. Exception traceback printed above.")
class AlertingMailGunClient(AlertingClient):
def __init__(self, api_key: str, domain: str, from_email: str, target_email: Union[str,List[str]]):
assert api_key is not None, 'Null api key passed for MailGun Client'
assert isinstance(api_key, str), 'Invalid api key passed for MailGun Client, needed str but found ' + str(type(api_key))
assert domain is not None, 'Null domain passed for MailGun Client'
        assert isinstance(domain, str), 'Invalid domain passed for MailGun Client, needed str but found ' + str(type(domain))
assert from_email is not None, 'Null from email passed for MailGun Client'
assert isinstance(from_email, str), 'Invalid from email passed for MailGun Client, needed str but found ' + str(type(from_email))
assert target_email is not None, 'Null target email passed for MailGun Client'
        assert isinstance(target_email, (str, list)), 'Invalid target email passed for MailGun Client, needed str or list but found ' + str(type(target_email))
if isinstance(target_email, list):
for target in target_email:
assert isinstance(target, str), 'Invalid email passed to MailGun Client, needed str but found ' + str(type(target))
super().__init__()
self.api_key = api_key
self.domain = domain
self.from_email = from_email
self.target_email = target_email
def send_alert(self, title: str, message: str):
return requests.post(
"https://api.mailgun.net/v3/" + self.domain + "/messages",
auth=("api", self.api_key),
data={"from": self.from_email,
"to": self.target_email if isinstance(self.target_email, list) else [self.target_email],
"subject": title,
"text": message})
class AlertingSendGridClient(AlertingClient):
def __init__(self, api_key: str, from_email: str, target_email: str):
assert api_key is not None, 'Null api key passed for SendGrid Client'
assert isinstance(api_key, str), 'Invalid api key passed for SendGrid Client, needed str but found ' + str(type(api_key))
assert from_email is not None, 'Null from email passed for SendGrid Client'
assert isinstance(from_email, str), 'Invalid from email passed for SendGrid Client, needed str but found ' + str(type(from_email))
assert target_email is not None, 'Null target email passed for SendGrid Client'
        assert isinstance(target_email, str), 'Invalid target email passed for SendGrid Client, needed str but found ' + str(type(target_email))
super().__init__()
self.api_key = api_key
self.from_email = from_email
self.target_email = target_email
def send_alert(self, title: str, message: str):
sg = SendGridAPIClient(api_key=self.api_key)
message = Mail(
from_email=self.from_email,
to_emails=self.target_email,
subject=title,
html_content=message)
response = sg.send(message)
return response
class AlertingTelegramClient(AlertingClient):
def __init__(self, token: str, chat_id: str):
assert token is not None, 'Null token passed for Telegram Client'
assert isinstance(token, str), 'Invalid token passed for Telegram Client, needed str but found ' + str(type(token))
assert chat_id is not None, 'Null chat id passed for Telegram Client'
assert isinstance(chat_id, str), 'Invalid chat id passed for Telegram Client, needed str but found ' + str(type(chat_id))
super().__init__()
self.token = token
self.chat_id = chat_id
def send_alert(self, title: str, message: str):
telegram_bot = Bot(token=self.token)
full_message = 'Title: ' + title + '\n' + message
telegram_bot.send_message(
chat_id=self.chat_id,
text=full_message)
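# The function below is a hedged usage sketch added for illustration; it is
# defined but never called, and every credential in it is a hypothetical
# placeholder. It only shows how one of the alerting clients above might be
# constructed and used.
def _alerting_usage_example():
    # Construct one of the clients and send a single alert. Real credentials
    # (Telegram token/chat id, or MailGun/SendGrid keys) are required for the
    # alert to actually be delivered.
    client = AlertingTelegramClient(token='123456:hypothetical-token',
                                    chat_id='-1001234567890')
    client.send_alert(title='Pipeline alert',
                      message='Example alert sent from the usage sketch.')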
|
main.py
|
"""
SubT Challenge Version 1
"""
import gc
import os.path
import math
import threading
import copy
from datetime import timedelta
from collections import defaultdict
from io import StringIO
import numpy as np
from osgar.explore import follow_wall_angle
from osgar.lib.mathex import normalizeAnglePIPI
from osgar.lib import quaternion
from osgar.lib.virtual_bumper import VirtualBumper
from osgar.lib.lidar_pts import equal_scans
from subt.local_planner import LocalPlanner
from subt.trace import Trace, distance3D
# safety limits for exploration and return home
LIMIT_ROLL = math.radians(28) # in Virtual Urban are ramps with 25deg slope
LIMIT_PITCH = math.radians(28)
RETURN_LIMIT_ROLL = math.radians(35)
RETURN_LIMIT_PITCH = math.radians(35)
# accepted LoRa commands
LORA_GO_HOME_CMD = b'GoHome'
LORA_STOP_CMD = b'Stop'
LORA_PAUSE_CMD = b'Pause'
LORA_CONTINUE_CMD = b'Continue'
# reasons for termination of follow wall
REASON_DIST_REACHED = 'dist_reached'
REASON_PITCH_LIMIT = 'pitch_limit'
REASON_ROLL_LIMIT = 'roll_limit'
REASON_VIRTUAL_BUMPER = 'virtual_bumper'
REASON_LORA = 'lora'
REASON_FRONT_BUMPER = 'front_bumper'
REASON_REAR_BUMPER = 'rear_bumper'
def min_dist(laser_data):
if len(laser_data) > 0:
# remove ultra near reflections and unlimited values == 0
laser_data = [x if x > 10 else 10000 for x in laser_data]
return min(laser_data)/1000.0
return 0
def distance(pose1, pose2):
return math.hypot(pose1[0] - pose2[0], pose1[1] - pose2[1])
class Collision(Exception):
pass
class EmergencyStopException(Exception):
pass
class EmergencyStopMonitor:
def __init__(self, robot):
self.robot = robot
def update(self, robot):
if robot.emergency_stop:
raise EmergencyStopException()
# handle STOP independently of current subroutine
if robot.lora_cmd == LORA_STOP_CMD:
print(robot.time, 'LoRa cmd - Stop')
raise EmergencyStopException()
# context manager functions
def __enter__(self):
self.callback = self.robot.register(self.update)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.robot.unregister(self.callback)
class SubTChallenge:
def __init__(self, config, bus):
self.bus = bus
bus.register("desired_speed", "pose2d", "artf_xyz", "pose3d", "stdout", "request_origin")
self.start_pose = None
self.traveled_dist = 0.0
self.time = None
self.max_speed = config['max_speed']
self.max_angular_speed = math.radians(60)
self.walldist = config['walldist']
self.timeout = timedelta(seconds=config['timeout'])
self.symmetric = config['symmetric'] # is robot symmetric?
self.dangerous_dist = config.get('dangerous_dist', 0.3)
self.min_safe_dist = config.get('min_safe_dist', 0.75)
self.safety_turning_coeff = config.get('safety_turning_coeff', 0.8)
virtual_bumper_sec = config.get('virtual_bumper_sec')
self.virtual_bumper = None
if virtual_bumper_sec is not None:
virtual_bumper_radius = config.get('virtual_bumper_radius', 0.1)
self.virtual_bumper = VirtualBumper(timedelta(seconds=virtual_bumper_sec), virtual_bumper_radius)
self.last_position = (0, 0, 0) # properly this should be None, but we really start from zero
self.xyz = (0, 0, 0) # 3D position for mapping artifacts
self.xyz_quat = [0, 0, 0]
self.orientation = quaternion.identity()
self.yaw, self.pitch, self.roll = 0, 0, 0
self.yaw_offset = None # not defined, use first IMU reading
self.is_moving = None # unknown
self.scan = None # I should use class Node instead
self.flipped = False # by default use only front part
self.joint_angle_rad = [] # optional angles, needed for flipping articulated robots
self.stat = defaultdict(int)
self.voltage = []
self.artifacts = []
self.trace = Trace()
self.collision_detector_enabled = False
self.sim_time_sec = 0
self.lora_cmd = None
self.emergency_stop = None
self.monitors = [] # for Emergency Stop Exception
self.use_right_wall = config['right_wall']
self.use_center = False # navigate into center area (controlled by name ending by 'C')
self.is_virtual = config.get('virtual_world', False) # workaround to handle tunnel differences
self.front_bumper = False
self.rear_bumper = False
self.last_send_time = None
self.origin = None # unknown initial position
self.origin_quat = quaternion.identity()
self.offset = (0, 0, 0)
if 'init_offset' in config:
x, y, z = [d/1000.0 for d in config['init_offset']]
self.offset = (x, y, z)
self.init_path = None
if 'init_path' in config:
pts_s = [s.split(',') for s in config['init_path'].split(';')]
self.init_path = [(float(x), float(y)) for x, y in pts_s]
self.origin_error = False
self.robot_name = None # received with origin
scan_subsample = config.get('scan_subsample', 1)
obstacle_influence = config.get('obstacle_influence', 0.8)
direction_adherence = math.radians(config.get('direction_adherence', 90))
self.local_planner = LocalPlanner(
obstacle_influence=obstacle_influence,
direction_adherence=direction_adherence,
max_obstacle_distance=2.5,
scan_subsample=scan_subsample,
max_considered_obstacles=100)
self.use_return_trace = config.get('use_return_trace', True)
self.ref_scan = None
self.pause_start_time = None
if config.get('start_paused', False):
self.pause_start_time = timedelta() # paused from the very beginning
def send_speed_cmd(self, speed, angular_speed):
if self.virtual_bumper is not None:
self.virtual_bumper.update_desired_speed(speed, angular_speed)
self.bus.publish('desired_speed', [round(speed*1000), round(math.degrees(angular_speed)*100)])
# Corresponds to gc.disable() in __main__. See a comment there for more details.
gc.collect()
def maybe_remember_artifact(self, artifact_data, artifact_xyz):
for stored_data, (x, y, z) in self.artifacts:
if distance3D((x, y, z), artifact_xyz) < 4.0:
# in case of uncertain type, rather report both
if stored_data == artifact_data:
return False
self.artifacts.append((artifact_data, artifact_xyz))
return True
def go_straight(self, how_far, timeout=None):
print(self.time, "go_straight %.1f (speed: %.1f)" % (how_far, self.max_speed), self.last_position)
start_pose = self.last_position
if how_far >= 0:
self.send_speed_cmd(self.max_speed, 0.0)
else:
self.send_speed_cmd(-self.max_speed, 0.0)
start_time = self.time
while distance(start_pose, self.last_position) < abs(how_far):
self.update()
if timeout is not None and self.time - start_time > timeout:
print("go_straight - TIMEOUT!")
break
self.send_speed_cmd(0.0, 0.0)
def go_safely(self, desired_direction):
if self.local_planner is None:
safety, safe_direction = 1.0, desired_direction
else:
safety, safe_direction = self.local_planner.recommend(desired_direction)
#print(self.time,"safety:%f desired:%f safe_direction:%f"%(safety, desired_direction, safe_direction))
#desired_angular_speed = 1.2 * safe_direction
desired_angular_speed = 0.9 * safe_direction
size = len(self.scan)
dist = min_dist(self.scan[size//3:2*size//3])
if dist < self.min_safe_dist: # 2.0:
# desired_speed = self.max_speed * (1.2/2.0) * (dist - 0.4) / 1.6
desired_speed = self.max_speed * (dist - self.dangerous_dist) / (self.min_safe_dist - self.dangerous_dist)
else:
desired_speed = self.max_speed # was 2.0
desired_speed = desired_speed * (1.0 - self.safety_turning_coeff * min(self.max_angular_speed, abs(desired_angular_speed)) / self.max_angular_speed)
if self.flipped:
self.send_speed_cmd(-desired_speed, desired_angular_speed) # ??? angular too??!
else:
self.send_speed_cmd(desired_speed, desired_angular_speed)
return safety
def turn(self, angle, with_stop=True, speed=0.0, timeout=None):
print(self.time, "turn %.1f" % math.degrees(angle))
start_pose = self.last_position
if angle >= 0:
self.send_speed_cmd(speed, self.max_angular_speed)
else:
self.send_speed_cmd(speed, -self.max_angular_speed)
start_time = self.time
while abs(normalizeAnglePIPI(start_pose[2] - self.last_position[2])) < abs(angle):
self.update()
if timeout is not None and self.time - start_time > timeout:
print(self.time, "turn - TIMEOUT!")
break
if with_stop:
self.send_speed_cmd(0.0, 0.0)
start_time = self.time
while self.time - start_time < timedelta(seconds=2):
self.update()
if not self.is_moving:
break
print(self.time, 'stop at', self.time - start_time)
def stop(self):
self.send_speed_cmd(0.0, 0.0)
start_time = self.time
while self.time - start_time < timedelta(seconds=20):
self.update()
if not self.is_moving:
break
print(self.time, 'stop at', self.time - start_time, self.is_moving)
def follow_wall(self, radius, right_wall=False, timeout=timedelta(hours=3), dist_limit=None, flipped=False,
pitch_limit=None, roll_limit=None):
# make sure that we will start with clean data
if flipped:
self.scan = None
self.flipped = True
reason = None # termination reason is not defined yet
start_dist = self.traveled_dist
start_time = self.sim_time_sec
last_pause_time = timedelta() # for multiple Pause
current_pause_time = timedelta()
while self.sim_time_sec - start_time < (timeout + last_pause_time + current_pause_time).total_seconds():
try:
channel = self.update()
if (channel == 'scan' and not self.flipped) or (channel == 'scan_back' and self.flipped) or channel == 'scan360':
if self.pause_start_time is None:
if self.use_center:
desired_direction = 0
else:
desired_direction = follow_wall_angle(self.scan, radius=radius, right_wall=right_wall)
self.go_safely(desired_direction)
if dist_limit is not None:
if dist_limit < abs(self.traveled_dist - start_dist): # robot can return backward -> abs()
print(self.time, 'Distance limit reached! At', self.traveled_dist, self.traveled_dist - start_dist)
reason = REASON_DIST_REACHED
break
if pitch_limit is not None and self.pitch is not None:
if abs(self.pitch) > pitch_limit:
print(self.time, 'Pitch limit triggered termination: (pitch %.1f)' % math.degrees(self.pitch))
reason = REASON_PITCH_LIMIT
break
if roll_limit is not None and self.roll is not None:
if abs(self.roll) > roll_limit:
print(self.time, 'Roll limit triggered termination: (roll %.1f)' % math.degrees(self.roll))
reason = REASON_ROLL_LIMIT
break
if self.virtual_bumper is not None and self.virtual_bumper.collision():
print(self.time, "VIRTUAL BUMPER - collision")
self.go_straight(-0.3, timeout=timedelta(seconds=10))
reason = REASON_VIRTUAL_BUMPER
break
if self.front_bumper and not flipped:
print(self.time, "FRONT BUMPER - collision")
self.go_straight(-0.3, timeout=timedelta(seconds=10))
reason = REASON_FRONT_BUMPER
break
if self.rear_bumper and flipped:
print(self.time, "REAR BUMPER - collision")
self.go_straight(-0.3, timeout=timedelta(seconds=10))
reason = REASON_REAR_BUMPER
break
if self.lora_cmd is not None:
# the "GoHome" command must be accepted only on the way there and not on the return home
if dist_limit is None and self.lora_cmd == LORA_GO_HOME_CMD:
print(self.time, 'LoRa cmd - GoHome')
self.lora_cmd = None
reason = REASON_LORA
break
if self.lora_cmd == LORA_PAUSE_CMD:
print(self.time, 'LoRa cmd - Pause')
self.send_speed_cmd(0, 0)
if self.pause_start_time is None:
# ignore repeated Pause
self.pause_start_time = self.time
self.lora_cmd = None
elif self.lora_cmd == LORA_CONTINUE_CMD:
print(self.time, 'LoRa cmd - Continue')
if self.pause_start_time is not None:
# ignore Continue without Pause
last_pause_time += self.time - self.pause_start_time
self.pause_start_time = None
self.lora_cmd = None
if self.pause_start_time is not None:
current_pause_time = self.time - self.pause_start_time
else:
current_pause_time = timedelta()
except Collision:
assert not self.collision_detector_enabled # collision disables further notification
before_stop = self.xyz
self.stop()
after_stop = self.xyz
print("Pose Jump:", before_stop, after_stop)
self.xyz = before_stop
self.go_straight(-1)
self.stop()
if right_wall:
turn_angle = math.pi / 2
else:
turn_angle = -math.pi / 2
self.turn(turn_angle, with_stop=True)
self.go_straight(1.5)
self.stop()
self.turn(-turn_angle, with_stop=True)
self.go_straight(1.5)
self.stop()
self.collision_detector_enabled = True
self.scan = None
self.flipped = False
return self.traveled_dist - start_dist, reason
def return_home(self, timeout, home_threshold=None):
if home_threshold is None:
HOME_THRESHOLD = 5.0
else:
HOME_THRESHOLD = home_threshold
SHORTCUT_RADIUS = 2.3
MAX_TARGET_DISTANCE = 5.0
MIN_TARGET_DISTANCE = 1.0
assert(MAX_TARGET_DISTANCE > SHORTCUT_RADIUS) # Because otherwise we could end up with a target point more distant from home than the robot.
print('Wait and get ready for return')
self.send_speed_cmd(0, 0)
self.wait(dt=timedelta(seconds=3.0))
original_trace = copy.deepcopy(self.trace)
self.trace.prune(SHORTCUT_RADIUS)
self.wait(dt=timedelta(seconds=2.0))
print('done.')
start_time = self.sim_time_sec
target_distance = MAX_TARGET_DISTANCE
count_down = 0
while distance3D(self.xyz, (0, 0, 0)) > HOME_THRESHOLD and self.sim_time_sec - start_time < timeout.total_seconds():
channel = self.update()
if (channel == 'scan' and not self.flipped) or (channel == 'scan_back' and self.flipped) or (channel == 'scan360'):
if target_distance == MIN_TARGET_DISTANCE:
target_x, target_y = original_trace.where_to(self.xyz, target_distance)[:2]
else:
target_x, target_y = self.trace.where_to(self.xyz, target_distance)[:2]
# print(self.time, self.xyz, (target_x, target_y), math.degrees(self.yaw))
x, y = self.xyz[:2]
desired_direction = math.atan2(target_y - y, target_x - x) - self.yaw
if self.flipped:
desired_direction += math.pi # symmetry
for angle in self.joint_angle_rad:
desired_direction -= angle
safety = self.go_safely(desired_direction)
if safety < 0.2:
print(self.time, "Safety low!", safety, desired_direction)
target_distance = MIN_TARGET_DISTANCE
count_down = 300
if count_down > 0:
count_down -= 1
if count_down == 0:
target_distance = MAX_TARGET_DISTANCE
print(self.time, "Recovery to original", target_distance)
print('return_home: dist', distance3D(self.xyz, (0, 0, 0)), 'time(sec)', self.sim_time_sec - start_time)
def follow_trace(self, trace, timeout, max_target_distance=5.0, safety_limit=None):
print('Follow trace')
END_THRESHOLD = 2.0
start_time = self.sim_time_sec
print('MD', self.xyz, distance3D(self.xyz, trace.trace[0]), trace.trace)
while distance3D(self.xyz, trace.trace[0]) > END_THRESHOLD and self.sim_time_sec - start_time < timeout.total_seconds():
if self.update() == 'scan':
target_x, target_y = trace.where_to(self.xyz, max_target_distance)[:2]
x, y = self.xyz[:2]
# print((x, y), (target_x, target_y))
desired_direction = math.atan2(target_y - y, target_x - x) - self.yaw
safety = self.go_safely(desired_direction)
if safety_limit is not None and safety < safety_limit:
print('Danger! Safety limit for follow trace reached!', safety, safety_limit)
break
print('End of follow trace(sec)', self.sim_time_sec - start_time)
def register(self, callback):
self.monitors.append(callback)
return callback
def unregister(self, callback):
assert callback in self.monitors
self.monitors.remove(callback)
def on_pose2d(self, timestamp, data):
x, y, heading = data
pose = (x / 1000.0, y / 1000.0, math.radians(heading / 100.0))
if self.last_position is not None:
self.is_moving = (self.last_position != pose)
dist = math.hypot(pose[0] - self.last_position[0], pose[1] - self.last_position[1])
direction = ((pose[0] - self.last_position[0]) * math.cos(self.last_position[2]) +
(pose[1] - self.last_position[1]) * math.sin(self.last_position[2]))
if direction < 0:
dist = -dist
else:
dist = 0.0
self.last_position = pose
if self.start_pose is None:
self.start_pose = pose
self.traveled_dist += dist
x, y, z = self.xyz
x += math.cos(self.pitch) * math.cos(self.yaw) * dist
y += math.cos(self.pitch) * math.sin(self.yaw) * dist
z += math.sin(self.pitch) * dist
x0, y0, z0 = self.offset
self.last_send_time = self.bus.publish('pose2d', [round((x + x0) * 1000), round((y + y0) * 1000),
round(math.degrees(self.yaw) * 100)])
if self.virtual_bumper is not None:
if self.is_virtual:
self.virtual_bumper.update_pose(timedelta(seconds=self.sim_time_sec), pose)
else:
self.virtual_bumper.update_pose(self.time, pose)
self.xyz = x, y, z
self.trace.update_trace(self.xyz)
# pose3d
dist3d = quaternion.rotate_vector([dist, 0, 0], self.orientation)
self.xyz_quat = [a + b for a, b in zip(self.xyz_quat, dist3d)]
xyz_quat = [p + o for p, o in zip(self.xyz_quat, self.offset)]
self.bus.publish('pose3d', [xyz_quat, self.orientation])
def on_acc(self, timestamp, data):
acc = [x / 1000.0 for x in data]
gacc = np.matrix([[0., 0., 9.80]]) # Gravitational acceleration.
cos_pitch = math.cos(self.pitch)
sin_pitch = math.sin(self.pitch)
# TODO: Once roll is correct, incorporate it here too.
egacc = np.matrix([ # Expected gravitational acceleration given known pitch.
[cos_pitch, 0., sin_pitch],
[0., 1., 0.],
[-sin_pitch, 0., cos_pitch]
]) * gacc.T
cacc = np.asarray(acc) - egacc.T # Corrected acceleration (without gravitational acceleration).
magnitude = math.hypot(cacc[0, 0], cacc[0, 1])
# used to be 12 - see https://bitbucket.org/osrf/subt/issues/166/expected-x2-acceleration
if magnitude > 200: #18.0:
print(self.time, 'Collision!', acc, 'reported:', self.collision_detector_enabled)
if self.collision_detector_enabled:
self.collision_detector_enabled = False
raise Collision()
def on_artf(self, timestamp, data):
artifact_data, deg_100th, dist_mm = data
x, y, z = self.xyz
x0, y0, z0 = self.offset
angle, dist = self.yaw + math.radians(deg_100th / 100.0), dist_mm / 1000.0
ax = x0 + x + math.cos(angle) * dist
ay = y0 + y + math.sin(angle) * dist
az = z0 + z
if -20 < ax < 0 and -10 < ay < 10:
# filter out elements on staging area
self.stdout(self.time, 'Robot at:', (ax, ay, az))
else:
if self.maybe_remember_artifact(artifact_data, (ax, ay, az)):
self.bus.publish('artf_xyz', [[artifact_data, round(ax*1000), round(ay*1000), round(az*1000)]])
def on_joint_angle(self, timestamp, data):
# angles for articulated robot in 1/100th of degree
self.joint_angle_rad = [math.radians(a/100) for a in data]
def on_bumpers_front(self, timestamp, data):
self.front_bumper = max(data) # array of boolean values where True means collision
def on_bumpers_rear(self, timestamp, data):
self.rear_bumper = max(data) # array of boolean values where True means collision
def update(self):
packet = self.bus.listen()
if packet is not None:
# print('SubT', packet)
timestamp, channel, data = packet
if self.time is None or int(self.time.seconds)//60 != int(timestamp.seconds)//60:
self.stdout(timestamp, '(%.1f %.1f %.1f)' % self.xyz, sorted(self.stat.items()))
print(timestamp, list(('%.1f' % (v/100)) for v in self.voltage))
self.stat.clear()
self.time = timestamp
if not self.is_virtual:
self.sim_time_sec = self.time.total_seconds()
self.stat[channel] += 1
handler = getattr(self, "on_" + channel, None)
if handler is not None:
handler(timestamp, data)
elif channel == 'scan' and not self.flipped:
if self.last_send_time is not None and self.last_send_time - self.time > timedelta(seconds=0.1):
print('queue delay', self.last_send_time - self.time)
self.scan = data
if self.ref_scan is None or not equal_scans(self.scan, self.ref_scan, 200):
self.ref_scan = self.scan
self.ref_count = 0
else:
self.ref_count += 1
if self.ref_count > 300:
print('Robot is stuck!', self.ref_count)
if self.collision_detector_enabled:
self.collision_detector_enabled = False
raise Collision()
self.ref_count = 0
if self.local_planner is not None:
self.local_planner.update(data)
elif channel == 'scan_back' and self.flipped:
self.scan = data
if self.local_planner is not None:
self.local_planner.update(data)
elif channel == 'scan360':
# reduce original 360 degrees scan to 270 degrees oriented forward or backward
index45deg = int(round(len(data)/8))
if self.flipped:
mid = len(data)//2
self.scan = (data[mid:]+data[:mid])[index45deg:-index45deg]
else:
self.scan = data[index45deg:-index45deg]
if self.local_planner is not None:
self.local_planner.update(data)
elif channel == 'rot':
temp_yaw, self.pitch, self.roll = [normalizeAnglePIPI(math.radians(x/100)) for x in data]
if self.yaw_offset is None:
self.yaw_offset = -temp_yaw
self.yaw = temp_yaw + self.yaw_offset
elif channel == 'orientation':
self.orientation = data
elif channel == 'sim_time_sec':
self.sim_time_sec = data
elif channel == 'origin':
if self.origin is None: # accept only initial offset
self.robot_name = data[0].decode('ascii')
if len(data) == 8:
self.origin = data[1:4]
qx, qy, qz, qw = data[4:]
self.origin_quat = qx, qy, qz, qw # quaternion
else:
self.stdout('Origin ERROR received')
self.origin_error = True
elif channel == 'voltage':
self.voltage = data
elif channel == 'emergency_stop':
self.emergency_stop = data
elif channel == 'cmd':
self.lora_cmd = data
for m in self.monitors:
m(self)
return channel
def wait(self, dt, use_sim_time=False): # TODO refactor to some common class
if use_sim_time:
start_sim_time_sec = self.sim_time_sec
while self.sim_time_sec - start_sim_time_sec < dt.total_seconds():
self.update()
else:
if self.time is None:
self.update()
start_time = self.time
while self.time - start_time < dt:
self.update()
def stdout(self, *args, **kwargs):
output = StringIO()
print(*args, file=output, **kwargs)
contents = output.getvalue().strip()
output.close()
self.bus.publish('stdout', contents)
print(contents)
#############################################
def system_nav_trace(self, path=None):
"""
Navigate along line
"""
dx, dy, __ = self.offset
trace = Trace()
trace.add_line_to((-dx, -dy, 0))
if path is not None:
for x, y in path:
trace.add_line_to((x - dx, y - dy, 0))
trace.reverse()
self.follow_trace(trace, timeout=timedelta(seconds=120), max_target_distance=2.5, safety_limit=0.2)
def robust_follow_wall(self, radius, right_wall=False, timeout=timedelta(hours=3), dist_limit=None, flipped=False,
pitch_limit=None, roll_limit=None):
"""
Handle multiple re-tries with increasing distance from the wall if necessary
"""
allow_virtual_flip = self.symmetric
walldist = self.walldist
total_dist = 0.0
start_time = self.sim_time_sec
overall_timeout = timeout
while self.sim_time_sec - start_time < overall_timeout.total_seconds():
if self.sim_time_sec - start_time > overall_timeout.total_seconds():
print('Total Timeout Reached', overall_timeout.total_seconds())
break
timeout = timedelta(seconds=overall_timeout.total_seconds() - (self.sim_time_sec - start_time))
print('Current timeout', timeout)
dist, reason = self.follow_wall(radius=walldist, right_wall=right_wall, timeout=timeout, flipped=flipped,
pitch_limit=LIMIT_PITCH, roll_limit=LIMIT_ROLL)
total_dist += dist
if reason is None or reason in [REASON_LORA,]:
break
walldist += 0.2
if not allow_virtual_flip:
# Eduro not supported yet
if reason in [REASON_VIRTUAL_BUMPER,]:
# virtual bumper already tried to backup a bit
continue
# for large slope rather return home
break
# re-try with bigger distance
print(self.time, "Re-try because of", reason)
for repeat in range(2):
self.follow_wall(radius=walldist, right_wall=not right_wall, timeout=timedelta(seconds=30), dist_limit=2.0,
flipped=not flipped, pitch_limit=RETURN_LIMIT_PITCH, roll_limit=RETURN_LIMIT_ROLL)
dist, reason = self.follow_wall(radius=walldist, right_wall=right_wall, timeout=timedelta(seconds=40), dist_limit=4.0,
pitch_limit=LIMIT_PITCH, roll_limit=LIMIT_ROLL, flipped=flipped)
total_dist += dist
if reason is None:
break
if reason in [REASON_LORA, REASON_DIST_REACHED]:
break
walldist += 0.2
walldist = self.walldist
if reason in [REASON_LORA,]:
break
def play_system_track(self):
print("SubT Challenge Ver1!")
try:
with EmergencyStopMonitor(self):
allow_virtual_flip = self.symmetric
if distance(self.offset, (0, 0)) > 0.1 or self.init_path is not None:
self.system_nav_trace(self.init_path)
# self.go_straight(2.5) # go to the tunnel entrance - commented out for testing
walldist = self.walldist
total_dist = 0.0
start_time = self.sim_time_sec
while self.sim_time_sec - start_time < self.timeout.total_seconds():
if self.sim_time_sec - start_time > self.timeout.total_seconds():
print('Total Timeout Reached', self.timeout.total_seconds())
break
timeout = timedelta(seconds=self.timeout.total_seconds() - (self.sim_time_sec - start_time))
print('Current timeout', timeout)
dist, reason = self.follow_wall(radius=walldist, right_wall=self.use_right_wall, timeout=timeout,
pitch_limit=LIMIT_PITCH, roll_limit=LIMIT_ROLL)
total_dist += dist
if reason is None or reason in [REASON_LORA,]:
break
walldist += 0.2
if not allow_virtual_flip:
# Eduro not supported yet
if reason in [REASON_VIRTUAL_BUMPER,]:
# virtual bumper already tried to backup a bit
continue
# for large slope rather return home
break
# re-try with bigger distance
print(self.time, "Re-try because of", reason)
for repeat in range(2):
self.follow_wall(radius=walldist, right_wall=not self.use_right_wall, timeout=timedelta(seconds=30), dist_limit=2.0,
flipped=allow_virtual_flip, pitch_limit=RETURN_LIMIT_PITCH, roll_limit=RETURN_LIMIT_ROLL)
dist, reason = self.follow_wall(radius=walldist, right_wall=self.use_right_wall, timeout=timedelta(seconds=40), dist_limit=4.0,
pitch_limit=LIMIT_PITCH, roll_limit=LIMIT_ROLL)
total_dist += dist
if reason is None:
break
if reason in [REASON_LORA, REASON_DIST_REACHED]:
break
walldist += 0.2
walldist = self.walldist
if reason in [REASON_LORA,]:
break
if self.use_return_trace and self.local_planner is not None:
self.stdout(self.time, "Going HOME %.3f" % dist, reason)
if allow_virtual_flip:
self.flipped = True # return home backwards
self.scan = None
self.return_home(2 * self.timeout, home_threshold=1.0)
self.send_speed_cmd(0, 0)
else:
print(self.time, "Going HOME", reason)
if not allow_virtual_flip:
self.turn(math.radians(90), timeout=timedelta(seconds=20))
self.turn(math.radians(90), timeout=timedelta(seconds=20))
self.robust_follow_wall(radius=self.walldist, right_wall=not self.use_right_wall, timeout=3*self.timeout, dist_limit=3*total_dist,
flipped=allow_virtual_flip, pitch_limit=RETURN_LIMIT_PITCH, roll_limit=RETURN_LIMIT_ROLL)
if self.artifacts:
self.bus.publish('artf_xyz', [[artifact_data, round(x*1000), round(y*1000), round(z*1000)]
for artifact_data, (x, y, z) in self.artifacts])
except EmergencyStopException:
print(self.time, "EMERGENCY STOP - terminating")
self.send_speed_cmd(0, 0)
self.wait(timedelta(seconds=3))
#############################################
def go_to_entrance(self):
"""
Navigate to the base station tile end
"""
dx, dy, __ = self.offset
trace = Trace() # starts by default at (0, 0, 0) and the robots are placed X = -7.5m (variable Y)
trace.add_line_to((-4.5 - dx, -dy, 0)) # in front of the tunnel/entrance
trace.add_line_to((2.5 - dx, -dy, 0)) # 2.5m inside
trace.reverse()
self.follow_trace(trace, timeout=timedelta(seconds=30), max_target_distance=2.5, safety_limit=0.2)
def play_virtual_part(self):
self.stdout("Waiting for origin ...")
self.origin = None # invalidate origin
self.origin_error = False
self.bus.publish('request_origin', True)
while self.origin is None and not self.origin_error:
self.update()
self.stdout('Origin:', self.origin, self.robot_name)
if self.origin is not None:
x, y, z = self.origin
x1, y1, z1 = self.xyz
self.offset = x - x1, y - y1, z - z1
self.stdout('Offset:', self.offset)
heading = quaternion.heading(self.origin_quat)
self.stdout('heading', math.degrees(heading), 'angle', math.degrees(math.atan2(-y, -x)), 'dist', math.hypot(x, y))
self.go_to_entrance()
else:
# lost in tunnel
self.stdout('Lost in tunnel:', self.origin_error, self.offset)
start_time = self.sim_time_sec
for loop in range(100):
self.collision_detector_enabled = True
if self.sim_time_sec - start_time > self.timeout.total_seconds():
print('Total Timeout Reached', self.timeout.total_seconds())
break
timeout = timedelta(seconds=self.timeout.total_seconds() - (self.sim_time_sec - start_time))
print('Current timeout', timeout)
dist, reason = self.follow_wall(radius=self.walldist, right_wall=self.use_right_wall, # was radius=0.9
timeout=timeout, pitch_limit=LIMIT_PITCH, roll_limit=None)
self.collision_detector_enabled = False
if reason == REASON_VIRTUAL_BUMPER:
assert self.virtual_bumper is not None
self.virtual_bumper.reset_counters()
# the robot probably ended up in a cycle
self.return_home(timedelta(seconds=10))
# try something crazy if you do not have other ideas ...
before_center = self.use_center
self.use_center = True
dist, reason = self.follow_wall(radius=self.walldist, right_wall=self.use_right_wall, # was radius=0.9
timeout=timedelta(seconds=60), pitch_limit=LIMIT_PITCH, roll_limit=None)
self.use_center = before_center
if reason is None or reason != REASON_PITCH_LIMIT:
continue
if reason is None or reason != REASON_PITCH_LIMIT:
break
self.stdout(self.time, "Microstep HOME %d %.3f" % (loop, dist), reason)
self.go_straight(-0.3, timeout=timedelta(seconds=10))
self.return_home(timedelta(seconds=10))
self.stdout("Artifacts:", self.artifacts)
self.stdout(self.time, "Going HOME %.3f" % dist, reason)
self.return_home(2 * self.timeout)
self.send_speed_cmd(0, 0)
if self.artifacts:
self.bus.publish('artf_xyz', [[artifact_data, round(x*1000), round(y*1000), round(z*1000)]
for artifact_data, (x, y, z) in self.artifacts])
self.wait(timedelta(seconds=10), use_sim_time=True)
def dumplog(self):
import os
filename = self.bus.logger.filename # deep hack
self.stdout("Dump Log:", filename)
size = os.stat(filename).st_size
self.stdout("Size:", size)
with open(filename, 'rb') as f:
for i in range(0, size, 100):
self.stdout(i, f.read(100))
self.stdout("Dump END")
def play_virtual_track(self):
self.stdout("SubT Challenge Ver64!")
self.stdout("Waiting for robot_name ...")
while self.robot_name is None:
self.update()
self.stdout('robot_name:', self.robot_name)
if self.use_right_wall == 'auto':
self.use_right_wall = self.robot_name.endswith('R')
self.use_center = self.robot_name.endswith('C')
self.stdout('Use right wall:', self.use_right_wall)
times_sec = [int(x) for x in self.robot_name[1:-1].split('F')]
self.stdout('Using times', times_sec)
# add extra sleep to give a chance to the other robot (based on name)
self.wait(timedelta(seconds=times_sec[0]), use_sim_time=True)
# potential wrong artifacts:
self.stdout('Artifacts before start:', self.artifacts)
for timeout_sec in times_sec[1:]:
self.timeout = timedelta(seconds=timeout_sec)
self.play_virtual_part()
self.stdout('Final xyz:', self.xyz)
x, y, z = self.xyz
x0, y0, z0 = self.offset
self.stdout('Final xyz (DARPA coord system):', (x + x0, y + y0, z + z0))
self.wait(timedelta(seconds=30), use_sim_time=True)
# self.dumplog()
# self.wait(timedelta(seconds=10), use_sim_time=True)
#############################################
def play(self):
if self.is_virtual:
return self.play_virtual_track()
else:
return self.play_system_track()
def start(self):
self.thread = threading.Thread(target=self.play)
self.thread.start()
def is_alive(self):
return self.thread.is_alive()
def request_stop(self):
self.bus.shutdown()
def join(self, timeout=None):
self.thread.join(timeout)
def main():
import argparse
from osgar.lib.config import config_load
from osgar.record import record
parser = argparse.ArgumentParser(description='SubT Challenge')
subparsers = parser.add_subparsers(help='sub-command help', dest='command')
subparsers.required = True
parser_run = subparsers.add_parser('run', help='run on real HW')
parser_run.add_argument('config', nargs='+', help='configuration file')
parser_run.add_argument('--note', help='add description')
parser_run.add_argument('--walldist', help='distance for wall following (default: %(default)sm)', default=1.0, type=float)
parser_run.add_argument('--side', help='which side to follow', choices=['left', 'right', 'auto'], required=True)
parser_run.add_argument('--speed', help='maximum speed (default: from config)', type=float)
parser_run.add_argument('--timeout', help='seconds of exploring before going home (default: %(default)s)',
type=int, default=10*60)
parser_run.add_argument('--log', nargs='?', help='record log filename')
parser_run.add_argument('--init-offset', help='initial 3D offset accepted as a string of comma separated values (meters)')
parser_run.add_argument('--init-path', help='initial path to be followed from (0, 0). 2D coordinates are separated by ;')
parser_run.add_argument('--start-paused', dest='start_paused', action='store_true',
help='start robot Paused and wait for LoRa Continue command')
parser_replay = subparsers.add_parser('replay', help='replay from logfile')
parser_replay.add_argument('logfile', help='recorded log file')
parser_replay.add_argument('--force', '-F', dest='force', action='store_true', help='force replay even for failing output asserts')
parser_replay.add_argument('--config', nargs='+', help='force alternative configuration file')
args = parser.parse_args()
if args.command == 'replay':
from osgar.replay import replay
args.module = 'app'
app = replay(args, application=SubTChallenge)
app.play()
elif args.command == 'run':
# To reduce latency spikes as described in https://morepypy.blogspot.com/2019/01/pypy-for-low-latency-systems.html.
# Increased latency leads to uncontrolled behavior and robot either missing turns or hitting walls.
# Disabled garbage collection needs to be paired with gc.collect() at place(s) that are not time sensitive.
gc.disable()
cfg = config_load(*args.config, application=SubTChallenge)
# apply overrides from command line
cfg['robot']['modules']['app']['init']['walldist'] = args.walldist
if args.side == 'auto':
cfg['robot']['modules']['app']['init']['right_wall'] = 'auto'
else:
cfg['robot']['modules']['app']['init']['right_wall'] = args.side == 'right'
cfg['robot']['modules']['app']['init']['timeout'] = args.timeout
if args.init_offset is not None:
x, y, z = [float(x) for x in args.init_offset.split(',')]
cfg['robot']['modules']['app']['init']['init_offset'] = [int(x*1000), int(y*1000), int(z*1000)]
if args.init_path is not None:
cfg['robot']['modules']['app']['init']['init_path'] = args.init_path
if args.speed is not None:
cfg['robot']['modules']['app']['init']['max_speed'] = args.speed
cfg['robot']['modules']['app']['init']['start_paused'] = args.start_paused
prefix = os.path.basename(args.config[0]).split('.')[0] + '-'
record(cfg, prefix, args.log)
if __name__ == "__main__":
main()
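# Illustrative command lines only; the config and log file names below are
# hypothetical placeholders, but the flags match the argparse setup above.
#   python3 main.py run subt-robot.json --side right --walldist 1.2 --timeout 600
#   python3 main.py replay subt-run.log --config subt-robot.json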
# vim: expandtab sw=4 ts=4
|
nettle.py
|
#!/usr/bin/python2
import urllib2
import threading
import logging
from time import sleep
class NetTle:
def __init__(self, tle_handler, config):
self.log = logging.getLogger('boresight')
self.tle_handler = tle_handler
self.config = config
self.satellites = self.config['sats']
self.url = self.config['tle_url']
self._start()
self._running = True
self._worker = threading.Thread(target=self._tle_worker)
self._worker.setDaemon(True)
self._worker.start()
def group_tles(self, lst, n):
for i in xrange(0, len(lst), n):
yield lst[i:i+n]
def read_tle(self, url):
response = urllib2.urlopen(url)
data = response.read()
data = data.replace("\r\n", "\n")
data = data.split('\n')
s = self.group_tles(data, 3)
return s
def get_tle(self, url, name):
data = self.read_tle(url)
element = filter(lambda x: x[0].find(name) != -1, data)
return element
def get_all_tle(self, url):
tles = []
s = self.read_tle(url)
for element in s:
tles.append(element)
return tles
def get_selected_tle(self, satellites, url):
elements = []
s = self.read_tle(url)
for element in s:
if (element[0] in satellites):
elements.append(element)
return elements
def exit(self):
self._running = False
self._stop()
def _start(self):
self.log.info("Starting TLE Module.")
def _stop(self):
self.log.info("Stopping TLE Module.")
def _tle_worker(self):
"""
Runs as a thread
"""
while self._running:
try:
elements = self.get_selected_tle(self.satellites, self.url)
self.log.info("Downloading elements.")
if (len(elements) > 0):
self.tle_handler(elements)
sleep(300.0)
except Exception as inst:
print type(inst) # the exception instance
print inst.args # arguments stored in .args
print inst # __str__ allows args to be printed directly
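# The function below is a hedged usage sketch added for illustration; it is
# defined but never called. The handler, satellite name, and URL are
# hypothetical placeholders - real values come from the boresight
# configuration that is normally passed in.
def _nettle_usage_example():
    def handle_elements(elements):
        # Print each downloaded 3-line element set as the worker delivers it.
        for element in elements:
            print(element)
    config = {
        'sats': ['EXAMPLE SAT 1'],                # hypothetical satellite name
        'tle_url': 'http://example.com/tle.txt',  # hypothetical TLE source
    }
    tle = NetTle(handle_elements, config)
    sleep(5.0)  # give the daemon worker a moment to attempt one fetch
    tle.exit()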
|
threads2.py
|
# Python program to illustrate the concept
# of threading
import threading
import os
def task1():
print("Task 1 assigned to thread: {}".format(threading.current_thread().name))
print("ID of process running task 1: {}".format(os.getpid()))
def task2():
print("Task 2 assigned to thread: {}".format(threading.current_thread().name))
print("ID of process running task 2: {}".format(os.getpid()))
if __name__ == "__main__":
# print ID of current process
print("ID of process running main program: {}".format(os.getpid()))
# print name of main thread
print("Main thread name: {}".format(threading.main_thread().name))
# creating threads
t1 = threading.Thread(target=task1, name='t1')
t2 = threading.Thread(target=task2, name='t2')
# starting threads
t1.start()
t2.start()
# wait until all threads finish
t1.join()
t2.join()
|
app.py
|
# encoding: utf-8
'''
A REST API for Salt
===================
.. py:currentmodule:: salt.netapi.rest_cherrypy.app
:depends:
- CherryPy Python module.
Note: there is a `known SSL traceback for CherryPy versions 3.2.5 through
3.7.x <https://github.com/cherrypy/cherrypy/issues/1298>`_. Please use
version 3.2.3 or the latest 10.x version instead.
:optdepends: - ws4py Python module for websockets support.
:client_libraries:
- Java: https://github.com/SUSE/salt-netapi-client
- Python: https://github.com/saltstack/pepper
:setup:
All steps below are performed on the machine running the Salt Master
daemon. Configuration goes into the Master configuration file.
1. Install ``salt-api``. (This step varies between OS and Linux distros.
Some package systems have a split package, others include salt-api in
the main Salt package. Ensure the ``salt-api --version`` output matches
the ``salt --version`` output.)
2. Install CherryPy. (Read the version caveat in the section above.)
3. Optional: generate self-signed SSL certificates.
Using a secure HTTPS connection is strongly recommended since Salt
eauth authentication credentials will be sent over the wire.
1. Install the PyOpenSSL package.
2. Generate a self-signed certificate using the
:py:func:`~salt.modules.tls.create_self_signed_cert` execution
function.
.. code-block:: bash
salt-call --local tls.create_self_signed_cert
4. Edit the master config to create at least one external auth user or
group following the :ref:`full external auth instructions <acl-eauth>`.
5. Edit the master config with the following production-ready example to
enable the ``rest_cherrypy`` module. (Adjust cert paths as needed, or
disable SSL (not recommended!).)
.. code-block:: yaml
rest_cherrypy:
port: 8000
ssl_crt: /etc/pki/tls/certs/localhost.crt
ssl_key: /etc/pki/tls/certs/localhost.key
6. Restart the ``salt-master`` daemon.
7. Start the ``salt-api`` daemon.
:configuration:
All available configuration options are detailed below. These settings
configure the CherryPy HTTP server and do not apply when using an external
server such as Apache or Nginx.
port
**Required**
The port for the webserver to listen on.
host : ``0.0.0.0``
The socket interface for the HTTP server to listen on.
debug : ``False``
Starts the web server in development mode. It will reload itself when
the underlying code is changed and will output more debugging info.
log_access_file
Path to a file to write HTTP access logs.
.. versionadded:: 2016.11.0
log_error_file
Path to a file to write HTTP error logs.
.. versionadded:: 2016.11.0
ssl_crt
The path to an SSL certificate. (See below)
ssl_key
The path to the private key for your SSL certificate. (See below)
ssl_chain
(Optional when using PyOpenSSL) the certificate chain to pass to
``Context.load_verify_locations``.
disable_ssl
A flag to disable SSL. Warning: your Salt authentication credentials
will be sent in the clear!
webhook_disable_auth : False
The :py:class:`Webhook` URL requires authentication by default but
external services cannot always be configured to send authentication.
See the Webhook documentation for suggestions on securing this
interface.
webhook_url : /hook
Configure the URL endpoint for the :py:class:`Webhook` entry point.
thread_pool : ``100``
The number of worker threads to start up in the pool.
socket_queue_size : ``30``
Specify the maximum number of HTTP connections to queue.
expire_responses : True
Whether to check for and kill HTTP responses that have exceeded the
default timeout.
.. deprecated:: 2016.11.9,2017.7.3,2018.3.0
The "expire_responses" configuration setting, which corresponds
to the ``timeout_monitor`` setting in CherryPy, is no longer
supported in CherryPy versions >= 12.0.0.
max_request_body_size : ``1048576``
Maximum size for the HTTP request body.
collect_stats : False
Collect and report statistics about the CherryPy server
Reports are available via the :py:class:`Stats` URL.
stats_disable_auth : False
Do not require authentication to access the ``/stats`` endpoint.
.. versionadded:: 2018.3.0
static
A filesystem path to static HTML/JavaScript/CSS/image assets.
static_path : ``/static``
The URL prefix to use when serving static assets out of the directory
specified in the ``static`` setting.
enable_sessions : ``True``
Enable or disable all endpoints that rely on session cookies. This can
be useful to enforce only header-based authentication.
.. versionadded:: 2017.7.0
app : ``index.html``
A filesystem path to an HTML file that will be served as a static file.
This is useful for bootstrapping a single-page JavaScript app.
Warning! If you set this option to a custom web application, anything
that uses cookie-based authentication is vulnerable to XSRF attacks.
Send the custom ``X-Auth-Token`` header instead and consider disabling
the ``enable_sessions`` setting.
.. versionchanged:: 2017.7.0
Add a proof-of-concept JavaScript single-page app.
app_path : ``/app``
The URL prefix to use for serving the HTML file specified in the ``app``
setting. This should be a simple name containing no slashes.
Any path information after the specified path is ignored; this is
useful for apps that utilize the HTML5 history API.
root_prefix : ``/``
A URL path to the main entry point for the application. This is useful
for serving multiple applications from the same URL.
.. _rest_cherrypy-auth:
Authentication
--------------
Authentication is performed by passing a session token with each request.
Tokens are generated via the :py:class:`Login` URL.
The token may be sent in one of two ways: as a custom header or as a session
cookie. The latter is far more convenient for clients that support cookies.
* Include a custom header named :mailheader:`X-Auth-Token`.
For example, using curl:
.. code-block:: bash
curl -sSk https://localhost:8000/login \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=auto
Copy the ``token`` value from the output and include it in subsequent requests:
.. code-block:: bash
curl -sSk https://localhost:8000 \\
-H 'Accept: application/x-yaml' \\
-H 'X-Auth-Token: 697adbdc8fe971d09ae4c2a3add7248859c87079'\\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
* Sent via a cookie. This option is a convenience for HTTP clients that
automatically handle cookie support (such as browsers).
For example, using curl:
.. code-block:: bash
# Write the cookie file:
curl -sSk https://localhost:8000/login \\
-c ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=auto
# Read the cookie file:
curl -sSk https://localhost:8000 \\
-b ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
Another example using the :program:`requests` library in Python:
.. code-block:: python
>>> import requests
>>> session = requests.Session()
>>> session.post('http://localhost:8000/login', json={
'username': 'saltdev',
'password': 'saltdev',
'eauth': 'auto',
})
<Response [200]>
>>> resp = session.post('http://localhost:8000', json=[{
'client': 'local',
'tgt': '*',
'fun': 'test.arg',
'arg': ['foo', 'bar'],
'kwarg': {'baz': 'Baz!'},
}])
>>> resp.json()
{u'return': [{
...snip...
}]}
.. seealso:: You can bypass the session handling via the :py:class:`Run` URL.
Usage
-----
This interface directly exposes Salt's :ref:`Python API <python-api>`.
Everything possible at the CLI is possible through the Python API. Commands are
executed on the Salt Master.
The root URL (``/``) is RPC-like in that it accepts instructions in the request
body for what Salt functions to execute, and the response contains the result
of those function calls.
For example:
.. code-block:: text
% curl -sSi https://localhost:8000 \
-H 'Content-type: application/json' \
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping"
}]'
HTTP/1.1 200 OK
Content-Type: application/json
[...snip...]
{"return": [{"jerry": true}]}
The request body must be an array of commands. Use this workflow to build a
command:
1. Choose a client interface.
2. Choose a function.
3. Fill out the remaining parameters needed for the chosen client.
The ``client`` field is a reference to the main Python classes used in Salt's
Python API. Read the full :ref:`Client APIs <client-apis>` documentation, but
in short:
* "local" uses :py:class:`LocalClient <salt.client.LocalClient>` which sends
commands to Minions. Equivalent to the ``salt`` CLI command.
* "runner" uses :py:class:`RunnerClient <salt.runner.RunnerClient>` which
invokes runner modules on the Master. Equivalent to the ``salt-run`` CLI
command.
* "wheel" uses :py:class:`WheelClient <salt.wheel.WheelClient>` which invokes
wheel modules on the Master. Wheel modules do not have a direct CLI
equivalent but they typically manage Master-side resources such as state
files, pillar files, the Salt config files, and the :py:mod:`key wheel module
<salt.wheel.key>` exposes similar functionality as the ``salt-key`` CLI
command.
Most clients have variants like synchronous or asynchronous execution as well as
others like batch execution. See the :ref:`full list of client interfaces
<client-interfaces>`.
Each client requires different arguments and sometimes has different syntax.
For example, ``LocalClient`` requires the ``tgt`` argument because it forwards
the command to Minions and the other client interfaces do not. ``LocalClient``
also takes ``arg`` (array) and ``kwarg`` (dictionary) arguments because these
values are sent to the Minions and used to execute the requested function
there. ``RunnerClient`` and ``WheelClient`` are executed directly on the Master
and thus do not need or accept those arguments.
Read the method signatures in the client documentation linked above, but
hopefully an example will help illustrate the concept. This example causes Salt
to execute two functions -- the :py:func:`test.arg execution function
<salt.modules.test.arg>` using ``LocalClient`` and the :py:func:`test.arg
runner function <salt.runners.test.arg>` using ``RunnerClient``; note the
different structure for each command. The results for both are combined and
returned as one response.
.. code-block:: text
% curl -b ~/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "test.arg",
"arg": ["positional arg one", "positional arg two"],
"kwarg": {
"keyword arg one": "Hello from a minion",
"keyword arg two": "Hello again from a minion"
}
},
{
"client": "runner",
"fun": "test.arg",
"keyword arg one": "Hello from a master",
"keyword arg two": "Runners do not support positional args"
}
]
'
HTTP/1.1 200 OK
[...snip...]
{
"return": [
{
"jerry": {
"args": [
"positional arg one",
"positional arg two"
],
"kwargs": {
"keyword arg one": "Hello from a minion",
"keyword arg two": "Hello again from a minion",
[...snip...]
}
},
[...snip; other minion returns here...]
},
{
"args": [],
"kwargs": {
"keyword arg two": "Runners do not support positional args",
"keyword arg one": "Hello from a master"
}
}
]
}
One more example, this time with more commonly used functions:
.. code-block:: text
curl -b /tmp/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "state.sls",
"kwarg": {
"mods": "apache",
"pillar": {
"lookup": {
"wwwdir": "/srv/httpd/htdocs"
}
}
}
},
{
"client": "runner",
"fun": "cloud.create",
"provider": "my-ec2-provider",
"instances": "my-centos-6",
"image": "ami-1624987f",
"delvol_on_destroy": true
}
]
'
HTTP/1.1 200 OK
[...snip...]
{
"return": [
{
"jerry": {
"pkg_|-install_apache_|-httpd_|-installed": {
[...snip full state return here...]
}
}
[...snip other minion returns here...]
},
{
[...snip full salt-cloud output here...]
}
]
}
Content negotiation
-------------------
This REST interface is flexible in what data formats it will accept as well
as what formats it will return (e.g., JSON, YAML, urlencoded).
* Specify the format of data in the request body by including the
:mailheader:`Content-Type` header.
* Specify the desired data format for the response body with the
:mailheader:`Accept` header.
We recommend the JSON format for most HTTP requests. urlencoded data is simple
and cannot express complex data structures -- and that is often required for
some Salt commands, such as starting a state run that uses Pillar data. Salt's
CLI tool can reformat strings passed in at the CLI into complex data
structures, and that behavior also works via salt-api, but that can be brittle
and since salt-api can accept JSON it is best just to send JSON.
Here is an example of sending urlencoded data:
.. code-block:: bash
curl -sSik https://localhost:8000 \\
-b ~/cookies.txt \\
-d client=runner \\
-d fun='jobs.lookup_jid' \\
-d jid='20150129182456704682'
.. admonition:: urlencoded data caveats
* Only a single command may be sent per HTTP request.
* Repeating the ``arg`` parameter multiple times will cause those
parameters to be combined into a single list.
Note, some popular frameworks and languages (notably jQuery, PHP, and
Ruby on Rails) will automatically append empty brackets onto repeated
query string parameters. E.g., ``?foo[]=fooone&foo[]=footwo``. This is
**not** supported; send ``?foo=fooone&foo=footwo`` instead, or send JSON
or YAML.
A note about ``curl``
The ``-d`` flag to curl does *not* automatically urlencode data which can
affect passwords and other data that contains characters that must be
encoded. Use the ``--data-urlencode`` flag instead. E.g.:
.. code-block:: bash
curl -ksi http://localhost:8000/login \\
-H "Accept: application/json" \\
-d username='myapiuser' \\
--data-urlencode password='1234+' \\
-d eauth='pam'
Performance Expectations and Recommended Usage
==============================================
This module provides a thin wrapper around :ref:`Salt's Python API
<python-api>`. Executing a Salt command via rest_cherrypy is directly analogous
to executing a Salt command via Salt's CLI (which also uses the Python API) --
they share the same semantics, performance characteristics, and 98% of the same
code. As a rule-of-thumb: if you wouldn't do it at the CLI don't do it via this
API.
Long-Running HTTP Connections
-----------------------------
The CherryPy server is a production-ready, threading HTTP server written in
Python. Because it makes use of a thread pool to process HTTP requests it is
not ideally suited to maintaining large numbers of concurrent, synchronous
connections. On moderate hardware with default settings it should top-out at
around 30 to 50 concurrent connections.
That number of long-running, synchronous Salt processes is also not ideal. Like
at the CLI, each Salt command run will start a process that instantiates its
own ``LocalClient``, which instantiates its own listener to the Salt event bus,
and sends out its own periodic ``saltutil.find_job`` queries to determine if a
Minion is still running the command. Not exactly a lightweight operation.
Timeouts
--------
In addition to the above resource overhead for long-running connections, there
are the usual HTTP timeout semantics for the CherryPy server, any HTTP client
being used, as well as any hardware in between such as proxies, gateways, or
load balancers. rest_cherrypy can be configured not to time-out long responses
via the ``expire_responses`` setting, and both :py:class:`LocalClient
<salt.client.LocalClient>` and :py:class:`RunnerClient
<salt.runner.RunnerClient>` have their own timeout parameters that may be
passed as top-level keywords:
.. code-block:: bash
curl -b /tmp/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "test.sleep",
"kwarg": {"length": 30},
"timeout": 60
},
{
"client": "runner",
"fun": "test.sleep",
"kwarg": {"s_time": 30},
"timeout": 60
}
]
'
Best Practices
--------------
Given the performance overhead and HTTP timeouts for long-running operations
described above, the most effective and most scalable way to use both Salt and
salt-api is to run commands asynchronously using the ``local_async``,
``runner_async``, and ``wheel_async`` clients.
Running asynchronous jobs results in being able to process 3x more commands per second
for ``LocalClient`` and 17x more commands per second for ``RunnerClient``, in
addition to much less network traffic and memory requirements. Job returns can
be fetched from Salt's job cache via the ``/jobs/<jid>`` endpoint, or they can
be collected into a data store using Salt's :ref:`Returner system <returners>`.
The ``/events`` endpoint is specifically designed to handle long-running HTTP
connections and it exposes Salt's event bus which includes job returns.
Watching this endpoint first, then executing asynchronous Salt commands second,
is the most lightweight and scalable way to use ``rest_cherrypy`` while still
receiving job returns in real-time. But this requires clients that can properly
handle the inherent asynchronicity of that workflow.
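As a minimal sketch of that asynchronous workflow using the
:program:`requests` library (the credentials and target below are
illustrative placeholders; the asynchronous return carries the ``jid`` used
to poll the job cache, as described above):
.. code-block:: python
    >>> import requests
    >>> session = requests.Session()
    >>> session.post('http://localhost:8000/login', json={
        'username': 'saltdev',
        'password': 'saltdev',
        'eauth': 'auto',
    })
    <Response [200]>
    >>> async_resp = session.post('http://localhost:8000', json=[{
        'client': 'local_async',
        'tgt': '*',
        'fun': 'test.ping',
    }])
    >>> jid = async_resp.json()['return'][0]['jid']
    >>> session.get('http://localhost:8000/jobs/' + jid).json()
    {u'return': [{
        ...snip: minion returns appear here once the job completes...
    }]}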
Performance Tuning
------------------
The ``thread_pool`` and ``socket_queue_size`` settings can be used to increase
the capacity of rest_cherrypy to handle incoming requests. Keep an eye on RAM
usage as well as available file handles while testing changes to these
settings. As salt-api is a thin wrapper around Salt's Python API, also keep an
eye on the performance of Salt when testing.
Future Plans
------------
Now that Salt uses the Tornado concurrency library internally, we plan to
improve performance in the API by taking advantage of existing processes and
event listeners and to use lightweight coroutines to facilitate more
simultaneous HTTP connections and better support for synchronous operations.
That effort can be tracked in `issue 26505`__, but until that issue is closed
rest_cherrypy will remain the officially recommended REST API.
.. __: https://github.com/saltstack/salt/issues/26505
.. |req_token| replace:: a session token from :py:class:`~Login`.
.. |req_accept| replace:: the desired response format.
.. |req_ct| replace:: the format of the request body.
.. |res_ct| replace:: the format of the response body; depends on the
:mailheader:`Accept` request header.
.. |200| replace:: success
.. |400| replace:: bad or malformed request
.. |401| replace:: authentication required
.. |406| replace:: requested Content-Type not available
'''
# We need a custom pylintrc here...
# pylint: disable=W0212,E1101,C0103,R0201,W0221,W0613
# Import Python libs
from __future__ import absolute_import
import collections
import itertools
import functools
import logging
import os
import signal
import tarfile
from multiprocessing import Process, Pipe
logger = logging.getLogger(__name__)
# Import third-party libs
# pylint: disable=import-error, 3rd-party-module-not-gated
import cherrypy
try:
from cherrypy.lib import cpstats
except AttributeError:
cpstats = None
logger.warn('Import of cherrypy.cpstats failed. '
'Possible upstream bug: '
'https://github.com/cherrypy/cherrypy/issues/1444')
except ImportError:
cpstats = None
logger.warn('Import of cherrypy.cpstats failed.')
# pylint: enable=import-error, 3rd-party-module-not-gated
# Import Salt libs
import salt
import salt.auth
import salt.exceptions
import salt.utils.event
import salt.utils.json
import salt.utils.stringutils
import salt.utils.versions
import salt.utils.yaml
from salt.ext import six
from salt.ext.six import BytesIO
# Import salt-api libs
import salt.netapi
# Imports related to websocket
try:
from .tools import websockets
from . import event_processor
HAS_WEBSOCKETS = True
except ImportError:
websockets = type('websockets', (object,), {
'SynchronizingWebsocket': None,
})
HAS_WEBSOCKETS = False
def html_override_tool():
'''
Bypass the normal handler and serve HTML for all URLs
The ``app_path`` setting must be non-empty and the request must ask for
``text/html`` in the ``Accept`` header.
'''
apiopts = cherrypy.config['apiopts']
request = cherrypy.request
url_blacklist = (
apiopts.get('app_path', '/app'),
apiopts.get('static_path', '/static'),
)
if 'app' not in cherrypy.config['apiopts']:
return
if request.path_info.startswith(url_blacklist):
return
if request.headers.get('Accept') == '*/*':
return
try:
wants_html = cherrypy.lib.cptools.accept('text/html')
except cherrypy.HTTPError:
return
else:
if wants_html != 'text/html':
return
raise cherrypy.InternalRedirect(apiopts.get('app_path', '/app'))
def salt_token_tool():
'''
If the custom authentication header is supplied, put it in the cookie dict
so the rest of the session-based auth works as intended
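    For example, once a session token has been issued by the /login URL,
    either of these requests is equivalent (the token value is illustrative):
    .. code-block:: bash
        curl -sS localhost:8000/minions -H 'X-Auth-Token: 6d1b722e'
        curl -sS localhost:8000/minions -b 'session_id=6d1b722e'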
'''
x_auth = cherrypy.request.headers.get('X-Auth-Token', None)
# X-Auth-Token header trumps session cookie
if x_auth:
cherrypy.request.cookie['session_id'] = x_auth
def salt_api_acl_tool(username, request):
'''
    .. versionadded:: 2016.3.0
    Verifies user requests against the API whitelist of user/IP pairs in
    order to provide whitelisting for the API similar to the master, but
    enforced over the API.
    .. code-block:: yaml
rest_cherrypy:
api_acl:
users:
'*':
- 1.1.1.1
- 1.1.1.2
foo:
- 8.8.4.4
bar:
- '*'
:param username: Username to check against the API.
:type username: str
:param request: Cherrypy request to check against the API.
:type request: cherrypy.request
'''
failure_str = ("[api_acl] Authentication failed for "
"user {0} from IP {1}")
    success_str = ("[api_acl] Authentication successful for "
                   "user {0} from IP {1}")
pass_str = ("[api_acl] Authentication not checked for "
"user {0} from IP {1}")
acl = None
# Salt Configuration
salt_config = cherrypy.config.get('saltopts', None)
if salt_config:
# Cherrypy Config.
cherrypy_conf = salt_config.get('rest_cherrypy', None)
if cherrypy_conf:
# ACL Config.
acl = cherrypy_conf.get('api_acl', None)
ip = request.remote.ip
if acl:
users = acl.get('users', {})
if users:
if username in users:
if ip in users[username] or '*' in users[username]:
logger.info(success_str.format(username, ip))
return True
else:
logger.info(failure_str.format(username, ip))
return False
elif username not in users and '*' in users:
if ip in users['*'] or '*' in users['*']:
logger.info(success_str.format(username, ip))
return True
else:
logger.info(failure_str.format(username, ip))
return False
else:
logger.info(failure_str.format(username, ip))
return False
else:
logger.info(pass_str.format(username, ip))
return True
def salt_ip_verify_tool():
'''
If there is a list of restricted IPs, verify current
client is coming from one of those IPs.
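    A sketch of the corresponding master config (the addresses are
    placeholders); ``authorized_ips`` is the option name read below:
    .. code-block:: yaml
        rest_cherrypy:
            authorized_ips:
                - 10.0.0.10
                - 10.0.0.11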
'''
# This is overly cumbersome and crude,
# But, it's also safe... ish...
salt_config = cherrypy.config.get('saltopts', None)
if salt_config:
cherrypy_conf = salt_config.get('rest_cherrypy', None)
if cherrypy_conf:
auth_ip_list = cherrypy_conf.get('authorized_ips', None)
if auth_ip_list:
logger.debug("Found IP list: {0}".format(auth_ip_list))
rem_ip = cherrypy.request.headers.get('Remote-Addr', None)
logger.debug("Request from IP: {0}".format(rem_ip))
if rem_ip not in auth_ip_list:
logger.error("Blocked IP: {0}".format(rem_ip))
raise cherrypy.HTTPError(403, 'Bad IP')
def salt_auth_tool():
'''
Redirect all unauthenticated requests to the login page
'''
# Redirect to the login page if the session hasn't been authed
if 'token' not in cherrypy.session: # pylint: disable=W8601
raise cherrypy.HTTPError(401)
# Session is authenticated; inform caches
cherrypy.response.headers['Cache-Control'] = 'private'
def cors_tool():
'''
Handle both simple and complex CORS requests
Add CORS headers to each response. If the request is a CORS preflight
request swap out the default handler with a simple, single-purpose handler
that verifies the request and provides a valid CORS response.
'''
req_head = cherrypy.request.headers
resp_head = cherrypy.response.headers
# Always set response headers necessary for 'simple' CORS.
resp_head['Access-Control-Allow-Origin'] = req_head.get('Origin', '*')
resp_head['Access-Control-Expose-Headers'] = 'GET, POST'
resp_head['Access-Control-Allow-Credentials'] = 'true'
# Non-simple CORS preflight request; short-circuit the normal handler.
if cherrypy.request.method == 'OPTIONS':
ac_method = req_head.get('Access-Control-Request-Method', None)
allowed_methods = ['GET', 'POST']
allowed_headers = [
'Content-Type',
'X-Auth-Token',
'X-Requested-With',
]
if ac_method and ac_method in allowed_methods:
resp_head['Access-Control-Allow-Methods'] = ', '.join(allowed_methods)
resp_head['Access-Control-Allow-Headers'] = ', '.join(allowed_headers)
resp_head['Connection'] = 'keep-alive'
resp_head['Access-Control-Max-Age'] = '1400'
# CORS requests should short-circuit the other tools.
cherrypy.response.body = ''
cherrypy.response.status = 200
cherrypy.serving.request.handler = None
# Needed to avoid the auth_tool check.
if cherrypy.request.config.get('tools.sessions.on', False):
cherrypy.session['token'] = True
return True
# Be conservative in what you send
# Maps Content-Type to serialization functions; this is a tuple of tuples to
# preserve order of preference.
ct_out_map = (
('application/json', salt.utils.json.dumps),
('application/x-yaml', functools.partial(
salt.utils.yaml.safe_dump, default_flow_style=False)),
)
def hypermedia_handler(*args, **kwargs):
'''
Determine the best output format based on the Accept header, execute the
regular handler, and transform the output to the request content type (even
if it's an error).
:param args: Pass args through to the main handler
:param kwargs: Pass kwargs through to the main handler
'''
# Execute the real handler. Handle or pass-through any errors we know how
# to handle (auth & HTTP errors). Reformat any errors we don't know how to
# handle as a data structure.
try:
cherrypy.response.processors = dict(ct_out_map)
ret = cherrypy.serving.request._hypermedia_inner_handler(*args, **kwargs)
except (salt.exceptions.AuthenticationError,
salt.exceptions.AuthorizationError,
salt.exceptions.EauthAuthenticationError,
salt.exceptions.TokenAuthenticationError):
raise cherrypy.HTTPError(401)
except salt.exceptions.SaltInvocationError:
raise cherrypy.HTTPError(400)
except (salt.exceptions.SaltDaemonNotRunning,
salt.exceptions.SaltReqTimeoutError) as exc:
raise cherrypy.HTTPError(503, exc.strerror)
except salt.exceptions.SaltClientTimeout:
raise cherrypy.HTTPError(504)
except cherrypy.CherryPyException:
raise
except Exception as exc:
        # The TimeoutError exception class was removed in CherryPy 12.0.0, but
        # we still check for its existence and handle it when running
        # CherryPy < 12. The check was moved down here from the
        # SaltClientTimeout except clause because a one-line conditional
        # there throws a BaseException inheritance TypeError.
if hasattr(cherrypy, 'TimeoutError') and isinstance(exc, cherrypy.TimeoutError):
raise cherrypy.HTTPError(504)
import traceback
logger.debug("Error while processing request for: %s",
cherrypy.request.path_info,
exc_info=True)
cherrypy.response.status = 500
ret = {
'status': cherrypy.response.status,
            'return': '{0}'.format(traceback.format_exc())
if cherrypy.config['debug']
else "An unexpected error occurred"}
# Raises 406 if requested content-type is not supported
best = cherrypy.lib.cptools.accept([i for (i, _) in ct_out_map])
# Transform the output from the handler into the requested output format
cherrypy.response.headers['Content-Type'] = best
out = cherrypy.response.processors[best]
try:
response = out(ret)
if six.PY3:
response = salt.utils.stringutils.to_bytes(response)
return response
except Exception:
msg = 'Could not serialize the return data from Salt.'
logger.debug(msg, exc_info=True)
raise cherrypy.HTTPError(500, msg)
def hypermedia_out():
'''
Determine the best handler for the requested content type
Wrap the normal handler and transform the output from that handler into the
requested content type
'''
request = cherrypy.serving.request
request._hypermedia_inner_handler = request.handler
# If handler has been explicitly set to None, don't override.
if request.handler is not None:
request.handler = hypermedia_handler
def process_request_body(fn):
'''
A decorator to skip a processor function if process_request_body is False
'''
@functools.wraps(fn)
def wrapped(*args, **kwargs): # pylint: disable=C0111
if cherrypy.request.process_request_body is not False:
fn(*args, **kwargs)
return wrapped
def urlencoded_processor(entity):
'''
Accept x-www-form-urlencoded data (run through CherryPy's formatter)
and reformat it into a Low State data structure.
Since we can't easily represent complicated data structures with
key-value pairs, any more complicated requirements (e.g. compound
commands) must instead be delivered via JSON or YAML.
For example::
.. code-block:: bash
curl -si localhost:8000 -d client=local -d tgt='*' \\
-d fun='test.kwarg' -d arg='one=1' -d arg='two=2'
:param entity: raw POST data
'''
# First call out to CherryPy's default processor
cherrypy._cpreqbody.process_urlencoded(entity)
cherrypy.serving.request.unserialized_data = entity.params
cherrypy.serving.request.raw_body = ''
@process_request_body
def json_processor(entity):
'''
Unserialize raw POST data in JSON format to a Python data structure.
:param entity: raw POST data
'''
if six.PY2:
body = entity.fp.read()
else:
# https://github.com/cherrypy/cherrypy/pull/1572
contents = BytesIO()
body = entity.fp.read(fp_out=contents)
contents.seek(0)
body = salt.utils.stringutils.to_unicode(contents.read())
del contents
try:
cherrypy.serving.request.unserialized_data = salt.utils.json.loads(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid JSON document')
cherrypy.serving.request.raw_body = body
@process_request_body
def yaml_processor(entity):
'''
Unserialize raw POST data in YAML format to a Python data structure.
:param entity: raw POST data
'''
if six.PY2:
body = entity.fp.read()
else:
# https://github.com/cherrypy/cherrypy/pull/1572
contents = BytesIO()
body = entity.fp.read(fp_out=contents)
contents.seek(0)
body = salt.utils.stringutils.to_unicode(contents.read())
try:
cherrypy.serving.request.unserialized_data = salt.utils.yaml.safe_load(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid YAML document')
cherrypy.serving.request.raw_body = body
@process_request_body
def text_processor(entity):
'''
Attempt to unserialize plain text as JSON
Some large services still send JSON with a text/plain Content-Type. Those
services are bad and should feel bad.
:param entity: raw POST data
'''
if six.PY2:
body = entity.fp.read()
else:
# https://github.com/cherrypy/cherrypy/pull/1572
contents = BytesIO()
body = entity.fp.read(fp_out=contents)
contents.seek(0)
body = salt.utils.stringutils.to_unicode(contents.read())
try:
cherrypy.serving.request.unserialized_data = salt.utils.json.loads(body)
except ValueError:
cherrypy.serving.request.unserialized_data = body
cherrypy.serving.request.raw_body = body
def hypermedia_in():
'''
Unserialize POST/PUT data of a specified Content-Type.
The following custom processors all are intended to format Low State data
and will place that data structure into the request object.
:raises HTTPError: if the request contains a Content-Type that we do not
have a processor for
'''
# Be liberal in what you accept
ct_in_map = {
'application/x-www-form-urlencoded': urlencoded_processor,
'application/json': json_processor,
'application/x-yaml': yaml_processor,
'text/yaml': yaml_processor,
'text/plain': text_processor,
}
# Do not process the body for POST requests that have specified no content
# or have not specified Content-Length
if (cherrypy.request.method.upper() == 'POST'
and cherrypy.request.headers.get('Content-Length', '0') == '0'):
cherrypy.request.process_request_body = False
cherrypy.request.unserialized_data = None
cherrypy.request.body.processors.clear()
cherrypy.request.body.default_proc = cherrypy.HTTPError(
406, 'Content type not supported')
cherrypy.request.body.processors = ct_in_map
def lowdata_fmt():
'''
Validate and format lowdata from incoming unserialized request data
This tool requires that the hypermedia_in tool has already been run.
'''
if cherrypy.request.method.upper() != 'POST':
return
data = cherrypy.request.unserialized_data
# if the data was sent as urlencoded, we need to make it a list.
# this is a very forgiving implementation as different clients set different
# headers for form encoded data (including charset or something similar)
if data and isinstance(data, collections.Mapping):
# Make the 'arg' param a list if not already
if 'arg' in data and not isinstance(data['arg'], list):
data['arg'] = [data['arg']]
# Finally, make a Low State and put it in request
cherrypy.request.lowstate = [data]
else:
cherrypy.serving.request.lowstate = data
tools_config = {
'on_start_resource': [
('html_override', html_override_tool),
('salt_token', salt_token_tool),
],
'before_request_body': [
('cors_tool', cors_tool),
('salt_auth', salt_auth_tool),
('hypermedia_in', hypermedia_in),
],
'before_handler': [
('lowdata_fmt', lowdata_fmt),
('hypermedia_out', hypermedia_out),
('salt_ip_verify', salt_ip_verify_tool),
],
}
for hook, tool_list in tools_config.items():
for idx, tool_config in enumerate(tool_list):
tool_name, tool_fn = tool_config
setattr(cherrypy.tools, tool_name, cherrypy.Tool(
hook, tool_fn, priority=(50 + idx)))
###############################################################################
class LowDataAdapter(object):
'''
The primary entry point to Salt's REST API
'''
exposed = True
_cp_config = {
'tools.salt_token.on': True,
'tools.sessions.on': True,
'tools.sessions.timeout': 60 * 10, # 10 hours
# 'tools.autovary.on': True,
'tools.hypermedia_out.on': True,
'tools.hypermedia_in.on': True,
'tools.lowdata_fmt.on': True,
'tools.salt_ip_verify.on': True,
}
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.apiopts = cherrypy.config['apiopts']
self.api = salt.netapi.NetapiClient(self.opts)
def exec_lowstate(self, client=None, token=None):
'''
Pull a Low State data structure from request and execute the low-data
chunks through Salt. The low-data chunks will be updated to include the
authorization token for the current session.
'''
lowstate = cherrypy.request.lowstate
# Release the session lock before executing any potentially
# long-running Salt commands. This allows different threads to execute
# Salt commands concurrently without blocking.
if cherrypy.request.config.get('tools.sessions.on', False):
cherrypy.session.release_lock()
# if the lowstate loaded isn't a list, lets notify the client
if not isinstance(lowstate, list):
raise cherrypy.HTTPError(400, 'Lowstates must be a list')
# Make any requested additions or modifications to each lowstate, then
# execute each one and yield the result.
for chunk in lowstate:
if token:
chunk['token'] = token
if 'token' in chunk:
# Make sure that auth token is hex
try:
int(chunk['token'], 16)
except (TypeError, ValueError):
raise cherrypy.HTTPError(401, 'Invalid token')
if client:
chunk['client'] = client
# Make any 'arg' params a list if not already.
# This is largely to fix a deficiency in the urlencoded format.
if 'arg' in chunk and not isinstance(chunk['arg'], list):
chunk['arg'] = [chunk['arg']]
ret = self.api.run(chunk)
# Sometimes Salt gives us a return and sometimes an iterator
if isinstance(ret, collections.Iterator):
for i in ret:
yield i
else:
yield ret
@cherrypy.config(**{'tools.sessions.on': False})
def GET(self):
'''
An explanation of the API with links of where to go next
.. http:get:: /
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000
.. code-block:: text
GET / HTTP/1.1
Host: localhost:8000
Accept: application/json
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Type: application/json
'''
return {
'return': "Welcome",
'clients': salt.netapi.CLIENTS,
}
@cherrypy.tools.salt_token()
@cherrypy.tools.salt_auth()
def POST(self, **kwargs):
'''
Send one or more Salt commands in the request body
.. http:post:: /
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
:term:`lowstate` data describing Salt commands must be sent in the
request body.
**Example request:**
.. code-block:: bash
curl -sSik https://localhost:8000 \\
-b ~/cookies.txt \\
-H "Accept: application/x-yaml" \\
-H "Content-type: application/json" \\
-d '[{"client": "local", "tgt": "*", "fun": "test.ping"}]'
.. code-block:: text
POST / HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
X-Auth-Token: d40d1e1e
Content-Type: application/json
[{"client": "local", "tgt": "*", "fun": "test.ping"}]
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 200
Allow: GET, HEAD, POST
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
'''
return {
'return': list(self.exec_lowstate(
token=cherrypy.session.get('token')))
}
class Minions(LowDataAdapter):
'''
Convenience URLs for working with minions
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_auth.on': True,
})
def GET(self, mid=None):
'''
A convenience URL for getting lists of minions or getting minion
details
.. http:get:: /minions/(mid)
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/minions/ms-3
.. code-block:: text
GET /minions/ms-3 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 129005
Content-Type: application/x-yaml
return:
- ms-3:
grains.items:
...
'''
cherrypy.request.lowstate = [{
'client': 'local', 'tgt': mid or '*', 'fun': 'grains.items',
}]
return {
'return': list(self.exec_lowstate(
token=cherrypy.session.get('token'))),
}
def POST(self, **kwargs):
'''
Start an execution command and immediately return the job id
.. http:post:: /minions
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
Lowstate data describing Salt commands must be sent in the request
body. The ``client`` option will be set to
:py:meth:`~salt.client.LocalClient.local_async`.
**Example request:**
.. code-block:: bash
curl -sSi localhost:8000/minions \\
-b ~/cookies.txt \\
-H "Accept: application/x-yaml" \\
-d '[{"tgt": "*", "fun": "status.diskusage"}]'
.. code-block:: text
POST /minions HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Type: application/json
tgt=*&fun=status.diskusage
**Example response:**
.. code-block:: text
HTTP/1.1 202 Accepted
Content-Length: 86
Content-Type: application/x-yaml
return:
- jid: '20130603122505459265'
minions: [ms-4, ms-3, ms-2, ms-1, ms-0]
_links:
jobs:
- href: /jobs/20130603122505459265
'''
job_data = list(self.exec_lowstate(client='local_async',
token=cherrypy.session.get('token')))
cherrypy.response.status = 202
return {
'return': job_data,
'_links': {
'jobs': [{'href': '/jobs/{0}'.format(i['jid'])}
for i in job_data if i],
},
}
class Jobs(LowDataAdapter):
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_auth.on': True,
})
def GET(self, jid=None, timeout=''):
'''
A convenience URL for getting lists of previously run jobs or getting
the return from a single job
.. http:get:: /jobs/(jid)
List jobs or show a single job from the job cache.
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs
.. code-block:: text
GET /jobs HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
- '20121130104633606931':
Arguments:
- '3'
Function: test.fib
Start Time: 2012, Nov 30 10:46:33.606931
Target: jerry
Target-type: glob
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs/20121130104633606931
.. code-block:: text
GET /jobs/20121130104633606931 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
info:
- Arguments:
- '3'
Function: test.fib
Minions:
- jerry
Start Time: 2012, Nov 30 10:46:33.606931
Target: '*'
Target-type: glob
User: saltdev
jid: '20121130104633606931'
return:
- jerry:
- - 0
- 1
- 1
- 2
- 6.9141387939453125e-06
'''
lowstate = {'client': 'runner'}
if jid:
lowstate.update({'fun': 'jobs.list_job', 'jid': jid})
else:
lowstate.update({'fun': 'jobs.list_jobs'})
cherrypy.request.lowstate = [lowstate]
job_ret_info = list(self.exec_lowstate(
token=cherrypy.session.get('token')))
ret = {}
if jid:
ret['info'] = [job_ret_info[0]]
minion_ret = {}
returns = job_ret_info[0].get('Result')
for minion in returns:
if u'return' in returns[minion]:
minion_ret[minion] = returns[minion].get(u'return')
else:
minion_ret[minion] = returns[minion].get('return')
ret['return'] = [minion_ret]
else:
ret['return'] = [job_ret_info[0]]
return ret
class Keys(LowDataAdapter):
'''
Convenience URLs for working with minion keys
.. versionadded:: 2014.7.0
These URLs wrap the functionality provided by the :py:mod:`key wheel
module <salt.wheel.key>` functions.
'''
def GET(self, mid=None):
'''
Show the list of minion keys or detail on a specific key
.. versionadded:: 2014.7.0
.. http:get:: /keys/(mid)
List all keys or show a specific key
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys
.. code-block:: text
GET /keys HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
local:
- master.pem
- master.pub
minions:
- jerry
minions_pre: []
minions_rejected: []
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys/jerry
.. code-block:: text
GET /keys/jerry HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
minions:
jerry: 51:93:b3:d0:9f:3a:6d:e5:28:67:c2:4b:27:d6:cd:2b
'''
if mid:
lowstate = [{
'client': 'wheel',
'fun': 'key.finger',
'match': mid,
}]
else:
lowstate = [{
'client': 'wheel',
'fun': 'key.list_all',
}]
cherrypy.request.lowstate = lowstate
result = self.exec_lowstate(token=cherrypy.session.get('token'))
return {'return': next(result, {}).get('data', {}).get('return', {})}
@cherrypy.config(**{'tools.hypermedia_out.on': False, 'tools.sessions.on': False})
def POST(self, **kwargs):
r'''
Easily generate keys for a minion and auto-accept the new key
Accepts all the same parameters as the :py:func:`key.gen_accept
<salt.wheel.key.gen_accept>`.
.. note:: A note about ``curl``
Avoid using the ``-i`` flag or HTTP headers will be written and
produce an invalid tar file.
Example partial kickstart script to bootstrap a new minion:
.. code-block:: text
%post
mkdir -p /etc/salt/pki/minion
curl -sSk https://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
| tar -C /etc/salt/pki/minion -xf -
mkdir -p /etc/salt/minion.d
printf 'master: 10.0.0.5\nid: jerry' > /etc/salt/minion.d/id.conf
%end
.. http:post:: /keys
Generate a public and private key and return both as a tarball
Authentication credentials must be passed in the request.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sSk https://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
-o jerry-salt-keys.tar
.. code-block:: text
POST /keys HTTP/1.1
Host: localhost:8000
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 10240
Content-Disposition: attachment; filename="saltkeys-jerry.tar"
Content-Type: application/x-tar
jerry.pub0000644000000000000000000000070300000000000010730 0ustar 00000000000000
'''
lowstate = cherrypy.request.lowstate
lowstate[0].update({
'client': 'wheel',
'fun': 'key.gen_accept',
})
if 'mid' in lowstate[0]:
lowstate[0]['id_'] = lowstate[0].pop('mid')
result = self.exec_lowstate()
ret = next(result, {}).get('data', {}).get('return', {})
pub_key = ret.get('pub', '')
pub_key_file = tarfile.TarInfo('minion.pub')
pub_key_file.size = len(pub_key)
priv_key = ret.get('priv', '')
priv_key_file = tarfile.TarInfo('minion.pem')
priv_key_file.size = len(priv_key)
fileobj = six.StringIO()
tarball = tarfile.open(fileobj=fileobj, mode='w')
tarball.addfile(pub_key_file, six.StringIO(pub_key))
tarball.addfile(priv_key_file, six.StringIO(priv_key))
tarball.close()
headers = cherrypy.response.headers
headers['Content-Disposition'] = 'attachment; filename="saltkeys-{0}.tar"'.format(lowstate[0]['id_'])
headers['Content-Type'] = 'application/x-tar'
headers['Content-Length'] = fileobj.len
headers['Cache-Control'] = 'no-cache'
fileobj.seek(0)
return fileobj
class Login(LowDataAdapter):
'''
Log in to receive a session token
:ref:`Authentication information <rest_cherrypy-auth>`.
'''
def __init__(self, *args, **kwargs):
super(Login, self).__init__(*args, **kwargs)
self.auth = salt.auth.Resolver(self.opts)
def GET(self):
'''
Present the login interface
.. http:get:: /login
An explanation of how to log in.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/login
.. code-block:: text
GET /login HTTP/1.1
Host: localhost:8000
Accept: text/html
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Type: text/html
'''
cherrypy.response.headers['WWW-Authenticate'] = 'Session'
return {
'status': cherrypy.response.status,
'return': "Please log in",
}
def POST(self, **kwargs):
'''
:ref:`Authenticate <rest_cherrypy-auth>` against Salt's eauth system
.. http:post:: /login
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:form eauth: the eauth backend configured for the user
:form username: username
:form password: password
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -si localhost:8000/login \\
-c ~/cookies.txt \\
-H "Accept: application/json" \\
-H "Content-type: application/json" \\
-d '{
"username": "saltuser",
"password": "saltuser",
"eauth": "auto"
}'
.. code-block:: text
POST / HTTP/1.1
Host: localhost:8000
Content-Length: 42
Content-Type: application/json
Accept: application/json
{"username": "saltuser", "password": "saltuser", "eauth": "auto"}
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Type: application/json
Content-Length: 206
X-Auth-Token: 6d1b722e
Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/
{"return": {
"token": "6d1b722e",
"start": 1363805943.776223,
"expire": 1363849143.776224,
"user": "saltuser",
"eauth": "pam",
"perms": [
"grains.*",
"status.*",
"sys.*",
"test.*"
]
}}
'''
if not self.api._is_master_running():
raise salt.exceptions.SaltDaemonNotRunning(
'Salt Master is not available.')
# the urlencoded_processor will wrap this in a list
if isinstance(cherrypy.serving.request.lowstate, list):
creds = cherrypy.serving.request.lowstate[0]
else:
creds = cherrypy.serving.request.lowstate
username = creds.get('username', None)
# Validate against the whitelist.
if not salt_api_acl_tool(username, cherrypy.request):
raise cherrypy.HTTPError(401)
# Mint token.
token = self.auth.mk_token(creds)
if 'token' not in token:
raise cherrypy.HTTPError(401,
'Could not authenticate using provided credentials')
cherrypy.response.headers['X-Auth-Token'] = cherrypy.session.id
cherrypy.session['token'] = token['token']
cherrypy.session['timeout'] = (token['expire'] - token['start']) / 60
# Grab eauth config for the current backend for the current user
try:
eauth = self.opts.get('external_auth', {}).get(token['eauth'], {})
if token['eauth'] == 'django' and '^model' in eauth:
perms = token['auth_list']
else:
# Get sum of '*' perms, user-specific perms, and group-specific perms
perms = eauth.get(token['name'], [])
perms.extend(eauth.get('*', []))
if 'groups' in token and token['groups']:
user_groups = set(token['groups'])
eauth_groups = set([i.rstrip('%') for i in eauth.keys() if i.endswith('%')])
for group in user_groups & eauth_groups:
perms.extend(eauth['{0}%'.format(group)])
if not perms:
logger.debug("Eauth permission list not found.")
except Exception:
logger.debug("Configuration for external_auth malformed for "
"eauth '{0}', and user '{1}'."
.format(token.get('eauth'), token.get('name')), exc_info=True)
perms = None
return {'return': [{
'token': cherrypy.session.id,
'expire': token['expire'],
'start': token['start'],
'user': token['name'],
'eauth': token['eauth'],
'perms': perms or {},
}]}
class Logout(LowDataAdapter):
'''
Class to remove or invalidate sessions
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_auth.on': True,
'tools.lowdata_fmt.on': False,
})
def POST(self):
'''
Destroy the currently active session and expire the session cookie
'''
cherrypy.lib.sessions.expire() # set client-side to expire
cherrypy.session.regenerate() # replace server-side with new
return {'return': "Your token has been cleared"}
class Token(LowDataAdapter):
'''
Generate a Salt token from eauth credentials
Wraps functionality in the :py:mod:`auth Runner <salt.runners.auth>`.
.. versionadded:: 2017.7.0
'''
@cherrypy.config(**{'tools.sessions.on': False})
def POST(self, **kwargs):
r'''
.. http:post:: /token
Generate a Salt eauth token
:status 200: |200|
:status 400: |400|
:status 401: |401|
**Example request:**
.. code-block:: bash
curl -sSk https://localhost:8000/token \
-H 'Content-type: application/json' \
-d '{
"username": "saltdev",
"password": "saltdev",
"eauth": "auto"
}'
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Type: application/json
[{
"start": 1494987445.528182,
"token": "e72ca1655d05...",
"expire": 1495030645.528183,
"name": "saltdev",
"eauth": "auto"
}]
'''
for creds in cherrypy.request.lowstate:
try:
creds.update({
'client': 'runner',
'fun': 'auth.mk_token',
'kwarg': {
'username': creds['username'],
'password': creds['password'],
'eauth': creds['eauth'],
},
})
except KeyError:
raise cherrypy.HTTPError(400,
'Require "username", "password", and "eauth" params')
return list(self.exec_lowstate())
class Run(LowDataAdapter):
'''
Run commands bypassing the :ref:`normal session handling
<rest_cherrypy-auth>`
salt-api does not enforce authorization, Salt's eauth system does that.
Local/Runner/WheelClient all accept ``username``/``password``/``eauth``
**or** ``token`` kwargs that are then checked by the eauth system. The
session mechanism in ``rest_cherrypy`` simply pairs a session with a Salt
eauth token and then passes the ``token`` kwarg in automatically.
If you already have a Salt eauth token, perhaps generated by the
:py:func:`mk_token <salt.runners.auth.mk_token>` function in the Auth
Runner module, then there is no reason to use sessions.
This endpoint accepts either a ``username``, ``password``, ``eauth`` trio,
**or** a ``token`` kwarg and does not make use of sessions at all.
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.sessions.on': False,
})
def POST(self, **kwargs):
'''
Run commands bypassing the :ref:`normal session handling
<rest_cherrypy-auth>` Other than that this URL is identical to the
:py:meth:`root URL (/) <LowDataAdapter.POST>`.
.. http:post:: /run
An array of lowstate data describing Salt commands must be sent in
the request body.
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-H 'Content-type: application/json' \\
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping",
"username": "saltdev",
"password": "saltdev",
"eauth": "auto"
}]'
**Or** using a Salt Eauth token:
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-H 'Content-type: application/json' \\
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping",
"token": "<salt eauth token here>"
}]'
.. code-block:: text
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/json
[{"client": "local", "tgt": "*", "fun": "test.ping", "username": "saltdev", "password": "saltdev", "eauth": "auto"}]
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
        The /run endpoint can also be used to issue commands using the salt-ssh
subsystem.
When using salt-ssh, eauth credentials should not be supplied. Instead,
authentication should be handled by the SSH layer itself. The use of
the salt-ssh client does not require a salt master to be running.
Instead, only a roster file must be present in the salt configuration
directory.
All SSH client requests are synchronous.
**Example SSH client request:**
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-d client='ssh' \\
-d tgt='*' \\
-d fun='test.ping'
.. code-block:: text
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/x-www-form-urlencoded
client=ssh&tgt=*&fun=test.ping
**Example SSH response:**
.. code-block:: text
return:
- silver:
fun: test.ping
fun_args: []
id: silver
jid: '20141203103525666185'
retcode: 0
return: true
success: true
'''
return {
'return': list(self.exec_lowstate()),
}
class Events(object):
'''
Expose the Salt event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure.
.. seealso:: :ref:`events`
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'response.stream': True,
'tools.encode.encoding': 'utf-8',
# Auth handled manually below
'tools.salt_auth.on': False,
'tools.hypermedia_in.on': False,
'tools.hypermedia_out.on': False,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.resolver = salt.auth.Resolver(self.opts)
def _is_valid_token(self, auth_token):
'''
Check if this is a valid salt-api token or valid Salt token
salt-api tokens are regular session tokens that tie back to a real Salt
token. Salt tokens are tokens generated by Salt's eauth system.
:return bool: True if valid, False if not valid.
'''
# Make sure that auth token is hex. If it's None, or something other
# than hex, this will raise a ValueError.
try:
int(auth_token, 16)
except (TypeError, ValueError):
return False
# First check if the given token is in our session table; if so it's a
# salt-api token and we need to get the Salt token from there.
orig_session, _ = cherrypy.session.cache.get(auth_token, ({}, None))
# If it's not in the session table, assume it's a regular Salt token.
salt_token = orig_session.get('token', auth_token)
# The eauth system does not currently support perms for the event
# stream, so we're just checking if the token exists not if the token
# allows access.
if salt_token and self.resolver.get_token(salt_token):
return True
return False
def GET(self, token=None, salt_token=None):
r'''
An HTTP stream of the Salt master event bus
This stream is formatted per the Server Sent Events (SSE) spec. Each
event is formatted as JSON.
.. http:get:: /events
:status 200: |200|
:status 401: |401|
:status 406: |406|
:query token: **optional** parameter containing the token
ordinarily supplied via the X-Auth-Token header in order to
allow cross-domain requests in browsers that do not include
CORS support in the EventSource API. E.g.,
``curl -NsS localhost:8000/events?token=308650d``
:query salt_token: **optional** parameter containing a raw Salt
*eauth token* (not to be confused with the token returned from
the /login URL). E.g.,
``curl -NsS localhost:8000/events?salt_token=30742765``
**Example request:**
.. code-block:: bash
curl -NsS localhost:8000/events
.. code-block:: text
GET /events HTTP/1.1
Host: localhost:8000
**Example response:**
Note, the ``tag`` field is not part of the spec. SSE compliant clients
should ignore unknown fields. This addition allows non-compliant
        clients to only watch for certain tags without having to deserialize the
JSON object each time.
.. code-block:: text
HTTP/1.1 200 OK
Connection: keep-alive
Cache-Control: no-cache
Content-Type: text/event-stream;charset=utf-8
retry: 400
tag: salt/job/20130802115730568475/new
data: {'tag': 'salt/job/20130802115730568475/new', 'data': {'minions': ['ms-4', 'ms-3', 'ms-2', 'ms-1', 'ms-0']}}
tag: salt/job/20130802115730568475/ret/jerry
data: {'tag': 'salt/job/20130802115730568475/ret/jerry', 'data': {'jid': '20130802115730568475', 'return': True, 'retcode': 0, 'success': True, 'cmd': '_return', 'fun': 'test.ping', 'id': 'ms-1'}}
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
var source = new EventSource('/events');
source.onopen = function() { console.info('Listening ...') };
source.onerror = function(err) { console.error(err) };
source.onmessage = function(message) {
var saltEvent = JSON.parse(message.data);
console.log(saltEvent.tag, saltEvent.data);
};
Note, the SSE stream is fast and completely asynchronous and Salt is
very fast. If a job is created using a regular POST request, it is
possible that the job return will be available on the SSE stream before
the response for the POST request arrives. It is important to take that
asynchronicity into account when designing an application. Below are
some general guidelines.
        * Subscribe to the SSE stream *before* creating any events.
* Process SSE events directly as they arrive and don't wait for any
other process to "complete" first (like an ajax request).
* Keep a buffer of events if the event stream must be used for
synchronous lookups.
* Be cautious in writing Salt's event stream directly to the DOM. It is
very busy and can quickly overwhelm the memory allocated to a
browser tab.
A full, working proof-of-concept JavaScript application is available
:blob:`adjacent to this file <salt/netapi/rest_cherrypy/index.html>`.
It can be viewed by pointing a browser at the ``/app`` endpoint in a
running ``rest_cherrypy`` instance.
Or using CORS:
.. code-block:: javascript
var source = new EventSource('/events?token=ecd589e4e01912cf3c4035afad73426dbb8dba75', {withCredentials: true});
It is also possible to consume the stream via the shell.
Records are separated by blank lines; the ``data:`` and ``tag:``
prefixes will need to be removed manually before attempting to
unserialize the JSON.
curl's ``-N`` flag turns off input buffering which is required to
process the stream incrementally.
Here is a basic example of printing each event as it comes in:
.. code-block:: bash
curl -NsS localhost:8000/events |\
while IFS= read -r line ; do
echo $line
done
Here is an example of using awk to filter events based on tag:
.. code-block:: bash
curl -NsS localhost:8000/events |\
awk '
BEGIN { RS=""; FS="\\n" }
$1 ~ /^tag: salt\/job\/[0-9]+\/new$/ { print $0 }
'
tag: salt/job/20140112010149808995/new
data: {"tag": "salt/job/20140112010149808995/new", "data": {"tgt_type": "glob", "jid": "20140112010149808995", "tgt": "jerry", "_stamp": "2014-01-12_01:01:49.809617", "user": "shouse", "arg": [], "fun": "test.ping", "minions": ["jerry"]}}
tag: 20140112010149808995
data: {"tag": "20140112010149808995", "data": {"fun_args": [], "jid": "20140112010149808995", "return": true, "retcode": 0, "success": true, "cmd": "_return", "_stamp": "2014-01-12_01:01:49.819316", "fun": "test.ping", "id": "jerry"}}
'''
cookies = cherrypy.request.cookie
auth_token = token or salt_token or (
cookies['session_id'].value if 'session_id' in cookies else None)
if not self._is_valid_token(auth_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
cherrypy.response.headers['Content-Type'] = 'text/event-stream'
cherrypy.response.headers['Cache-Control'] = 'no-cache'
cherrypy.response.headers['Connection'] = 'keep-alive'
def listen():
'''
An iterator to yield Salt events
'''
event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=True)
stream = event.iter_events(full=True, auto_reconnect=True)
yield str('retry: 400\n') # future lint: disable=blacklisted-function
while True:
data = next(stream)
yield str('tag: {0}\n').format(data.get('tag', '')) # future lint: disable=blacklisted-function
yield str('data: {0}\n\n').format(salt.utils.json.dumps(data)) # future lint: disable=blacklisted-function
return listen()
class WebsocketEndpoint(object):
'''
Open a WebSocket connection to Salt's event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure. Uses websocket as the transport mechanism.
.. seealso:: :ref:`events`
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'response.stream': True,
'tools.encode.encoding': 'utf-8',
# Auth handled manually below
'tools.salt_auth.on': False,
'tools.hypermedia_in.on': False,
'tools.hypermedia_out.on': False,
'tools.websocket.on': True,
'tools.websocket.handler_cls': websockets.SynchronizingWebsocket,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.auth = salt.auth.LoadAuth(self.opts)
def GET(self, token=None, **kwargs):
'''
Return a websocket connection of Salt's event stream
.. http:get:: /ws/(token)
:query format_events: The event stream will undergo server-side
formatting if the ``format_events`` URL parameter is included
in the request. This can be useful to avoid formatting on the
client-side:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws?format_events
:reqheader X-Auth-Token: an authentication token from
:py:class:`~Login`.
:status 101: switching to the websockets protocol
:status 401: |401|
:status 406: |406|
**Example request:** ::
curl -NsSk \\
-H 'X-Auth-Token: ffedf49d' \\
-H 'Host: localhost:8000' \\
-H 'Connection: Upgrade' \\
-H 'Upgrade: websocket' \\
-H 'Origin: https://localhost:8000' \\
-H 'Sec-WebSocket-Version: 13' \\
-H 'Sec-WebSocket-Key: '"$(echo -n $RANDOM | base64)" \\
localhost:8000/ws
.. code-block:: text
GET /ws HTTP/1.1
Connection: Upgrade
Upgrade: websocket
Host: localhost:8000
Origin: https://localhost:8000
Sec-WebSocket-Version: 13
Sec-WebSocket-Key: s65VsgHigh7v/Jcf4nXHnA==
X-Auth-Token: ffedf49d
**Example response**:
.. code-block:: text
HTTP/1.1 101 Switching Protocols
Upgrade: websocket
Connection: Upgrade
Sec-WebSocket-Accept: mWZjBV9FCglzn1rIKJAxrTFlnJE=
Sec-WebSocket-Version: 13
An authentication token **may optionally** be passed as part of the URL
for browsers that cannot be configured to send the authentication
header or cookie:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws/ffedf49d
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
// Note, you must be authenticated!
var source = new Websocket('ws://localhost:8000/ws/d0ce6c1a');
source.onerror = function(e) { console.debug('error!', e); };
source.onmessage = function(e) { console.debug(e.data); };
source.send('websocket client ready')
source.close();
Or via Python, using the Python module `websocket-client
<https://pypi.python.org/pypi/websocket-client/>`_ for example.
.. code-block:: python
# Note, you must be authenticated!
from websocket import create_connection
ws = create_connection('ws://localhost:8000/ws/d0ce6c1a')
ws.send('websocket client ready')
# Look at https://pypi.python.org/pypi/websocket-client/ for more
# examples.
while listening_to_events:
print ws.recv()
ws.close()
        The examples above show how to establish a websocket connection to Salt
        and activate real-time updates from Salt's event stream by signaling
        ``websocket client ready``.
'''
# Pulling the session token from an URL param is a workaround for
# browsers not supporting CORS in the EventSource API.
if token:
orig_session, _ = cherrypy.session.cache.get(token, ({}, None))
salt_token = orig_session.get('token')
else:
salt_token = cherrypy.session.get('token')
# Manually verify the token
if not salt_token or not self.auth.get_tok(salt_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
# A handler is the server side end of the websocket connection. Each
# request spawns a new instance of this handler
handler = cherrypy.request.ws_handler
def event_stream(handler, pipe):
'''
An iterator to return Salt events (and optionally format them)
'''
# blocks until send is called on the parent end of this pipe.
pipe.recv()
event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=True)
stream = event.iter_events(full=True, auto_reconnect=True)
SaltInfo = event_processor.SaltInfo(handler)
def signal_handler(signal, frame):
os._exit(0)
signal.signal(signal.SIGTERM, signal_handler)
while True:
data = next(stream)
if data:
try: # work around try to decode catch unicode errors
if 'format_events' in kwargs:
SaltInfo.process(data, salt_token, self.opts)
else:
handler.send(
str('data: {0}\n\n').format(salt.utils.json.dumps(data)), # future lint: disable=blacklisted-function
False
)
except UnicodeDecodeError:
logger.error(
"Error: Salt event has non UTF-8 data:\n{0}"
.format(data))
parent_pipe, child_pipe = Pipe()
handler.pipe = parent_pipe
handler.opts = self.opts
# Process to handle asynchronous push to a client.
# Each GET request causes a process to be kicked off.
proc = Process(target=event_stream, args=(handler, child_pipe))
proc.start()
class Webhook(object):
'''
A generic web hook entry point that fires an event on Salt's event bus
External services can POST data to this URL to trigger an event in Salt.
For example, Amazon SNS, Jenkins-CI or Travis-CI, or GitHub web hooks.
.. note:: Be mindful of security
Salt's Reactor can run any code. A Reactor SLS that responds to a hook
event is responsible for validating that the event came from a trusted
source and contains valid data.
**This is a generic interface and securing it is up to you!**
This URL requires authentication however not all external services can
be configured to authenticate. For this reason authentication can be
selectively disabled for this URL. Follow best practices -- always use
SSL, pass a secret key, configure the firewall to only allow traffic
from a known source, etc.
The event data is taken from the request body. The
:mailheader:`Content-Type` header is respected for the payload.
The event tag is prefixed with ``salt/netapi/hook`` and the URL path is
appended to the end. For example, a ``POST`` request sent to
``/hook/mycompany/myapp/mydata`` will produce a Salt event with the tag
``salt/netapi/hook/mycompany/myapp/mydata``.
The following is an example ``.travis.yml`` file to send notifications to
Salt of successful test runs:
.. code-block:: yaml
language: python
script: python -m unittest tests
after_success:
- |
curl -sSk https://saltapi-url.example.com:8000/hook/travis/build/success \
-d branch="${TRAVIS_BRANCH}" \
-d commit="${TRAVIS_COMMIT}"
.. seealso:: :ref:`events`, :ref:`reactor <reactor>`
'''
exposed = True
tag_base = ['salt', 'netapi', 'hook']
_cp_config = dict(LowDataAdapter._cp_config, **{
        # lowdata_fmt stays enabled here; the hook handler reads the
        # unserialized request data and raw body directly, not the lowstate.
        'tools.lowdata_fmt.on': True,
# Auth can be overridden in __init__().
'tools.salt_auth.on': True,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=False)
if cherrypy.config['apiopts'].get('webhook_disable_auth'):
self._cp_config['tools.salt_auth.on'] = False
def POST(self, *args, **kwargs):
'''
Fire an event in Salt with a custom event tag and data
.. http:post:: /hook
:status 200: |200|
:status 401: |401|
:status 406: |406|
:status 413: request body is too large
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/hook \\
-H 'Content-type: application/json' \\
-d '{"foo": "Foo!", "bar": "Bar!"}'
.. code-block:: text
POST /hook HTTP/1.1
Host: localhost:8000
Content-Length: 16
Content-Type: application/json
{"foo": "Foo!", "bar": "Bar!"}
**Example response**:
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 14
Content-Type: application/json
{"success": true}
As a practical example, an internal continuous-integration build
server could send an HTTP POST request to the URL
``https://localhost:8000/hook/mycompany/build/success`` which contains
the result of a build and the SHA of the version that was built as
JSON. That would then produce the following event in Salt that could be
used to kick off a deployment via Salt's Reactor::
Event fired at Fri Feb 14 17:40:11 2014
*************************
Tag: salt/netapi/hook/mycompany/build/success
Data:
{'_stamp': '2014-02-14_17:40:11.440996',
'headers': {
'X-My-Secret-Key': 'F0fAgoQjIT@W',
'Content-Length': '37',
'Content-Type': 'application/json',
'Host': 'localhost:8000',
'Remote-Addr': '127.0.0.1'},
'post': {'revision': 'aa22a3c4b2e7', 'result': True}}
Salt's Reactor could listen for the event:
.. code-block:: yaml
reactor:
- 'salt/netapi/hook/mycompany/build/*':
- /srv/reactor/react_ci_builds.sls
And finally deploy the new build:
.. code-block:: jinja
{% set secret_key = data.get('headers', {}).get('X-My-Secret-Key') %}
{% set build = data.get('post', {}) %}
{% if secret_key == 'F0fAgoQjIT@W' and build.result == True %}
deploy_my_app:
cmd.state.sls:
- tgt: 'application*'
- arg:
- myapp.deploy
- kwarg:
pillar:
                      revision: {{ build.revision }}
{% endif %}
'''
tag = '/'.join(itertools.chain(self.tag_base, args))
data = cherrypy.serving.request.unserialized_data
if not data:
data = {}
raw_body = getattr(cherrypy.serving.request, 'raw_body', '')
headers = dict(cherrypy.request.headers)
ret = self.event.fire_event({
'body': raw_body,
'post': data,
'headers': headers,
}, tag)
return {'success': ret}
class Stats(object):
'''
Expose statistics on the running CherryPy server
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_auth.on': True,
})
def __init__(self):
if cherrypy.config['apiopts'].get('stats_disable_auth'):
self._cp_config['tools.salt_auth.on'] = False
def GET(self):
'''
Return a dump of statistics collected from the CherryPy server
.. http:get:: /stats
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 401: |401|
:status 406: |406|
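        **Example request** (illustrative; the token value is a placeholder):
        .. code-block:: bash
            curl -i localhost:8000/stats -H 'X-Auth-Token: 6d1b722e'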
'''
if hasattr(logging, 'statistics'):
return cpstats.extrapolate_statistics(logging.statistics)
return {}
class App(object):
'''
Class to serve HTML5 apps
'''
exposed = True
def GET(self, *args):
'''
Serve a single static file ignoring the remaining path
This is useful in combination with a browser-based app using the HTML5
history API.
.. http::get:: /app
:reqheader X-Auth-Token: |req_token|
:status 200: |200|
:status 401: |401|
'''
apiopts = cherrypy.config['apiopts']
default_index = os.path.abspath(os.path.join(
os.path.dirname(__file__), 'index.html'))
return cherrypy.lib.static.serve_file(
apiopts.get('app', default_index))
class API(object):
'''
Collect configuration and URL map for building the CherryPy app
'''
url_map = {
'index': LowDataAdapter,
'login': Login,
'logout': Logout,
'token': Token,
'minions': Minions,
'run': Run,
'jobs': Jobs,
'keys': Keys,
'events': Events,
'stats': Stats,
}
def _setattr_url_map(self):
'''
Set an attribute on the local instance for each key/val in url_map
CherryPy uses class attributes to resolve URLs.
'''
if self.apiopts.get('enable_sessions', True) is False:
url_blacklist = ['login', 'logout', 'minions', 'jobs']
else:
url_blacklist = []
urls = ((url, cls) for url, cls in six.iteritems(self.url_map)
if url not in url_blacklist)
for url, cls in urls:
setattr(self, url, cls())
def _update_url_map(self):
'''
Assemble any dynamic or configurable URLs
'''
if HAS_WEBSOCKETS:
self.url_map.update({
'ws': WebsocketEndpoint,
})
# Allow the Webhook URL to be overridden from the conf.
self.url_map.update({
self.apiopts.get('webhook_url', 'hook').lstrip('/'): Webhook,
})
# Enable the single-page JS app URL.
self.url_map.update({
self.apiopts.get('app_path', 'app').lstrip('/'): App,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.apiopts = cherrypy.config['apiopts']
self._update_url_map()
self._setattr_url_map()
def get_conf(self):
'''
Combine the CherryPy configuration with the rest_cherrypy config values
pulled from the master config and return the CherryPy configuration
'''
conf = {
'global': {
'server.socket_host': self.apiopts.get('host', '0.0.0.0'),
'server.socket_port': self.apiopts.get('port', 8000),
'server.thread_pool': self.apiopts.get('thread_pool', 100),
'server.socket_queue_size': self.apiopts.get('queue_size', 30),
'max_request_body_size': self.apiopts.get(
'max_request_body_size', 1048576),
'debug': self.apiopts.get('debug', False),
'log.access_file': self.apiopts.get('log_access_file', ''),
'log.error_file': self.apiopts.get('log_error_file', ''),
},
'/': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.trailing_slash.on': True,
'tools.gzip.on': True,
'tools.html_override.on': True,
'tools.cors_tool.on': True,
},
}
if salt.utils.versions.version_cmp(cherrypy.__version__, '12.0.0') < 0:
# CherryPy >= 12.0 no longer supports "timeout_monitor", only set
# this config option when using an older version of CherryPy.
# See Issue #44601 for more information.
conf['global']['engine.timeout_monitor.on'] = self.apiopts.get(
'expire_responses', True
)
if cpstats and self.apiopts.get('collect_stats', False):
conf['/']['tools.cpstats.on'] = True
if 'favicon' in self.apiopts:
conf['/favicon.ico'] = {
'tools.staticfile.on': True,
'tools.staticfile.filename': self.apiopts['favicon'],
}
if self.apiopts.get('debug', False) is False:
conf['global']['environment'] = 'production'
# Serve static media if the directory has been set in the configuration
if 'static' in self.apiopts:
conf[self.apiopts.get('static_path', '/static')] = {
'tools.staticdir.on': True,
'tools.staticdir.dir': self.apiopts['static'],
}
# Add to global config
cherrypy.config.update(conf['global'])
return conf
def get_app(opts):
'''
Returns a WSGI app and a configuration dictionary
'''
apiopts = opts.get(__name__.rsplit('.', 2)[-2], {}) # rest_cherrypy opts
# Add Salt and salt-api config options to the main CherryPy config dict
cherrypy.config['saltopts'] = opts
cherrypy.config['apiopts'] = apiopts
root = API() # cherrypy app
cpyopts = root.get_conf() # cherrypy app opts
return root, apiopts, cpyopts
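# Illustrative usage sketch (an assumption for clarity only; salt-api normally
# loads this module via salt.netapi and calls get_app() itself):
#
#     import salt.config
#     opts = salt.config.client_config('/etc/salt/master')
#     root, apiopts, cpyopts = get_app(opts)
#     cherrypy.quickstart(root, apiopts.get('root_prefix', '/'), cpyopts)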
|
__main__.py
|
import queue
import threading
import time
from subprocess import Popen
from types import SimpleNamespace
import os
import traceback
from haste_storage_client.haste_priority_queue_client import HastePriorityQueueClient
from haste.desktop_agent import golden
from haste.desktop_agent.golden import get_golden_prio_for_filename
from watchdog.observers import Observer
import asyncio
import logging
import vironova_image_compression.ben_images.file_utils
from haste.desktop_agent.FSEvents import HasteFSEventHandler
from haste.desktop_agent.args import initialize
from haste.desktop_agent.config import MAX_CONCURRENT_XFERS, QUIT_AFTER, DELETE, FAKE_UPLOAD, \
FAKE_UPLOAD_SPEED_BITS_PER_SECOND
from haste.desktop_agent.interestingness_model import EstimatedPreprocessingEfficacyInterestingnessModel
from haste.desktop_agent.post_file import send_file
# TODO: store timestamps also and clear the old ones
already_written_filenames = set()
# files_to_send = []
# needs_sorting = False
timestamp_first_event = -1
stats_total_bytes_sent = 0
stats_preprocessed_files_sent = 0
stats_not_preprocessed_files_sent = 0
stats_events_pushed_first_queue = 0
stats_events_pushed_second_queue_preprocessed = 0
stats_events_pushed_second_queue_raw = 0
stats_total_preproc_duration = 0
path, dot_and_extension, stream_id_tag, username, password, host, stream_id, x_preprocessing_cores, x_mode = initialize()
assert path.endswith('/')
ground_truth = golden.csv_results[0:QUIT_AFTER]
golden_estimated_scores = (ground_truth['input_file_size_bytes'] - ground_truth[
'output_file_size_bytes']) / ground_truth['duration_total']
interestingness_model = EstimatedPreprocessingEfficacyInterestingnessModel()
queue_client = HastePriorityQueueClient(QUIT_AFTER, x_mode, golden_estimated_scores, interestingess_model=interestingness_model)
time_last_full_dir_listing = -1
TOO_LONG = 0.005
def new_file(info):
# Align the info from the watchdog to the HASTE API.
info.timestamp = time.time()
info.location = 0
info.original_filename = info.src_path
queue_client.save(timestamp=info.timestamp,
location=info.location,
substream_id=None,
blob_bytes=None,
metadata=info)
def thread_worker_poll_fs():
global time_last_full_dir_listing
try:
while True:
PAUSE = 0.5
# There are some issues with the watchdog -- sometimes files seem to be missed.
# As a workaround, do a full directory listing each second.
if time_last_full_dir_listing > 0:
pause = (time_last_full_dir_listing + PAUSE) - time.time()
if pause > 0:
time.sleep(pause)
else:
                    logging.warning(f'fs poll overran by {-pause}')
            filenames = sorted(os.listdir(path))  # sort once; timestamp at front of filename
time_last_full_dir_listing = time.time()
# logging.info(f'completed scan - {len(filenames)}')
# print(f'completed scan - {len(filenames)}')
for filename in filenames:
if filename in already_written_filenames:
continue
new_event = SimpleNamespace()
new_event.src_path = path + filename
new_event.is_directory = False
put_event_on_queue(new_event)
except Exception as ex:
logging.error(f'exception on polling thread: {traceback.format_exc()}')
def put_event_on_queue(event):
global timestamp_first_event, stats_events_pushed_first_queue
# Called on a worker thread. Put an FS event on the thread-safe queue.
if event.is_directory:
logging.debug(f'skipping directory: {event.src_path}')
return
src_path = event.src_path
if (dot_and_extension is not None) and (not src_path.endswith(dot_and_extension)):
logging.debug(f'Ignoring file because of extension: {event}')
return
# Set is a hashset, this is O(1)
if src_path.split('/')[-1] in already_written_filenames:
logging.debug(f'File already sent: {src_path}')
return
already_written_filenames.add(src_path.split('/')[-1])
event.timestamp = time.time()
if timestamp_first_event < 0:
timestamp_first_event = event.timestamp
if False:
event.golden_bytes_reduction = get_golden_prio_for_filename(src_path.split('/')[-1])
event.preprocessed = False
event.file_size = vironova_image_compression.ben_images.file_utils.get_file_size(event.src_path)
# Queue never full, has infinite capacity.
events_to_process_mt_queue.put(event, block=True)
stats_events_pushed_first_queue += 1
logging.info(
f'put_event_on_queue() -- pushed event: {event.src_path} -- stats_events_pushed_first_queue: {stats_events_pushed_first_queue}')
async def xfer_events_from_fs(name):
# Async on the main thread. Xfer events from the thread-safe queue onto the async queue on the main thread.
logging.debug(f'{name} started')
time_after_async = 0
try:
while True:
try:
event = events_to_process_mt_queue.get_nowait()
time_blocked = time.time() - time_after_async
if time_blocked > TOO_LONG:
logging.info(f'xfer_events_from_fs spent {time_blocked} blocking main thread')
await push_event(event)
except queue.Empty:
await asyncio.sleep(0.1)
time_after_async = time.time()
except Exception as ex:
        logging.error(f'exception in xfer_events_from_fs: {traceback.format_exc()}')
async def preprocess_async_loop_new_proc(name, queue):
global stats_total_preproc_duration
count = 0
try:
while True:
file_system_event = await pop_event(True)
if file_system_event is not None:
logging.info(f'preprocessing: {file_system_event.src_path}')
output_filepath = '/tmp/' + file_system_event.src_path.split('/')[-1]
# line_to_send = f"{file_system_event.src_path},{output_filepath}\n"
#
# # add to the buffer
# proc.stdin.write(line_to_send.encode())
# await proc.stdin.drain()
#
# stdoutline = await proc.stdout.readline()
# stdoutline = stdoutline.decode().strip()
# logging.info(f'stdout from preprocessor: {stdoutline}')
#
# dur_preproc = float(stdoutline.split(',')[0])
# dur_waiting = float(stdoutline.split(',')[1])
# logging.debug(f'preprocessor waiting: {dur_waiting}')
time_start = time.time()
proc = await asyncio.create_subprocess_shell(
f'python3 -m vironova_image_compression.ben_images.threshold_overwrite {file_system_event.src_path} {output_filepath}',
stdout=asyncio.subprocess.PIPE,
stdin=asyncio.subprocess.PIPE)
# Wait for it to terminate.
await proc.wait()
dur_preproc = time.time() - time_start
file_system_event2 = SimpleNamespace()
file_system_event2.timestamp = time.time()
file_system_event2.src_path = output_filepath
file_system_event2.file_size = vironova_image_compression.ben_images.file_utils.get_file_size(output_filepath)
file_system_event2.golden_bytes_reduction = (
file_system_event.file_size - file_system_event2.file_size) / dur_preproc
stats_total_preproc_duration += dur_preproc
file_system_event2.preprocessed = True
file_system_event2.index = file_system_event.index
event_to_re_add = file_system_event2
count += 1
logging.info(f'preprocessed {count} files')
if DELETE:
os.unlink(file_system_event.src_path)
else:
# We've preprocessed everything. just re-add the original event.
event_to_re_add = file_system_event
await push_event(event_to_re_add)
queue.task_done()
# We've preprocessed everything for now. just re-add the original event and 'sleep' a little.
if file_system_event is None:
await asyncio.sleep(0.2)
except Exception as ex:
logging.error(traceback.format_exc())
raise ex
async def preprocess_async_loop_service(name, queue):
global stats_total_preproc_duration
count = 0
try:
proc = await asyncio.create_subprocess_shell(
'python3 -m haste.desktop_agent.preprocessor',
stdout=asyncio.subprocess.PIPE,
stdin=asyncio.subprocess.PIPE)
while True:
file_system_event = await pop_event(True)
if file_system_event is not None:
logging.info(f'preprocessing: {file_system_event.src_path}')
output_filepath = '/tmp/' + file_system_event.src_path.split('/')[-1]
line_to_send = f"{file_system_event.src_path},{output_filepath}\n"
# add to the buffer
proc.stdin.write(line_to_send.encode())
await proc.stdin.drain()
stdoutline = await proc.stdout.readline()
stdoutline = stdoutline.decode().strip()
logging.info(f'stdout from preprocessor: {stdoutline}')
dur_preproc = float(stdoutline.split(',')[0])
dur_waiting = float(stdoutline.split(',')[1])
logging.debug(f'preprocessor waiting: {dur_waiting}')
file_system_event2 = SimpleNamespace()
file_system_event2.timestamp = time.time()
file_system_event2.src_path = output_filepath
file_system_event2.file_size = vironova_image_compression.ben_images.file_utils.get_file_size(output_filepath)
file_system_event2.golden_bytes_reduction = (
file_system_event.file_size - file_system_event2.file_size) / dur_preproc
stats_total_preproc_duration += dur_preproc
file_system_event2.preprocessed = True
file_system_event2.index = file_system_event.index
event_to_re_add = file_system_event2
count += 1
logging.info(f'preprocessed {count} files')
if DELETE:
os.unlink(file_system_event.src_path)
else:
# We've preprocessed everything. just re-add the original event.
event_to_re_add = file_system_event
await push_event(event_to_re_add)
queue.task_done()
# We've preprocessed everything for now. just re-add the original event and 'sleep' a little.
if file_system_event is None:
await asyncio.sleep(0.2)
except Exception as ex:
logging.error(traceback.format_exc())
raise ex
# def log_queue_info():
# # Log info about the present state of the queue
# count_preprocessed = len(list(filter(lambda f: f.preprocessed, files_to_send)))
# count_not_preprocessed = len(files_to_send) - count_preprocessed
# logging.info(f'PLOT - {time.time()} - {count_preprocessed} - {count_not_preprocessed}')
async def push_event(event_to_re_add):
    global stats_events_pushed_second_queue_preprocessed, stats_events_pushed_second_queue_raw
if event_to_re_add is not None:
if event_to_re_add.preprocessed:
stats_events_pushed_second_queue_preprocessed += 1
else:
stats_events_pushed_second_queue_raw += 1
logging.info(
f'push_event() - raw:{stats_events_pushed_second_queue_raw}')
if event_to_re_add.preprocessed:
queue_client.notify_preprocessed(event_to_re_add.index, event_to_re_add.golden_bytes_reduction,
event_to_re_add)
else:
new_file(event_to_re_add)
return await events_to_process_async_queue.put(object())
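# Note: the asyncio queue only carries placeholder tokens. Ordering and prioritisation of
# events live inside queue_client: push_event() registers the event (or a preprocessed
# result) with queue_client and enqueues one token; pop_event() consumes a token and asks
# queue_client which event to hand out next.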
async def pop_event(for_preprocessing):
await events_to_process_async_queue.get()
start = time.time()
if for_preprocessing:
index, result = queue_client.next_to_preprocess()
else:
index, result = queue_client.pop_for_sending()
logging.debug(f'popping_took: {time.time() - start}')
if result is not None:
result.index = index
return result
async def worker_send_files(name, queue):
global stats_total_bytes_sent, stats_preprocessed_files_sent, stats_not_preprocessed_files_sent
# Process events from the queue on the main thread.
logging.debug(f'Worker {name} started')
last = 0
try:
while True:
if time.time() - last > TOO_LONG:
logging.info(f'worker_took: {time.time() - last}')
file_system_event = await pop_event(False)
logging.debug(f'event {file_system_event} popped from queue')
# takes ~0.0003s
# start_file_read = time.time()
# f = open(file_system_event.src_path, 'rb')
# filelike = f.read()
# f.close()
# logging.info(f'file_read_took: {time.time()-start_file_read}')
filelike = file_system_event.src_path
if FAKE_UPLOAD:
# (only 1 concurrent upload)
fake_upload_time = (file_system_event.file_size * 8) / FAKE_UPLOAD_SPEED_BITS_PER_SECOND
logging.info(f'Fake sleep for: {fake_upload_time}')
await asyncio.sleep(fake_upload_time)
else:
response = await send_file(file_system_event, filelike, stream_id_tag, stream_id, username, password,
host)
logging.debug(f'Server response body: {response}')
last = time.time()
if DELETE:
start_delete = time.time()
if False:
# this takes ~0.005...at ~10Hz this is too slow?
# close_fds makes the parent process' file handles inaccessible for the child.
proc = Popen(f'rm {file_system_event.src_path}', shell=True, stdin=None, stdout=None, stderr=None,
close_fds=True)
else:
os.unlink(file_system_event.src_path)
                logging.debug(f'starting delete took: {time.time() - start_delete}')
stats_total_bytes_sent += file_system_event.file_size
if file_system_event.preprocessed:
stats_preprocessed_files_sent += 1
else:
stats_not_preprocessed_files_sent += 1
queue.task_done()
queue_client.notify_popped(file_system_event.index)
logging.info(
f'total_bytes_sent: {stats_total_bytes_sent} preprocessed_files_sent: {stats_preprocessed_files_sent} raw_files_sent: {stats_not_preprocessed_files_sent}')
# Benchmarking hack
if stats_not_preprocessed_files_sent + stats_preprocessed_files_sent == QUIT_AFTER:
logging.info(
f'Queue_is_empty. Duration since first event: {time.time() - timestamp_first_event} - total_bytes_sent: {stats_total_bytes_sent} preprocessed_files_sent: {stats_preprocessed_files_sent} raw_files_sent: {stats_not_preprocessed_files_sent} stats_total_preproc_duration: {stats_total_preproc_duration}')
# master_queue.plot()
quit()
except Exception as ex:
logging.error(f'Exception on {name}: {traceback.format_exc()}')
print(ex)
async def main():
# -u for unbuffered stdout (some issues with async code/autoflushing)
global events_to_process_mt_queue, events_to_process_async_queue
events_to_process_mt_queue = queue.Queue() # thread safe queue, no async support.
events_to_process_async_queue = asyncio.Queue() # async Queue, not thread safe
    observer = None
    if False:
# Unreliable -- some files missing.
observer = Observer()
event_handler = HasteFSEventHandler(put_event_on_queue)
observer.schedule(event_handler, path, recursive=True)
observer.start()
else:
        # Poll instead. This approach doesn't support subfolders.
poll_thread = threading.Thread(target=thread_worker_poll_fs, daemon=True)
poll_thread.start()
# Create workers to process events from the queue.
# Here, there is a single worker thread.
tasks = []
task = asyncio.create_task(xfer_events_from_fs(f'events-xfer'))
tasks.append(task)
for i in range(x_preprocessing_cores):
if False:
task = asyncio.create_task(preprocess_async_loop_service(f'preprocess-{i}', events_to_process_async_queue))
else:
task = asyncio.create_task(preprocess_async_loop_new_proc(f'preprocess-{i}', events_to_process_async_queue))
tasks.append(task)
for i in range(MAX_CONCURRENT_XFERS):
task = asyncio.create_task(worker_send_files(f'worker-{i}', events_to_process_async_queue))
tasks.append(task)
logging.info(f'began watching {path}')
try:
while True:
await asyncio.sleep(1)
except KeyboardInterrupt:
        if observer is not None:
            observer.stop()
await asyncio.gather(*tasks, return_exceptions=True)
if observer is not None:
observer.join()
if __name__ == '__main__':
if True:
asyncio.run(main())
else:
# Debug mode.
EventLoopDelayMonitor(interval=1)
# (couldn't find anything)
# https://stackoverflow.com/questions/38856410/monitoring-the-asyncio-event-loop
loop = asyncio.get_event_loop()
loop.slow_callback_duration = TOO_LONG
loop.set_debug(True) # Enable debug
loop.run_until_complete(main())
|
test_client.py
|
import asyncio
import concurrent.futures
import copy
import datetime
import functools
import os
import re
import threading
import warnings
from base64 import b64decode, b64encode
from queue import Empty
from typing import Any
from unittest.mock import MagicMock, Mock
import nbformat
import pytest
import xmltodict
from jupyter_client import KernelClient, KernelManager
from jupyter_client.kernelspec import KernelSpecManager
from nbconvert.filters import strip_ansi
from nbformat import NotebookNode
from testpath import modified_env
from traitlets import TraitError
from .. import NotebookClient, execute
from ..exceptions import CellExecutionError
from .base import NBClientTestsBase
addr_pat = re.compile(r'0x[0-9a-f]{7,9}')
current_dir = os.path.dirname(__file__)
ipython_input_pat = re.compile(
r'(<ipython-input-\d+-[0-9a-f]+>|<IPY-INPUT>) in (<module>|<cell line: \d>\(\))'
)
# Tracebacks look different in IPython 8,
# see: https://github.com/ipython/ipython/blob/master/docs/source/whatsnew/version8.rst#traceback-improvements # noqa
ipython8_input_pat = re.compile(
r'(Input In \[\d+\]|<IPY-INPUT>), in (<module>|<cell line: \d>\(\))'
)
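# NotebookClient lifecycle hooks that the hook tests replace with mocks and assert on.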
hook_methods = [
"on_cell_start",
"on_cell_execute",
"on_cell_complete",
"on_cell_executed",
"on_cell_error",
"on_notebook_start",
"on_notebook_complete",
"on_notebook_error",
]
def get_executor_with_hooks(nb=None, executor=None, async_hooks=False):
if async_hooks:
hooks = {key: AsyncMock() for key in hook_methods}
else:
hooks = {key: MagicMock() for key in hook_methods}
if nb is not None:
if executor is not None:
raise RuntimeError("Cannot pass nb and executor at the same time")
executor = NotebookClient(nb)
for k, v in hooks.items():
setattr(executor, k, v)
return executor, hooks
EXECUTE_REPLY_OK = {
'parent_header': {'msg_id': 'fake_id'},
'content': {'status': 'ok', 'execution_count': 1},
}
EXECUTE_REPLY_ERROR = {
'parent_header': {'msg_id': 'fake_id'},
'content': {'status': 'error'},
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
}
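# Minimal stand-in for an async mock: where a call must be awaitable, the tests give it a
# pre-resolved future (via make_future) as its return value.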
class AsyncMock(Mock):
pass
def make_future(obj: Any) -> asyncio.Future:
try:
loop = asyncio.get_running_loop()
except RuntimeError:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
future: asyncio.Future = asyncio.Future(loop=loop)
future.set_result(obj)
return future
def normalize_base64(b64_text):
# if it's base64, pass it through b64 decode/encode to avoid
# equivalent values from being considered unequal
try:
return b64encode(b64decode(b64_text.encode('ascii'))).decode('ascii')
except (ValueError, TypeError):
return b64_text
def run_notebook(filename, opts, resources=None):
"""Loads and runs a notebook, returning both the version prior to
running it and the version after running it.
"""
with open(filename) as f:
input_nb = nbformat.read(f, 4)
cleaned_input_nb = copy.deepcopy(input_nb)
for cell in cleaned_input_nb.cells:
if 'execution_count' in cell:
del cell['execution_count']
cell['outputs'] = []
if resources:
opts = {'resources': resources, **opts}
executor = NotebookClient(cleaned_input_nb, **opts)
with warnings.catch_warnings():
# suppress warning from jupyter_client's deprecated cleanup()
warnings.simplefilter(action='ignore', category=FutureWarning)
# Override terminal size to standardise traceback format
with modified_env({'COLUMNS': '80', 'LINES': '24'}):
output_nb = executor.execute()
return input_nb, output_nb
def run_notebook_wrapper(args):
# since concurrent.futures.ProcessPoolExecutor doesn't have starmap,
# we need to unpack the arguments
return run_notebook(*args)
async def async_run_notebook(filename, opts, resources=None):
"""Loads and runs a notebook, returning both the version prior to
running it and the version after running it.
"""
with open(filename) as f:
input_nb = nbformat.read(f, 4)
cleaned_input_nb = copy.deepcopy(input_nb)
for cell in cleaned_input_nb.cells:
if 'execution_count' in cell:
del cell['execution_count']
cell['outputs'] = []
if resources:
opts = {'resources': resources, **opts}
executor = NotebookClient(cleaned_input_nb, **opts)
# Override terminal size to standardise traceback format
with modified_env({'COLUMNS': '80', 'LINES': '24'}):
output_nb = await executor.async_execute()
return input_nb, output_nb
def prepare_cell_mocks(*messages_input, reply_msg=None):
"""
    This function prepares an executor object which has a fake kernel client
to mock the messages sent over zeromq. The mock kernel client will return
the messages passed into this wrapper back from ``preproc.kc.iopub_channel.get_msg``
callbacks. It also appends a kernel idle message to the end of messages.
"""
parent_id = 'fake_id'
messages = list(messages_input)
# Always terminate messages with an idle to exit the loop
messages.append({'msg_type': 'status', 'content': {'execution_state': 'idle'}})
def shell_channel_message_mock():
# Return the message generator for
# self.kc.shell_channel.get_msg => {'parent_header': {'msg_id': parent_id}}
return AsyncMock(
return_value=make_future(
NBClientTestsBase.merge_dicts(
{
'parent_header': {'msg_id': parent_id},
'content': {'status': 'ok', 'execution_count': 1},
},
reply_msg or {},
)
)
)
def iopub_messages_mock():
# Return the message generator for
# self.kc.iopub_channel.get_msg => messages[i]
return AsyncMock(
side_effect=[
# Default the parent_header so mocks don't need to include this
make_future(
NBClientTestsBase.merge_dicts({'parent_header': {'msg_id': parent_id}}, msg)
)
for msg in messages
]
)
def prepared_wrapper(func):
@functools.wraps(func)
def test_mock_wrapper(self):
"""
This inner function wrapper populates the executor object with
the fake kernel client. This client has its iopub and shell
channels mocked so as to fake the setup handshake and return
the messages passed into prepare_cell_mocks as the execute_cell loop
processes them.
"""
cell_mock = NotebookNode(
source='"foo" = "bar"', metadata={}, cell_type='code', outputs=[]
)
class NotebookClientWithParentID(NotebookClient):
parent_id: str
executor = NotebookClientWithParentID({}) # type:ignore
executor.nb = {'cells': [cell_mock]} # type:ignore
# self.kc.iopub_channel.get_msg => message_mock.side_effect[i]
message_mock = iopub_messages_mock()
executor.kc = MagicMock(
iopub_channel=MagicMock(get_msg=message_mock),
shell_channel=MagicMock(get_msg=shell_channel_message_mock()),
execute=MagicMock(return_value=parent_id),
is_alive=MagicMock(return_value=make_future(True)),
)
executor.parent_id = parent_id
return func(self, executor, cell_mock, message_mock)
return test_mock_wrapper
return prepared_wrapper
def normalize_output(output):
"""
Normalizes outputs for comparison.
"""
output = dict(output)
if 'metadata' in output:
del output['metadata']
if 'text' in output:
output['text'] = re.sub(addr_pat, '<HEXADDR>', output['text'])
if 'text/plain' in output.get('data', {}):
output['data']['text/plain'] = re.sub(addr_pat, '<HEXADDR>', output['data']['text/plain'])
if 'application/vnd.jupyter.widget-view+json' in output.get('data', {}):
output['data']['application/vnd.jupyter.widget-view+json']['model_id'] = '<MODEL_ID>'
if 'image/svg+xml' in output.get('data', {}):
output['data']['image/svg+xml'] = xmltodict.parse(output['data']['image/svg+xml'])
for key, value in output.get('data', {}).items():
if isinstance(value, str):
output['data'][key] = normalize_base64(value)
if 'traceback' in output:
tb = []
for line in output["traceback"]:
line = re.sub(ipython_input_pat, '<IPY-INPUT>', strip_ansi(line))
line = re.sub(ipython8_input_pat, '<IPY-INPUT>', strip_ansi(line))
tb.append(line)
output['traceback'] = tb
return output
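# Compare two notebooks cell by cell, using normalized outputs and execution counts.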
def assert_notebooks_equal(expected, actual):
expected_cells = expected['cells']
actual_cells = actual['cells']
assert len(expected_cells) == len(actual_cells)
for expected_cell, actual_cell in zip(expected_cells, actual_cells):
# Uncomment these to help debug test failures better
# from pprint import pprint
# pprint(expected_cell)
# pprint(actual_cell)
expected_outputs = expected_cell.get('outputs', [])
actual_outputs = actual_cell.get('outputs', [])
normalized_expected_outputs = list(map(normalize_output, expected_outputs))
normalized_actual_outputs = list(map(normalize_output, actual_outputs))
assert normalized_expected_outputs == normalized_actual_outputs
expected_execution_count = expected_cell.get('execution_count', None)
actual_execution_count = actual_cell.get('execution_count', None)
assert expected_execution_count == actual_execution_count
def notebook_resources():
"""
Prepare a notebook resources dictionary for executing test
notebooks in the ``files`` folder.
"""
return {'metadata': {'path': os.path.join(current_dir, 'files')}}
def filter_messages_on_error_output(err_output):
allowed_lines = [
        # ipykernel might be installed without the debugpy extension
"[IPKernelApp] WARNING | debugpy_stream undefined, debugging will not be enabled",
]
filtered_result = [line for line in err_output.splitlines() if line not in allowed_lines]
return os.linesep.join(filtered_result)
@pytest.mark.parametrize(
["input_name", "opts"],
[
("Other Comms.ipynb", dict(kernel_name="python")),
("Clear Output.ipynb", dict(kernel_name="python")),
("Empty Cell.ipynb", dict(kernel_name="python")),
("Factorials.ipynb", dict(kernel_name="python")),
("HelloWorld.ipynb", dict(kernel_name="python")),
("Inline Image.ipynb", dict(kernel_name="python")),
(
"Interrupt.ipynb",
dict(kernel_name="python", timeout=1, interrupt_on_timeout=True, allow_errors=True),
),
("JupyterWidgets.ipynb", dict(kernel_name="python")),
("Skip Exceptions with Cell Tags.ipynb", dict(kernel_name="python")),
("Skip Exceptions.ipynb", dict(kernel_name="python", allow_errors=True)),
("Skip Execution with Cell Tag.ipynb", dict(kernel_name="python")),
("SVG.ipynb", dict(kernel_name="python")),
("Unicode.ipynb", dict(kernel_name="python")),
("UnicodePy3.ipynb", dict(kernel_name="python")),
("update-display-id.ipynb", dict(kernel_name="python")),
("Check History in Memory.ipynb", dict(kernel_name="python")),
],
)
def test_run_all_notebooks(input_name, opts):
"""Runs a series of test notebooks and compares them to their actual output"""
input_file = os.path.join(current_dir, 'files', input_name)
input_nb, output_nb = run_notebook(input_file, opts, notebook_resources())
assert_notebooks_equal(input_nb, output_nb)
def test_parallel_notebooks(capfd, tmpdir):
"""Two notebooks should be able to be run simultaneously without problems.
The two notebooks spawned here use the filesystem to check that the other notebook
wrote to the filesystem."""
opts = dict(kernel_name="python")
input_name = "Parallel Execute {label}.ipynb"
input_file = os.path.join(current_dir, "files", input_name)
res = notebook_resources()
with modified_env({"NBEXECUTE_TEST_PARALLEL_TMPDIR": str(tmpdir)}):
threads = [
threading.Thread(target=run_notebook, args=(input_file.format(label=label), opts, res))
for label in ("A", "B")
]
for t in threads:
t.start()
for t in threads:
t.join(timeout=2)
captured = capfd.readouterr()
assert filter_messages_on_error_output(captured.err) == ""
def test_many_parallel_notebooks(capfd):
"""Ensure that when many IPython kernels are run in parallel, nothing awful happens.
Specifically, many IPython kernels when run simultaneously would encounter errors
due to using the same SQLite history database.
"""
opts = dict(kernel_name="python", timeout=5)
input_name = "HelloWorld.ipynb"
input_file = os.path.join(current_dir, "files", input_name)
res = NBClientTestsBase().build_resources()
res["metadata"]["path"] = os.path.join(current_dir, "files")
with warnings.catch_warnings():
# suppress warning from jupyter_client's deprecated cleanup()
warnings.simplefilter(action='ignore', category=FutureWarning)
# run once, to trigger creating the original context
run_notebook(input_file, opts, res)
with concurrent.futures.ProcessPoolExecutor(max_workers=2) as executor:
executor.map(run_notebook_wrapper, [(input_file, opts, res) for i in range(8)])
captured = capfd.readouterr()
assert filter_messages_on_error_output(captured.err) == ""
def test_async_parallel_notebooks(capfd, tmpdir):
"""Two notebooks should be able to be run simultaneously without problems.
The two notebooks spawned here use the filesystem to check that the other notebook
wrote to the filesystem."""
opts = dict(kernel_name="python")
input_name = "Parallel Execute {label}.ipynb"
input_file = os.path.join(current_dir, "files", input_name)
res = notebook_resources()
with modified_env({"NBEXECUTE_TEST_PARALLEL_TMPDIR": str(tmpdir)}):
async def run_tasks():
tasks = [
async_run_notebook(input_file.format(label=label), opts, res)
for label in ("A", "B")
]
await asyncio.gather(*tasks)
asyncio.run(run_tasks())
captured = capfd.readouterr()
assert filter_messages_on_error_output(captured.err) == ""
def test_many_async_parallel_notebooks(capfd):
"""Ensure that when many IPython kernels are run in parallel, nothing awful happens.
Specifically, many IPython kernels when run simultaneously would encounter errors
due to using the same SQLite history database.
"""
opts = dict(kernel_name="python", timeout=5)
input_name = "HelloWorld.ipynb"
input_file = os.path.join(current_dir, "files", input_name)
res = NBClientTestsBase().build_resources()
res["metadata"]["path"] = os.path.join(current_dir, "files")
# run once, to trigger creating the original context
run_notebook(input_file, opts, res)
async def run_tasks():
tasks = [async_run_notebook(input_file, opts, res) for i in range(4)]
await asyncio.gather(*tasks)
asyncio.run(run_tasks())
captured = capfd.readouterr()
assert filter_messages_on_error_output(captured.err) == ""
def test_execution_timing():
"""Compare the execution timing information stored in the cell with the
actual time it took to run the cell. Also check for the cell timing string
format."""
opts = dict(kernel_name="python")
input_name = "Sleep1s.ipynb"
input_file = os.path.join(current_dir, "files", input_name)
res = notebook_resources()
input_nb, output_nb = run_notebook(input_file, opts, res)
def get_time_from_str(s):
time_format = '%Y-%m-%dT%H:%M:%S.%fZ'
return datetime.datetime.strptime(s, time_format)
execution_timing = output_nb['cells'][1]['metadata']['execution']
status_busy = get_time_from_str(execution_timing['iopub.status.busy'])
execute_input = get_time_from_str(execution_timing['iopub.execute_input'])
execute_reply = get_time_from_str(execution_timing['shell.execute_reply'])
status_idle = get_time_from_str(execution_timing['iopub.status.idle'])
cell_start = get_time_from_str(output_nb['cells'][2]['outputs'][0]['text'])
cell_end = get_time_from_str(output_nb['cells'][3]['outputs'][0]['text'])
delta = datetime.timedelta(milliseconds=100)
assert status_busy - cell_start < delta
assert execute_input - cell_start < delta
assert execute_reply - cell_end < delta
assert status_idle - cell_end < delta
def test_synchronous_setup_kernel():
nb = nbformat.v4.new_notebook()
executor = NotebookClient(nb)
with executor.setup_kernel():
# Prove it initialized client
assert executor.kc is not None
# Prove it removed the client (and hopefully cleaned up)
assert executor.kc is None
def test_startnewkernel_with_kernelmanager():
nb = nbformat.v4.new_notebook()
km = KernelManager()
executor = NotebookClient(nb, km=km)
executor.start_new_kernel()
kc = executor.start_new_kernel_client()
# prove it initialized client
assert kc is not None
# since we are not using the setup_kernel context manager,
# cleanup has to be done manually
kc.shutdown()
km.cleanup_resources()
kc.stop_channels()
def test_start_new_kernel_history_file_setting():
nb = nbformat.v4.new_notebook()
km = KernelManager()
executor = NotebookClient(nb, km=km)
kc = km.client()
# Should start empty
assert executor.extra_arguments == []
# Should assign memory setting for ipykernel
executor.start_new_kernel()
assert executor.extra_arguments == ['--HistoryManager.hist_file=:memory:']
# Should not add a second hist_file assignment
executor.start_new_kernel()
assert executor.extra_arguments == ['--HistoryManager.hist_file=:memory:']
# since we are not using the setup_kernel context manager,
# cleanup has to be done manually
kc.shutdown()
km.cleanup_resources()
kc.stop_channels()
def test_start_new_kernel_client_cleans_up_kernel_on_failure():
class FakeClient(KernelClient):
def start_channels(
self,
shell: bool = True,
iopub: bool = True,
stdin: bool = True,
hb: bool = True,
control: bool = True,
) -> None:
raise Exception("Any error")
def stop_channels(self) -> None:
pass
nb = nbformat.v4.new_notebook()
km = KernelManager()
km.client_factory = FakeClient
executor = NotebookClient(nb, km=km)
executor.start_new_kernel()
assert km.has_kernel
assert executor.km is not None
with pytest.raises(Exception) as err:
executor.start_new_kernel_client()
assert str(err.value.args[0]) == "Any error"
assert executor.kc is None
assert executor.km is None
assert not km.has_kernel
class TestExecute(NBClientTestsBase):
"""Contains test functions for execute.py"""
maxDiff = None
def test_constructor(self):
NotebookClient({}) # type:ignore
def test_populate_language_info(self):
nb = nbformat.v4.new_notebook() # Certainly has no language_info.
executor = NotebookClient(nb, kernel_name="python")
nb = executor.execute()
assert 'language_info' in nb.metadata
def test_empty_path(self):
"""Can the kernel be started when the path is empty?"""
filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')
res = self.build_resources()
res['metadata']['path'] = ''
input_nb, output_nb = run_notebook(filename, {}, res)
assert_notebooks_equal(input_nb, output_nb)
@pytest.mark.xfail(
"python3" not in KernelSpecManager().find_kernel_specs(),
reason="requires a python3 kernelspec",
)
def test_empty_kernel_name(self):
"""Can kernel in nb metadata be found when an empty string is passed?
Note: this pattern should be discouraged in practice.
Passing in no kernel_name to NotebookClient is recommended instead.
"""
filename = os.path.join(current_dir, 'files', 'UnicodePy3.ipynb')
res = self.build_resources()
input_nb, output_nb = run_notebook(filename, {"kernel_name": ""}, res)
assert_notebooks_equal(input_nb, output_nb)
with pytest.raises(TraitError):
input_nb, output_nb = run_notebook(filename, {"kernel_name": None}, res)
def test_disable_stdin(self):
"""Test disabling standard input"""
filename = os.path.join(current_dir, 'files', 'Disable Stdin.ipynb')
res = self.build_resources()
res['metadata']['path'] = os.path.dirname(filename)
input_nb, output_nb = run_notebook(filename, dict(allow_errors=True), res)
# We need to special-case this particular notebook, because the
# traceback contains machine-specific stuff like where IPython
# is installed. It is sufficient here to just check that an error
# was thrown, and that it was a StdinNotImplementedError
self.assertEqual(len(output_nb['cells']), 1)
self.assertEqual(len(output_nb['cells'][0]['outputs']), 1)
output = output_nb['cells'][0]['outputs'][0]
self.assertEqual(output['output_type'], 'error')
self.assertEqual(output['ename'], 'StdinNotImplementedError')
self.assertEqual(
output['evalue'],
'raw_input was called, but this frontend does not support input requests.',
)
def test_timeout(self):
"""Check that an error is raised when a computation times out"""
filename = os.path.join(current_dir, 'files', 'Interrupt.ipynb')
res = self.build_resources()
res['metadata']['path'] = os.path.dirname(filename)
with pytest.raises(TimeoutError) as err:
run_notebook(filename, dict(timeout=1), res)
self.assertEqual(
str(err.value.args[0]),
"""A cell timed out while it was being executed, after 1 seconds.
The message was: Cell execution timed out.
Here is a preview of the cell contents:
-------------------
while True: continue
-------------------
""",
)
def test_timeout_func(self):
"""Check that an error is raised when a computation times out"""
filename = os.path.join(current_dir, 'files', 'Interrupt.ipynb')
res = self.build_resources()
res['metadata']['path'] = os.path.dirname(filename)
def timeout_func(source):
return 10
with pytest.raises(TimeoutError):
run_notebook(filename, dict(timeout_func=timeout_func), res)
def test_kernel_death_after_timeout(self):
"""Check that an error is raised when the kernel is_alive is false after a cell timed out"""
filename = os.path.join(current_dir, 'files', 'Interrupt.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
res = self.build_resources()
res['metadata']['path'] = os.path.dirname(filename)
executor = NotebookClient(input_nb, timeout=1)
with pytest.raises(TimeoutError):
executor.execute()
km = executor.create_kernel_manager()
async def is_alive():
return False
km.is_alive = is_alive
# Will be a RuntimeError or subclass DeadKernelError depending
# on if jupyter_client or nbconvert catches the dead client first
with pytest.raises(RuntimeError):
input_nb, output_nb = executor.execute()
def test_kernel_death_during_execution(self):
"""Check that an error is raised when the kernel is_alive is false during a cell
execution.
"""
filename = os.path.join(current_dir, 'files', 'Autokill.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
executor = NotebookClient(input_nb)
with pytest.raises(RuntimeError):
executor.execute()
def test_allow_errors(self):
"""
Check that conversion halts if ``allow_errors`` is False.
"""
filename = os.path.join(current_dir, 'files', 'Skip Exceptions.ipynb')
res = self.build_resources()
res['metadata']['path'] = os.path.dirname(filename)
with pytest.raises(CellExecutionError) as exc:
run_notebook(filename, dict(allow_errors=False), res)
self.assertIsInstance(str(exc.value), str)
assert "# üñîçø∂é" in str(exc.value)
def test_force_raise_errors(self):
"""
Check that conversion halts if the ``force_raise_errors`` traitlet on
NotebookClient is set to True.
"""
filename = os.path.join(current_dir, 'files', 'Skip Exceptions with Cell Tags.ipynb')
res = self.build_resources()
res['metadata']['path'] = os.path.dirname(filename)
with pytest.raises(CellExecutionError) as exc:
run_notebook(filename, dict(force_raise_errors=True), res)
self.assertIsInstance(str(exc.value), str)
assert "# üñîçø∂é" in str(exc.value)
def test_reset_kernel_client(self):
filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
executor = NotebookClient(
input_nb,
resources=self.build_resources(),
)
executor.execute(cleanup_kc=False)
# we didn't ask to reset the kernel client, a new one must have been created
kc = executor.kc
assert kc is not None
executor.execute(cleanup_kc=False)
# we didn't ask to reset the kernel client, the previously created one must have been reused
assert kc == executor.kc
executor.execute(reset_kc=True, cleanup_kc=False)
# we asked to reset the kernel client, the previous one must have been cleaned up,
# a new one must have been created
assert kc != executor.kc
def test_cleanup_kernel_client(self):
filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
executor = NotebookClient(
input_nb,
resources=self.build_resources(),
)
executor.execute()
# we asked to cleanup the kernel client (default is True)
assert executor.kc is None
executor.execute(cleanup_kc=False)
# we didn't ask to reset the kernel client
# a new one must have been created and should still be available
assert executor.kc is not None
def test_custom_kernel_manager(self):
from .fake_kernelmanager import FakeCustomKernelManager
filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
cleaned_input_nb = copy.deepcopy(input_nb)
for cell in cleaned_input_nb.cells:
if 'execution_count' in cell:
del cell['execution_count']
cell['outputs'] = []
executor = NotebookClient(
cleaned_input_nb,
resources=self.build_resources(),
kernel_manager_class=FakeCustomKernelManager,
)
# Override terminal size to standardise traceback format
with modified_env({'COLUMNS': '80', 'LINES': '24'}):
executor.execute()
expected = FakeCustomKernelManager.expected_methods.items()
for method, call_count in expected:
self.assertNotEqual(call_count, 0, f'{method} was called')
def test_process_message_wrapper(self):
outputs: list = []
class WrappedPreProc(NotebookClient):
def process_message(self, msg, cell, cell_index):
result = super().process_message(msg, cell, cell_index)
if result:
outputs.append(result)
return result
current_dir = os.path.dirname(__file__)
filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
original = copy.deepcopy(input_nb)
wpp = WrappedPreProc(input_nb)
executed = wpp.execute()
assert outputs == [{'name': 'stdout', 'output_type': 'stream', 'text': 'Hello World\n'}]
assert_notebooks_equal(original, executed)
def test_execute_function(self):
# Test the execute() convenience API
filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
original = copy.deepcopy(input_nb)
executed = execute(original, os.path.dirname(filename))
assert_notebooks_equal(original, executed)
def test_widgets(self):
"""Runs a test notebook with widgets and checks the widget state is saved."""
input_file = os.path.join(current_dir, 'files', 'JupyterWidgets.ipynb')
opts = dict(kernel_name="python")
res = self.build_resources()
res['metadata']['path'] = os.path.dirname(input_file)
input_nb, output_nb = run_notebook(input_file, opts, res)
output_data = [
output.get('data', {}) for cell in output_nb['cells'] for output in cell['outputs']
]
model_ids = [
data['application/vnd.jupyter.widget-view+json']['model_id']
for data in output_data
if 'application/vnd.jupyter.widget-view+json' in data
]
wdata = output_nb['metadata']['widgets']['application/vnd.jupyter.widget-state+json']
for k in model_ids:
d = wdata['state'][k]
assert 'model_name' in d
assert 'model_module' in d
assert 'state' in d
assert 'version_major' in wdata
assert 'version_minor' in wdata
def test_execution_hook(self):
filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
executor, hooks = get_executor_with_hooks(nb=input_nb)
executor.execute()
hooks["on_cell_start"].assert_called_once()
hooks["on_cell_execute"].assert_called_once()
hooks["on_cell_complete"].assert_called_once()
hooks["on_cell_executed"].assert_called_once()
hooks["on_cell_error"].assert_not_called()
hooks["on_notebook_start"].assert_called_once()
hooks["on_notebook_complete"].assert_called_once()
hooks["on_notebook_error"].assert_not_called()
def test_error_execution_hook_error(self):
filename = os.path.join(current_dir, 'files', 'Error.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
executor, hooks = get_executor_with_hooks(nb=input_nb)
with pytest.raises(CellExecutionError):
executor.execute()
hooks["on_cell_start"].assert_called_once()
hooks["on_cell_execute"].assert_called_once()
hooks["on_cell_complete"].assert_called_once()
hooks["on_cell_executed"].assert_called_once()
hooks["on_cell_error"].assert_called_once()
hooks["on_notebook_start"].assert_called_once()
hooks["on_notebook_complete"].assert_called_once()
hooks["on_notebook_error"].assert_not_called()
def test_error_notebook_hook(self):
filename = os.path.join(current_dir, 'files', 'Autokill.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
executor, hooks = get_executor_with_hooks(nb=input_nb)
with pytest.raises(RuntimeError):
executor.execute()
hooks["on_cell_start"].assert_called_once()
hooks["on_cell_execute"].assert_called_once()
hooks["on_cell_complete"].assert_called_once()
hooks["on_cell_executed"].assert_not_called()
hooks["on_cell_error"].assert_not_called()
hooks["on_notebook_start"].assert_called_once()
hooks["on_notebook_complete"].assert_called_once()
hooks["on_notebook_error"].assert_called_once()
def test_async_execution_hook(self):
filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
executor, hooks = get_executor_with_hooks(nb=input_nb)
executor.execute()
hooks["on_cell_start"].assert_called_once()
hooks["on_cell_execute"].assert_called_once()
hooks["on_cell_complete"].assert_called_once()
hooks["on_cell_executed"].assert_called_once()
hooks["on_cell_error"].assert_not_called()
hooks["on_notebook_start"].assert_called_once()
hooks["on_notebook_complete"].assert_called_once()
hooks["on_notebook_error"].assert_not_called()
def test_error_async_execution_hook(self):
filename = os.path.join(current_dir, 'files', 'Error.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
executor, hooks = get_executor_with_hooks(nb=input_nb)
with pytest.raises(CellExecutionError):
executor.execute()
hooks["on_cell_start"].assert_called_once()
hooks["on_cell_execute"].assert_called_once()
hooks["on_cell_complete"].assert_called_once()
hooks["on_cell_executed"].assert_called_once()
hooks["on_cell_error"].assert_called_once()
hooks["on_notebook_start"].assert_called_once()
hooks["on_notebook_complete"].assert_called_once()
hooks["on_notebook_error"].assert_not_called()
class TestRunCell(NBClientTestsBase):
"""Contains test functions for NotebookClient.execute_cell"""
@prepare_cell_mocks()
def test_idle_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# Just the exit message should be fetched
assert message_mock.call_count == 1
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'execute_reply'},
'parent_header': {'msg_id': 'wrong_parent'},
'content': {'name': 'stdout', 'text': 'foo'},
}
)
def test_message_for_wrong_parent(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An ignored stream followed by an idle
assert message_mock.call_count == 2
# Ensure no output was written
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'status',
'header': {'msg_type': 'status'},
'content': {'execution_state': 'busy'},
}
)
def test_busy_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# One busy message, followed by an idle
assert message_mock.call_count == 2
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stdout', 'text': 'foo'},
},
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stderr', 'text': 'bar'},
},
)
def test_deadline_exec_reply(self, executor, cell_mock, message_mock):
# exec_reply is never received, so we expect to hit the timeout.
async def get_msg(timeout):
await asyncio.sleep(timeout)
raise Empty
executor.kc.shell_channel.get_msg = get_msg
executor.timeout = 1
with pytest.raises(TimeoutError):
executor.execute_cell(cell_mock, 0)
assert message_mock.call_count == 3
# Ensure the output was captured
self.assertListEqual(
cell_mock.outputs,
[
{'output_type': 'stream', 'name': 'stdout', 'text': 'foo'},
{'output_type': 'stream', 'name': 'stderr', 'text': 'bar'},
],
)
@prepare_cell_mocks()
def test_deadline_iopub(self, executor, cell_mock, message_mock):
# The shell_channel will complete, so we expect only to hit the iopub timeout.
message_mock.side_effect = Empty()
executor.raise_on_iopub_timeout = True
with pytest.raises(TimeoutError):
executor.execute_cell(cell_mock, 0)
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stdout', 'text': 'foo'},
},
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stderr', 'text': 'bar'},
},
)
def test_eventual_deadline_iopub(self, executor, cell_mock, message_mock):
# Process a few messages before raising a timeout from iopub
def message_seq(messages):
yield from messages
while True:
yield Empty()
message_mock.side_effect = message_seq(list(message_mock.side_effect)[:-1])
executor.kc.shell_channel.get_msg = Mock(
return_value=make_future({'parent_header': {'msg_id': executor.parent_id}})
)
executor.raise_on_iopub_timeout = True
with pytest.raises(TimeoutError):
executor.execute_cell(cell_mock, 0)
assert message_mock.call_count >= 3
# Ensure the output was captured
self.assertListEqual(
cell_mock.outputs,
[
{'output_type': 'stream', 'name': 'stdout', 'text': 'foo'},
{'output_type': 'stream', 'name': 'stderr', 'text': 'bar'},
],
)
@prepare_cell_mocks(
{'msg_type': 'execute_input', 'header': {'msg_type': 'execute_input'}, 'content': {}}
)
def test_execute_input_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# One ignored execute_input, followed by an idle
assert message_mock.call_count == 2
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stdout', 'text': 'foo'},
},
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stderr', 'text': 'bar'},
},
)
def test_stream_messages(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An stdout then stderr stream followed by an idle
assert message_mock.call_count == 3
# Ensure the output was captured
self.assertListEqual(
cell_mock.outputs,
[
{'output_type': 'stream', 'name': 'stdout', 'text': 'foo'},
{'output_type': 'stream', 'name': 'stderr', 'text': 'bar'},
],
)
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'execute_reply'},
'content': {'name': 'stdout', 'text': 'foo'},
},
{'msg_type': 'clear_output', 'header': {'msg_type': 'clear_output'}, 'content': {}},
)
def test_clear_output_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# A stream, followed by a clear, and then an idle
assert message_mock.call_count == 3
# Ensure the output was cleared
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stdout', 'text': 'foo'},
},
{
'msg_type': 'clear_output',
'header': {'msg_type': 'clear_output'},
'content': {'wait': True},
},
)
def test_clear_output_wait_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# A stream, followed by a clear, and then an idle
assert message_mock.call_count == 3
# Should be true without another message to trigger the clear
self.assertTrue(executor.clear_before_next_output)
# Ensure the output wasn't cleared yet
assert cell_mock.outputs == [{'output_type': 'stream', 'name': 'stdout', 'text': 'foo'}]
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stdout', 'text': 'foo'},
},
{
'msg_type': 'clear_output',
'header': {'msg_type': 'clear_output'},
'content': {'wait': True},
},
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stderr', 'text': 'bar'},
},
)
def test_clear_output_wait_then_message_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An stdout stream, followed by a wait clear, an stderr stream, and then an idle
assert message_mock.call_count == 4
# Should be false after the stderr message
assert not executor.clear_before_next_output
# Ensure the output wasn't cleared yet
assert cell_mock.outputs == [{'output_type': 'stream', 'name': 'stderr', 'text': 'bar'}]
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stdout', 'text': 'foo'},
},
{
'msg_type': 'clear_output',
'header': {'msg_type': 'clear_output'},
'content': {'wait': True},
},
{
'msg_type': 'update_display_data',
'header': {'msg_type': 'update_display_data'},
'content': {'metadata': {'metafoo': 'metabar'}, 'data': {'foo': 'bar'}},
},
)
def test_clear_output_wait_then_update_display_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
        # An stdout stream, followed by a wait clear, an update_display_data, and then an idle
        assert message_mock.call_count == 4
        # The clear should still be pending: update_display_data does not add an output
        assert executor.clear_before_next_output
# Ensure the output wasn't cleared yet because update_display doesn't add outputs
assert cell_mock.outputs == [{'output_type': 'stream', 'name': 'stdout', 'text': 'foo'}]
@prepare_cell_mocks(
{
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
'content': {'execution_count': 42},
}
)
def test_execution_count_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An execution count followed by an idle
assert message_mock.call_count == 2
assert cell_mock.execution_count == 42
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
'content': {'execution_count': 42},
}
)
def test_execution_count_message_ignored_on_override(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0, execution_count=21)
# An execution count followed by an idle
assert message_mock.call_count == 2
assert cell_mock.execution_count == 21
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'execution_count': 42, 'name': 'stdout', 'text': 'foo'},
}
)
def test_execution_count_with_stream_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An execution count followed by an idle
assert message_mock.call_count == 2
assert cell_mock.execution_count == 42
# Should also consume the message stream
assert cell_mock.outputs == [{'output_type': 'stream', 'name': 'stdout', 'text': 'foo'}]
@prepare_cell_mocks(
{
'msg_type': 'comm',
'header': {'msg_type': 'comm'},
'content': {'comm_id': 'foobar', 'data': {'state': {'foo': 'bar'}}},
}
)
def test_widget_comm_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# A comm message without buffer info followed by an idle
assert message_mock.call_count == 2
self.assertEqual(executor.widget_state, {'foobar': {'foo': 'bar'}})
# Buffers should still be empty
assert not executor.widget_buffers
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'comm',
'header': {'msg_type': 'comm'},
'buffers': [b'123'],
'content': {
'comm_id': 'foobar',
'data': {'state': {'foo': 'bar'}, 'buffer_paths': [['path']]},
},
}
)
def test_widget_comm_buffer_message_single(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# A comm message with buffer info followed by an idle
assert message_mock.call_count == 2
assert executor.widget_state == {'foobar': {'foo': 'bar'}}
assert executor.widget_buffers == {
'foobar': {('path',): {'data': 'MTIz', 'encoding': 'base64', 'path': ['path']}}
}
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'comm',
'header': {'msg_type': 'comm'},
'buffers': [b'123'],
'content': {
'comm_id': 'foobar',
'data': {'state': {'foo': 'bar'}, 'buffer_paths': [['path']]},
},
},
{
'msg_type': 'comm',
'header': {'msg_type': 'comm'},
'buffers': [b'123'],
'content': {
'comm_id': 'foobar',
'data': {'state': {'foo2': 'bar2'}, 'buffer_paths': [['path2']]},
},
},
)
def test_widget_comm_buffer_messages(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# A comm message with buffer info followed by an idle
assert message_mock.call_count == 3
assert executor.widget_state == {'foobar': {'foo': 'bar', 'foo2': 'bar2'}}
assert executor.widget_buffers == {
'foobar': {
('path',): {'data': 'MTIz', 'encoding': 'base64', 'path': ['path']},
('path2',): {'data': 'MTIz', 'encoding': 'base64', 'path': ['path2']},
}
}
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'comm',
'header': {'msg_type': 'comm'},
'content': {
'comm_id': 'foobar',
# No 'state'
'data': {'foo': 'bar'},
},
}
)
def test_unknown_comm_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An unknown comm message followed by an idle
assert message_mock.call_count == 2
# Widget states should be empty as the message has the wrong shape
assert not executor.widget_state
assert not executor.widget_buffers
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'execute_result',
'header': {'msg_type': 'execute_result'},
'content': {
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
'execution_count': 42,
},
}
)
def test_execute_result_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An execute followed by an idle
assert message_mock.call_count == 2
assert cell_mock.execution_count == 42
# Should generate an associated message
assert cell_mock.outputs == [
{
'output_type': 'execute_result',
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
'execution_count': 42,
}
]
# No display id was provided
assert not executor._display_id_map
@prepare_cell_mocks(
{
'msg_type': 'execute_result',
'header': {'msg_type': 'execute_result'},
'content': {
'transient': {'display_id': 'foobar'},
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
'execution_count': 42,
},
}
)
def test_execute_result_with_display_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An execute followed by an idle
assert message_mock.call_count == 2
assert cell_mock.execution_count == 42
# Should generate an associated message
assert cell_mock.outputs == [
{
'output_type': 'execute_result',
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
'execution_count': 42,
}
]
assert 'foobar' in executor._display_id_map
@prepare_cell_mocks(
{
'msg_type': 'display_data',
'header': {'msg_type': 'display_data'},
'content': {'metadata': {'metafoo': 'metabar'}, 'data': {'foo': 'bar'}},
}
)
def test_display_data_without_id_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# A display followed by an idle
assert message_mock.call_count == 2
# Should generate an associated message
assert cell_mock.outputs == [
{
'output_type': 'display_data',
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
}
]
# No display id was provided
assert not executor._display_id_map
@prepare_cell_mocks(
{
'msg_type': 'display_data',
'header': {'msg_type': 'display_data'},
'content': {
'transient': {'display_id': 'foobar'},
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
},
}
)
def test_display_data_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# A display followed by an idle
assert message_mock.call_count == 2
# Should generate an associated message
assert cell_mock.outputs == [
{
'output_type': 'display_data',
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
}
]
assert 'foobar' in executor._display_id_map
@prepare_cell_mocks(
{
'msg_type': 'display_data',
'header': {'msg_type': 'display_data'},
'content': {
'transient': {'display_id': 'foobar'},
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
},
},
{
'msg_type': 'display_data',
'header': {'msg_type': 'display_data'},
'content': {
'transient': {'display_id': 'foobar_other'},
'metadata': {'metafoo_other': 'metabar_other'},
'data': {'foo': 'bar_other'},
},
},
{
'msg_type': 'display_data',
'header': {'msg_type': 'display_data'},
'content': {
'transient': {'display_id': 'foobar'},
'metadata': {'metafoo2': 'metabar2'},
'data': {'foo': 'bar2', 'baz': 'foobarbaz'},
},
},
)
def test_display_data_same_id_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# A display followed by an idle
assert message_mock.call_count == 4
        # The first output shares its display id with the third message, so it is updated in
        # place with the latest data; the second and third messages still append their own outputs
assert cell_mock.outputs == [
{
'output_type': 'display_data',
'metadata': {'metafoo2': 'metabar2'},
'data': {'foo': 'bar2', 'baz': 'foobarbaz'},
},
{
'output_type': 'display_data',
'metadata': {'metafoo_other': 'metabar_other'},
'data': {'foo': 'bar_other'},
},
{
'output_type': 'display_data',
'metadata': {'metafoo2': 'metabar2'},
'data': {'foo': 'bar2', 'baz': 'foobarbaz'},
},
]
assert 'foobar' in executor._display_id_map
@prepare_cell_mocks(
{
'msg_type': 'update_display_data',
'header': {'msg_type': 'update_display_data'},
'content': {'metadata': {'metafoo': 'metabar'}, 'data': {'foo': 'bar'}},
}
)
def test_update_display_data_without_id_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An update followed by an idle
assert message_mock.call_count == 2
# Display updates don't create any outputs
assert cell_mock.outputs == []
# No display id was provided
assert not executor._display_id_map
@prepare_cell_mocks(
{
'msg_type': 'display_data',
'header': {'msg_type': 'display_data'},
'content': {
'transient': {'display_id': 'foobar'},
'metadata': {'metafoo2': 'metabar2'},
'data': {'foo': 'bar2', 'baz': 'foobarbaz'},
},
},
{
'msg_type': 'update_display_data',
'header': {'msg_type': 'update_display_data'},
'content': {
'transient': {'display_id': 'foobar2'},
'metadata': {'metafoo2': 'metabar2'},
'data': {'foo': 'bar2', 'baz': 'foobarbaz'},
},
},
)
def test_update_display_data_mismatch_id_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# A display, an update, and an idle
assert message_mock.call_count == 3
# The update targets a different display id, so the original output is unchanged
assert cell_mock.outputs == [
{
'output_type': 'display_data',
'metadata': {'metafoo2': 'metabar2'},
'data': {'foo': 'bar2', 'baz': 'foobarbaz'},
}
]
assert 'foobar' in executor._display_id_map
@prepare_cell_mocks(
{
'msg_type': 'display_data',
'header': {'msg_type': 'display_data'},
'content': {
'transient': {'display_id': 'foobar'},
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
},
},
{
'msg_type': 'update_display_data',
'header': {'msg_type': 'update_display_data'},
'content': {
'transient': {'display_id': 'foobar'},
'metadata': {'metafoo2': 'metabar2'},
'data': {'foo': 'bar2', 'baz': 'foobarbaz'},
},
},
)
def test_update_display_data_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# A display followed by an update then an idle
assert message_mock.call_count == 3
# Original output should be manipulated
assert cell_mock.outputs == [
{
'output_type': 'display_data',
'metadata': {'metafoo2': 'metabar2'},
'data': {'foo': 'bar2', 'baz': 'foobarbaz'},
}
]
assert 'foobar' in executor._display_id_map
@prepare_cell_mocks(
{
'msg_type': 'error',
'header': {'msg_type': 'error'},
'content': {'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']},
}
)
def test_error_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An error followed by an idle
assert message_mock.call_count == 2
# Should also consume the message stream
assert cell_mock.outputs == [
{'output_type': 'error', 'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']}
]
@prepare_cell_mocks(
{
'msg_type': 'error',
'header': {'msg_type': 'error'},
'content': {'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']},
},
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# ERROR
'content': {'status': 'error'},
},
)
def test_error_and_error_status_messages(self, executor, cell_mock, message_mock):
with self.assertRaises(CellExecutionError):
executor.execute_cell(cell_mock, 0)
# An error followed by an idle
assert message_mock.call_count == 2
# Cell outputs should still be copied
assert cell_mock.outputs == [
{'output_type': 'error', 'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']}
]
@prepare_cell_mocks(
{
'msg_type': 'error',
'header': {'msg_type': 'error'},
'content': {'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']},
},
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# OK
'content': {'status': 'ok'},
},
)
def test_error_message_only(self, executor, cell_mock, message_mock):
# Should NOT raise
executor.execute_cell(cell_mock, 0)
# An error followed by an idle
assert message_mock.call_count == 2
# Should also consume the message stream
assert cell_mock.outputs == [
{'output_type': 'error', 'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']}
]
@prepare_cell_mocks(
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# ERROR
'content': {'status': 'error'},
}
)
def test_allow_errors(self, executor, cell_mock, message_mock):
executor.allow_errors = True
# Should NOT raise
executor.execute_cell(cell_mock, 0)
# Just the idle message (no error output was sent on iopub)
assert message_mock.call_count == 1
# Should also consume the message stream
assert cell_mock.outputs == []
@prepare_cell_mocks(
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# ERROR
'content': {'status': 'error', 'ename': 'NotImplementedError'},
}
)
def test_allow_error_names(self, executor, cell_mock, message_mock):
executor.allow_error_names = ['NotImplementedError']
# Should NOT raise
executor.execute_cell(cell_mock, 0)
# Just the idle message (no error output was sent on iopub)
assert message_mock.call_count == 1
# Should also consume the message stream
assert cell_mock.outputs == []
@prepare_cell_mocks(
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# ERROR
'content': {'status': 'error'},
}
)
def test_raises_exception_tag(self, executor, cell_mock, message_mock):
cell_mock.metadata['tags'] = ['raises-exception']
# Should NOT raise
executor.execute_cell(cell_mock, 0)
# Just the idle message (no error output was sent on iopub)
assert message_mock.call_count == 1
# Should also consume the message stream
assert cell_mock.outputs == []
@prepare_cell_mocks(
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# ERROR
'content': {'status': 'error'},
}
)
def test_non_code_cell(self, executor, cell_mock, message_mock):
cell_mock = NotebookNode(source='"foo" = "bar"', metadata={}, cell_type='raw', outputs=[])
# Should NOT raise nor execute any code
executor.execute_cell(cell_mock, 0)
# No messages should have been fetched for a non-code cell
assert message_mock.call_count == 0
# and no outputs recorded
assert cell_mock.outputs == []
@prepare_cell_mocks(
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# ERROR
'content': {'status': 'error'},
}
)
def test_no_source(self, executor, cell_mock, message_mock):
cell_mock = NotebookNode(
# Stripped source is empty
source=' ',
metadata={},
cell_type='code',
outputs=[],
)
# Should NOT raise nor execute any code
executor.execute_cell(cell_mock, 0)
# No messages should have been fetched for an empty cell
assert message_mock.call_count == 0
# and no outputs recorded
assert cell_mock.outputs == []
@prepare_cell_mocks()
def test_cell_hooks(self, executor, cell_mock, message_mock):
executor, hooks = get_executor_with_hooks(executor=executor)
executor.execute_cell(cell_mock, 0)
hooks["on_cell_start"].assert_called_once_with(cell=cell_mock, cell_index=0)
hooks["on_cell_execute"].assert_called_once_with(cell=cell_mock, cell_index=0)
hooks["on_cell_complete"].assert_called_once_with(cell=cell_mock, cell_index=0)
hooks["on_cell_executed"].assert_called_once_with(
cell=cell_mock, cell_index=0, execute_reply=EXECUTE_REPLY_OK
)
hooks["on_cell_error"].assert_not_called()
hooks["on_notebook_start"].assert_not_called()
hooks["on_notebook_complete"].assert_not_called()
hooks["on_notebook_error"].assert_not_called()
@prepare_cell_mocks(
{
'msg_type': 'error',
'header': {'msg_type': 'error'},
'content': {'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']},
},
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# ERROR
'content': {'status': 'error'},
},
)
def test_error_cell_hooks(self, executor, cell_mock, message_mock):
executor, hooks = get_executor_with_hooks(executor=executor)
with self.assertRaises(CellExecutionError):
executor.execute_cell(cell_mock, 0)
hooks["on_cell_start"].assert_called_once_with(cell=cell_mock, cell_index=0)
hooks["on_cell_execute"].assert_called_once_with(cell=cell_mock, cell_index=0)
hooks["on_cell_complete"].assert_called_once_with(cell=cell_mock, cell_index=0)
hooks["on_cell_executed"].assert_called_once_with(
cell=cell_mock, cell_index=0, execute_reply=EXECUTE_REPLY_ERROR
)
hooks["on_cell_error"].assert_called_once_with(
cell=cell_mock, cell_index=0, execute_reply=EXECUTE_REPLY_ERROR
)
hooks["on_notebook_start"].assert_not_called()
hooks["on_notebook_complete"].assert_not_called()
hooks["on_notebook_error"].assert_not_called()
@prepare_cell_mocks(
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# ERROR
'content': {'status': 'error'},
}
)
def test_non_code_cell_hooks(self, executor, cell_mock, message_mock):
cell_mock = NotebookNode(source='"foo" = "bar"', metadata={}, cell_type='raw', outputs=[])
executor, hooks = get_executor_with_hooks(executor=executor)
executor.execute_cell(cell_mock, 0)
hooks["on_cell_start"].assert_called_once_with(cell=cell_mock, cell_index=0)
hooks["on_cell_execute"].assert_not_called()
hooks["on_cell_complete"].assert_not_called()
hooks["on_cell_executed"].assert_not_called()
hooks["on_cell_error"].assert_not_called()
hooks["on_notebook_start"].assert_not_called()
hooks["on_notebook_complete"].assert_not_called()
hooks["on_notebook_error"].assert_not_called()
@prepare_cell_mocks()
def test_async_cell_hooks(self, executor, cell_mock, message_mock):
executor, hooks = get_executor_with_hooks(executor=executor, async_hooks=True)
executor.execute_cell(cell_mock, 0)
hooks["on_cell_start"].assert_called_once_with(cell=cell_mock, cell_index=0)
hooks["on_cell_execute"].assert_called_once_with(cell=cell_mock, cell_index=0)
hooks["on_cell_complete"].assert_called_once_with(cell=cell_mock, cell_index=0)
hooks["on_cell_executed"].assert_called_once_with(
cell=cell_mock, cell_index=0, execute_reply=EXECUTE_REPLY_OK
)
hooks["on_cell_error"].assert_not_called()
hooks["on_notebook_start"].assert_not_called()
hooks["on_notebook_complete"].assert_not_called()
hooks["on_notebook_error"].assert_not_called()
@prepare_cell_mocks(
{
'msg_type': 'error',
'header': {'msg_type': 'error'},
'content': {'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']},
},
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# ERROR
'content': {'status': 'error'},
},
)
def test_error_async_cell_hooks(self, executor, cell_mock, message_mock):
executor, hooks = get_executor_with_hooks(executor=executor, async_hooks=True)
with self.assertRaises(CellExecutionError):
executor.execute_cell(cell_mock, 0)
hooks["on_cell_start"].assert_called_once_with(cell=cell_mock, cell_index=0)
hooks["on_cell_execute"].assert_called_once_with(cell=cell_mock, cell_index=0)
hooks["on_cell_complete"].assert_called_once_with(cell=cell_mock, cell_index=0)
hooks["on_cell_executed"].assert_called_once_with(
cell=cell_mock, cell_index=0, execute_reply=EXECUTE_REPLY_ERROR
)
hooks["on_cell_error"].assert_called_once_with(
cell=cell_mock, cell_index=0, execute_reply=EXECUTE_REPLY_ERROR
)
hooks["on_notebook_start"].assert_not_called()
hooks["on_notebook_complete"].assert_not_called()
hooks["on_notebook_error"].assert_not_called()
|
test_client.py
|
#!/usr/bin/env python
# Copyright 2012 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is NOT a POX component. It's a little tool to test out the messenger.
"""
import socket
import threading
import json
class JSONDestreamer (object):
import json
decoder = json.JSONDecoder()
def __init__ (self, callback = None):
self._buf = ''
self.callback = callback if callback else self.rx
def push (self, data):
if len(self._buf) == 0:
data = data.lstrip()
self._buf += data
try:
while len(self._buf) > 0:
r,off = self.decoder.raw_decode(self._buf)
self._buf = self._buf[off:].lstrip()
self.callback(r)
except ValueError:
pass
def rx (self, data):
import json
print "Recv:", json.dumps(data, indent=4)
jd = JSONDestreamer()
done = False
def reader (socket):
global done
while True:
d = socket.recv(1024)
if d == "":
done = True
break
jd.push(d)
cur_chan = None
def channel (ch):
global cur_chan
cur_chan = ch
import readline
def main (addr = "127.0.0.1", port = 7790):
print "Connecting to %s:%i" % (addr,port)
port = int(port)
sock = socket.create_connection((addr, port))
t = threading.Thread(target=reader, args=(sock,))
t.daemon = True
t.start()
while not done:
try:
#print ">",
m = raw_input()
if len(m) == 0: continue
m = eval(m)
if not isinstance(m, dict):
continue
if cur_chan is not None and 'CHANNEL' not in m:
m['CHANNEL'] = cur_chan
m = json.dumps(m)
sock.send(m)
except EOFError:
break
except KeyboardInterrupt:
break
except:
import traceback
traceback.print_exc()
if __name__ == "__main__":
import sys
main(*sys.argv[1:])
else:
# This will get run if you try to run this as a POX component.
def launch ():
from pox.core import core
log = core.getLogger()
log.critical("This isn't a POX component.")
log.critical("Please see the documentation.")
raise RuntimeError("This isn't a POX component.")
|
__main__.py
|
import sys
from signal import signal, SIGINT
from threading import Thread
try:
from Queue import Queue # Python2
except ImportError:
from queue import Queue # Python3
from occacc.logger import logger, LOG, ErrorFilter, ErrorMessage
from occacc.mqtt import Mqtt
from occacc.config import MQTT, CAMERAS, COMMAND_PREFIX
from occacc.occupancy import on_passage, on_correction
def handler(signal, frame):
sys.exit(0)
def start_process():
# Configure MQTT
mqtt = Mqtt(**MQTT)
# Topics for passages
topics = list(CAMERAS)
mqtt.client.on_message = on_passage
# Topics for correction commands, only for masters/zones
for ztopic in list(value['zone_topic'] for key, value in CAMERAS.items() if 'zone_topic' in value):
topic = '{}/{}'.format(COMMAND_PREFIX,ztopic)
topics.append(topic)
mqtt.client.message_callback_add(topic, on_correction)
for t in topics:
mqtt.client.subscribe(t, 1)
th = Thread(target=mqtt.start, args=(topics,))
th.daemon = True
th.start()
return th
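# Illustrative sketch (hypothetical topics, not the real config): CAMERAS maps a
# passage topic to per-camera settings, and entries acting as masters/zones also
# carry a 'zone_topic', which is what receives the correction-command
# subscription built above, e.g.
#   CAMERAS = {
#       "site/door1/passage": {"zone_topic": "zone1"},
#       "site/door2/passage": {},
#   }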
if __name__ == "__main__":
signal(SIGINT, handler)
# Create a thread-safe queue for passing messages between threads
queue = Queue()
# Redirect stderr to a filter object.
# We use it to detect exceptions in other worker threads and exit the module,
# which triggers systemd to restart it.
sys.stderr = ErrorFilter(queue)
start_process()
while True:
msg = queue.get()
if isinstance(msg, ErrorMessage):
logger('Exception in thread detected, exiting...', LOG.FATAL)
break
|
road_speed_limiter.py
|
import json
import select
import threading
import time
import socket
import fcntl
import struct
from threading import Thread
from cereal import messaging
from common.params import Params
from common.numpy_fast import interp, clip
from common.realtime import sec_since_boot
from selfdrive.config import Conversions as CV
CAMERA_SPEED_FACTOR = 1.05
class Port:
BROADCAST_PORT = 2899
RECEIVE_PORT = 2843
LOCATION_PORT = 2911
class RoadLimitSpeedServer:
def __init__(self):
self.json_road_limit = None
self.active = 0
self.last_updated = 0
self.last_updated_active = 0
self.last_exception = None
self.lock = threading.Lock()
self.remote_addr = None
broadcast = Thread(target=self.broadcast_thread, args=[])
broadcast.setDaemon(True)
broadcast.start()
#gps = Thread(target=self.gps_thread, args=[])
#gps.setDaemon(True)
#gps.start()
def gps_thread(self):
sm = messaging.SubMaster(['gpsLocationExternal'], poll=['gpsLocationExternal'])
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
while True:
try:
sm.update()
if self.remote_addr is not None and sm.updated['gpsLocationExternal']:
location = sm['gpsLocationExternal']
json_location = json.dumps([
location.latitude,
location.longitude,
location.altitude,
location.speed,
location.bearingDeg,
location.accuracy,
location.timestamp,
location.source,
location.vNED,
location.verticalAccuracy,
location.bearingAccuracyDeg,
location.speedAccuracy,
])
address = (self.remote_addr[0], Port.LOCATION_PORT)
sock.sendto(json_location.encode(), address)
else:
time.sleep(1.)
except Exception as e:
print("exception", e)
time.sleep(1.)
def get_broadcast_address(self):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
ip = fcntl.ioctl(
s.fileno(),
0x8919,  # SIOCGIFBRDADDR: broadcast address of wlan0
struct.pack('256s', 'wlan0'.encode('utf-8'))
)[20:24]
return socket.inet_ntoa(ip)
except:
return None
def broadcast_thread(self):
broadcast_address = None
frame = 0
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
while True:
try:
if broadcast_address is None or frame % 10 == 0:
broadcast_address = self.get_broadcast_address()
print('broadcast_address', broadcast_address)
if broadcast_address is not None:
address = (broadcast_address, Port.BROADCAST_PORT)
sock.sendto('EON:ROAD_LIMIT_SERVICE:v1'.encode(), address)
except:
pass
time.sleep(5.)
frame += 1
except:
pass
def udp_recv(self, sock):
ret = False
try:
ready = select.select([sock], [], [], 1.)
ret = bool(ready[0])
if ret:
data, self.remote_addr = sock.recvfrom(2048)
json_obj = json.loads(data.decode())
try:
self.lock.acquire()
try:
if 'active' in json_obj:
self.active = json_obj['active']
self.last_updated_active = sec_since_boot()
except:
pass
if 'road_limit' in json_obj:
self.json_road_limit = json_obj['road_limit']
self.last_updated = sec_since_boot()
finally:
self.lock.release()
except:
try:
self.lock.acquire()
self.json_road_limit = None
finally:
self.lock.release()
return ret
def check(self):
now = sec_since_boot()
if now - self.last_updated > 20.:
try:
self.lock.acquire()
self.json_road_limit = None
finally:
self.lock.release()
if now - self.last_updated_active > 10.:
self.active = 0
def get_limit_val(self, key, default=None):
try:
if self.json_road_limit is None:
return default
if key in self.json_road_limit:
return self.json_road_limit[key]
except:
pass
return default
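# Illustrative sketch (made-up values): the JSON payload this server expects over
# UDP, based on the keys read in udp_recv() above and main() below.
def _example_udp_payload():
    return json.dumps({
        "active": 1,
        "road_limit": {
            "road_limit_speed": 60,
            "is_highway": False,
            "cam_type": 0,
            "cam_limit_speed": 60,
            "cam_limit_speed_left_dist": 350,
            "section_limit_speed": 0,
            "section_left_dist": 0,
            "cam_speed_factor": 1.05,
        },
    })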
def main():
server = RoadLimitSpeedServer()
roadLimitSpeed = messaging.pub_sock('roadLimitSpeed')
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
try:
try:
sock.bind(('0.0.0.0', 843))
except:
sock.bind(('0.0.0.0', Port.RECEIVE_PORT))
sock.setblocking(False)
while True:
if server.udp_recv(sock):
dat = messaging.new_message()
dat.init('roadLimitSpeed')
dat.roadLimitSpeed.active = server.active
dat.roadLimitSpeed.roadLimitSpeed = server.get_limit_val("road_limit_speed", 0)
dat.roadLimitSpeed.isHighway = server.get_limit_val("is_highway", False)
dat.roadLimitSpeed.camType = server.get_limit_val("cam_type", 0)
dat.roadLimitSpeed.camLimitSpeedLeftDist = server.get_limit_val("cam_limit_speed_left_dist", 0)
dat.roadLimitSpeed.camLimitSpeed = server.get_limit_val("cam_limit_speed", 0)
dat.roadLimitSpeed.sectionLimitSpeed = server.get_limit_val("section_limit_speed", 0)
dat.roadLimitSpeed.sectionLeftDist = server.get_limit_val("section_left_dist", 0)
dat.roadLimitSpeed.camSpeedFactor = server.get_limit_val("cam_speed_factor", CAMERA_SPEED_FACTOR)
roadLimitSpeed.send(dat.to_bytes())
server.check()
except Exception as e:
server.last_exception = e
class RoadSpeedLimiter:
def __init__(self):
self.slowing_down = False
self.start_dist = 0
self.longcontrol = Params().get_bool('LongControlEnabled')
self.sock = messaging.sub_sock("roadLimitSpeed")
self.roadLimitSpeed = None
def recv(self):
try:
dat = messaging.recv_sock(self.sock, wait=False)
if dat is not None:
self.roadLimitSpeed = dat.roadLimitSpeed
except:
pass
def get_active(self):
self.recv()
if self.roadLimitSpeed is not None:
return self.roadLimitSpeed.active
return 0
def get_max_speed(self, cluster_speed, is_metric):
log = ""
self.recv()
if self.roadLimitSpeed is None:
return 0, 0, 0, False, ""
try:
road_limit_speed = self.roadLimitSpeed.roadLimitSpeed
is_highway = self.roadLimitSpeed.isHighway
cam_type = int(self.roadLimitSpeed.camType)
cam_limit_speed_left_dist = self.roadLimitSpeed.camLimitSpeedLeftDist
cam_limit_speed = self.roadLimitSpeed.camLimitSpeed
section_limit_speed = self.roadLimitSpeed.sectionLimitSpeed
section_left_dist = self.roadLimitSpeed.sectionLeftDist
camSpeedFactor = clip(self.roadLimitSpeed.camSpeedFactor, 1.0, 1.1)
if is_highway is not None:
if is_highway:
MIN_LIMIT = 40
MAX_LIMIT = 120
else:
MIN_LIMIT = 30
MAX_LIMIT = 100
else:
MIN_LIMIT = 30
MAX_LIMIT = 120
# log = "RECV: " + str(is_highway)
# log += ", " + str(cam_limit_speed)
# log += ", " + str(cam_limit_speed_left_dist)
# log += ", " + str(section_limit_speed)
# log += ", " + str(section_left_dist)
if cam_limit_speed_left_dist is not None and cam_limit_speed is not None and cam_limit_speed_left_dist > 0:
v_ego = cluster_speed * (CV.KPH_TO_MS if is_metric else CV.MPH_TO_MS)
v_limit = cam_limit_speed * (CV.KPH_TO_MS if is_metric else CV.MPH_TO_MS)
diff_speed = cluster_speed - cam_limit_speed
v_diff = v_ego - v_limit
if self.longcontrol:
sec = interp(v_diff, [2.7, 8.3], [15., 20.])
else:
sec = interp(v_diff, [2.7, 8.3], [17., 23.])
if MIN_LIMIT <= cam_limit_speed <= MAX_LIMIT and (self.slowing_down or cam_limit_speed_left_dist < v_ego * sec):
if not self.slowing_down:
self.start_dist = cam_limit_speed_left_dist * 1.2
self.slowing_down = True
first_started = True
else:
first_started = False
base = self.start_dist / 1.2 * 0.65
td = self.start_dist - base
d = cam_limit_speed_left_dist - base
if d > 0 and td > 0. and diff_speed > 0 and (section_left_dist is None or section_left_dist < 10):
pp = d / td
else:
pp = 0
return cam_limit_speed * camSpeedFactor + int(
pp * diff_speed), cam_limit_speed, cam_limit_speed_left_dist, first_started, log
self.slowing_down = False
return 0, cam_limit_speed, cam_limit_speed_left_dist, False, log
elif section_left_dist is not None and section_limit_speed is not None and section_left_dist > 0:
if MIN_LIMIT <= section_limit_speed <= MAX_LIMIT:
if not self.slowing_down:
self.slowing_down = True
first_started = True
else:
first_started = False
return section_limit_speed * camSpeedFactor, section_limit_speed, section_left_dist, first_started, log
self.slowing_down = False
return 0, section_limit_speed, section_left_dist, False, log
except Exception as e:
log = "Ex: " + str(e)
pass
self.slowing_down = False
return 0, 0, 0, False, log
road_speed_limiter = None
def road_speed_limiter_get_active():
global road_speed_limiter
if road_speed_limiter is None:
road_speed_limiter = RoadSpeedLimiter()
return road_speed_limiter.get_active()
def road_speed_limiter_get_max_speed(cluster_speed, is_metric):
global road_speed_limiter
if road_speed_limiter is None:
road_speed_limiter = RoadSpeedLimiter()
return road_speed_limiter.get_max_speed(cluster_speed, is_metric)
if __name__ == "__main__":
main()
|
server.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
try:
from http.server import HTTPServer, BaseHTTPRequestHandler
except ImportError:
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
try:
import socketserver
except ImportError:
import SocketServer as socketserver
import socket
import ssl
import mimetypes
import webbrowser
import struct
import socket
import base64
import hashlib
import sys
import threading
import signal
import time
import os
import re
try:
from urllib import unquote
from urllib import quote
from urlparse import urlparse
from urlparse import parse_qs
except ImportError:
from urllib.parse import unquote
from urllib.parse import quote
from urllib.parse import unquote_to_bytes
from urllib.parse import urlparse
from urllib.parse import parse_qs
import cgi
import weakref
clients = {}
runtimeInstances = weakref.WeakValueDictionary()
pyLessThan3 = sys.version_info < (3,)
_MSG_ACK = '3'
_MSG_JS = '2'
_MSG_UPDATE = '1'
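# Single-character prefixes for messages sent to the browser over the websocket:
# '3' acknowledges a received message, '2' carries javascript to execute, '1'
# carries a widget update, and a literal '0' (see set_root_widget and
# websocket_handshake_done below) carries a full page refresh.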
def to_websocket(data):
# encoding and decoding utility function
if pyLessThan3:
return quote(data)
return quote(data, encoding='utf-8')
def from_websocket(data):
# encoding and decoding utility function
if pyLessThan3:
return unquote(data)
return unquote(data, encoding='utf-8')
def encode_text(data):
if not pyLessThan3:
return data.encode('utf-8')
return data
def get_method_by_name(root_node, name):
val = None
if hasattr(root_node, name):
val = getattr(root_node, name)
return val
def get_method_by_id(_id):
global runtimeInstances
return runtimeInstances.get(str(_id), None)
def parse_session_cookie(cookie_to_cook):
""" cookie_to_cook = http_header['cookie']
"""
#print("cookie_to_cook: %s"%str(cookie_to_cook))
session_value = None
tokens = cookie_to_cook.split(";")
for tok in tokens:
if 'remi_session=' in tok:
#print("found session id: %s"%str(tok))
try:
session_value = int(tok.replace('remi_session=', ''))
except:
pass
return session_value
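# Minimal sketch (hypothetical cookie strings): the session id is whatever
# integer follows 'remi_session=' in the Cookie header, or None otherwise.
def _example_parse_session_cookie():
    assert parse_session_cookie("foo=1; remi_session=1618033988749") == 1618033988749
    assert parse_session_cookie("foo=1") is None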
class WebSocketsHandler(socketserver.StreamRequestHandler):
magic = b'258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
def __init__(self, headers, *args, **kwargs):
self.headers = headers
self.handshake_done = False
self._log = logging.getLogger('remi.server.ws')
socketserver.StreamRequestHandler.__init__(self, *args, **kwargs)
def setup(self):
socketserver.StreamRequestHandler.setup(self)
self._log.info('connection established: %r' % (self.client_address,))
self.handshake_done = False
def handle(self):
global clients
self._log.debug('handle')
# on some systems (e.g. ROS) the default socket timeout
# is shorter than expected, so we force it to infinite (None)
self.request.settimeout(None)
if self.handshake():
while True:
if not self.read_next_message():
clients[self.session].websockets.remove(self)
self.handshake_done = False
self._log.debug('ws ending websocket service')
break
@staticmethod
def bytetonum(b):
if pyLessThan3:
b = ord(b)
return b
def read_next_message(self):
# noinspection PyBroadException
try:
try:
length = self.rfile.read(2)
except ValueError:
# socket was closed, just return without errors
return False
length = self.bytetonum(length[1]) & 127
if length == 126:
length = struct.unpack('>H', self.rfile.read(2))[0]
elif length == 127:
length = struct.unpack('>Q', self.rfile.read(8))[0]
masks = [self.bytetonum(byte) for byte in self.rfile.read(4)]
decoded = ''
for char in self.rfile.read(length):
decoded += chr(self.bytetonum(char) ^ masks[len(decoded) % 4])
self.on_message(from_websocket(decoded))
except socket.timeout:
return False
except Exception:
return False
return True
def send_message(self, message):
if not self.handshake_done:
self._log.warning("ignoring message %s (handshake not done)" % message[:10])
return
self._log.debug('send_message: %s... -> %s' % (message[:10], self.client_address))
out = bytearray()
out.append(129)
length = len(message)
if length <= 125:
out.append(length)
elif 126 <= length <= 65535:
out.append(126)
out += struct.pack('>H', length)
else:
out.append(127)
out += struct.pack('>Q', length)
if not pyLessThan3:
message = message.encode('utf-8')
out = out + message
self.request.send(out)
def handshake(self):
self._log.debug('handshake')
key = self.headers['Sec-WebSocket-Key']
self.session = None
if 'cookie' in self.headers:
if self.headers['cookie']!=None:
self.session = parse_session_cookie(self.headers['cookie'])
if self.session == None:
return False
if not self.session in clients.keys():
return False
digest = hashlib.sha1((key.encode("utf-8")+self.magic))
digest = digest.digest()
digest = base64.b64encode(digest)
response = 'HTTP/1.1 101 Switching Protocols\r\n'
response += 'Upgrade: websocket\r\n'
response += 'Connection: Upgrade\r\n'
response += 'Sec-WebSocket-Accept: %s\r\n\r\n' % digest.decode("utf-8")
self._log.info('handshake complete')
self.request.sendall(response.encode("utf-8"))
self.handshake_done = True
#if an update happens between the websocket connection and its handshake,
# it does not get displayed. The App is therefore informed that the handshake
# is done, so it can push a full refresh
clients[self.session].websocket_handshake_done(self)
return True
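# Note: the accept token above is the standard RFC 6455 handshake, i.e.
# base64(SHA-1(key + magic GUID)); for the RFC's sample key
# "dGhlIHNhbXBsZSBub25jZQ==" the expected value is "s3pPLMBiTxaQ9kYGzzhZRbK+xOo=".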
def on_message(self, message):
global runtimeInstances
self.send_message(_MSG_ACK)
with clients[self.session].update_lock:
# noinspection PyBroadException
try:
# saving the websocket in order to update the client
if self not in clients[self.session].websockets:
clients[self.session].websockets.append(self)
# parsing messages
chunks = message.split('/')
self._log.debug('on_message: %s' % chunks[0])
if len(chunks) > 3: # msgtype,widget,function,params
# if this is a callback
msg_type = 'callback'
if chunks[0] == msg_type:
widget_id = chunks[1]
function_name = chunks[2]
params = message[
len(msg_type) + len(widget_id) + len(function_name) + 3:]
param_dict = parse_parametrs(params)
callback = get_method_by_name(runtimeInstances[widget_id], function_name)
if callback is not None:
callback(**param_dict)
except Exception:
self._log.error('error parsing websocket', exc_info=True)
def close(self):
try:
self.request.shutdown(socket.SHUT_WR)
self.finish()
self.server.shutdown()
except:
self._log.error("exception in WebSocketsHandler.close method", exc_info=True)
def parse_parametrs(p):
"""
Parses the parameters given from POST or websocket requests,
expecting the parameters as: "11|par1='asd'|6|par2=1"
returns a dict like {par1:'asd',par2:1}
"""
ret = {}
while len(p) > 1 and p.count('|') > 0:
s = p.split('|')
l = int(s[0]) # length of param field
if l > 0:
p = p[len(s[0]) + 1:]
field_name = p.split('|')[0].split('=')[0]
field_value = p[len(field_name) + 1:l]
p = p[l + 1:]
ret[field_name] = field_value
return ret
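# Minimal sketch (made-up field names): each chunk is prefixed with the length of
# the "name=value" text that follows, exactly as the loop above consumes it.
def _example_parse_parametrs():
    assert parse_parametrs("6|par1=x|6|par2=1") == {'par1': 'x', 'par2': '1'}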
# noinspection PyPep8Naming
class App(BaseHTTPRequestHandler, object):
"""
This class handles any incoming request from the browser
The main application class can subclass this
In the do_POST and do_GET methods it is expected to receive requests such as:
- function calls with parameters
- file requests
"""
re_static_file = re.compile(r"^([\/]*[\w\d]+:[-_. $@?#£'%=()\/\[\]!+°§^,\w\d]+)") #https://regex101.com/r/uK1sX1/6
re_attr_call = re.compile(r"^/*(\w+)\/(\w+)\?{0,1}(\w*\={1}(\w|\.)+\&{0,1})*$")
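# For example (hypothetical widget id), "/12345/onclick?value=1" matches
# re_attr_call (group(1) is the widget id, group(2) the method name), while
# "/res:style.css" matches re_static_file and is served from a static path.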
def __init__(self, request, client_address, server, **app_args):
self._app_args = app_args
self.root = None
self._log = logging.getLogger('remi.request')
super(App, self).__init__(request, client_address, server)
def _get_list_from_app_args(self, name):
try:
v = self._app_args[name]
if isinstance(v, (tuple, list)):
vals = v
else:
vals = [v]
except KeyError:
vals = []
return vals
def _instance(self):
global clients
global runtimeInstances
"""
This method is used to get the Application instance previously created.
By managing this, it is possible to switch between "single instance for
multiple clients" and "multiple instances for multiple clients" execution modes.
"""
self.session = 0
#checking previously defined session
if 'cookie' in self.headers:
self.session = parse_session_cookie(self.headers['cookie'])
#if not a valid session id
if self.session == None:
self.session = 0
if not self.session in clients.keys():
self.session = 0
#if no session id
if self.session == 0:
if self.server.multiple_instance:
self.session = int(time.time()*1000)
#send session to browser
del self.headers['cookie']
#if the client instance doesn't exist
if not(self.session in clients):
self.update_interval = self.server.update_interval
from remi import gui
head = gui.HEAD(self.server.title)
# use the default css, but append a version based on its hash, to stop browser caching
head.add_child('internal_css', "<link href='/res:style.css' rel='stylesheet' />\n")
body = gui.BODY()
body.onload.connect(self.onload)
body.onerror.connect(self.onerror)
body.ononline.connect(self.ononline)
body.onpagehide.connect(self.onpagehide)
body.onpageshow.connect(self.onpageshow)
body.onresize.connect(self.onresize)
self.page = gui.HTML()
self.page.add_child('head', head)
self.page.add_child('body', body)
if not hasattr(self, 'websockets'):
self.websockets = []
self.update_lock = threading.RLock()
if not hasattr(self, '_need_update_flag'):
self._need_update_flag = False
self._stop_update_flag = False
if self.update_interval > 0:
self._update_thread = threading.Thread(target=self._idle_loop)
self._update_thread.setDaemon(True)
self._update_thread.start()
runtimeInstances[str(id(self))] = self
clients[self.session] = self
else:
#restore instance attributes
client = clients[self.session]
self.websockets = client.websockets
self.page = client.page
self.update_lock = client.update_lock
self.update_interval = client.update_interval
self._need_update_flag = client._need_update_flag
if hasattr(client, '_update_thread'):
self._update_thread = client._update_thread
net_interface_ip = self.headers.get('Host', "%s:%s"%(self.connection.getsockname()[0],self.server.server_address[1]))
websocket_timeout_timer_ms = str(self.server.websocket_timeout_timer_ms)
pending_messages_queue_length = str(self.server.pending_messages_queue_length)
self.page.children['head'].set_internal_js(net_interface_ip, pending_messages_queue_length, websocket_timeout_timer_ms)
def main(self, *_):
""" Subclasses of App class *must* declare a main function
that will be the entry point of the application.
Inside the main function you have to declare the GUI structure
and return the root widget. """
raise NotImplementedError("Applications must implement 'main()' function.")
def _idle_loop(self):
""" This is used to exec the idle function in a safe context and a separate thread
"""
while not self._stop_update_flag:
time.sleep(self.update_interval)
with self.update_lock:
try:
self.idle()
except:
self._log.error("exception in App.idle method", exc_info=True)
if self._need_update_flag:
try:
self.do_gui_update()
except:
self._log.error('''exception during gui update. It is advisable to
use App.update_lock using external threads.''', exc_info=True)
def idle(self):
""" Idle function called every UPDATE_INTERVAL before the gui update.
Useful to schedule tasks. """
pass
def _need_update(self, emitter=None):
if self.update_interval == 0:
#no interval, immediate update
self.do_gui_update()
else:
#will be updated after idle loop
self._need_update_flag = True
def do_gui_update(self):
""" This method gets called also by Timer, a new thread, and so needs to lock the update
"""
with self.update_lock:
changed_widget_dict = {}
self.root.repr(changed_widget_dict)
for widget in changed_widget_dict.keys():
html = changed_widget_dict[widget]
__id = str(widget.identifier)
self._send_spontaneous_websocket_message(_MSG_UPDATE + __id + ',' + to_websocket(html))
self._need_update_flag = False
def websocket_handshake_done(self, ws_instance_to_update):
with self.update_lock:
msg = "0" + self.root.identifier + ',' + to_websocket(self.page.children['body'].innerHTML({}))
ws_instance_to_update.send_message(msg)
def set_root_widget(self, widget):
self.page.children['body'].append(widget, 'root')
self.root = widget
self.root.disable_refresh()
self.root.attributes['data-parent-widget'] = str(id(self))
self.root._parent = self
self.root.enable_refresh()
msg = "0" + self.root.identifier + ',' + to_websocket(self.page.children['body'].innerHTML({}))
self._send_spontaneous_websocket_message(msg)
def _send_spontaneous_websocket_message(self, message):
for ws in self.websockets:
# noinspection PyBroadException
try:
#self._log.debug("sending websocket spontaneous message")
ws.send_message(message)
except:
self._log.error("sending websocket spontaneous message", exc_info=True)
try:
self.websockets.remove(ws)
ws.close()
except:
self._log.error("unable to remove websocket client - already not in list", exc_info=True)
def execute_javascript(self, code):
self._send_spontaneous_websocket_message(_MSG_JS + code)
def notification_message(self, title, content, icon=""):
"""This function sends "javascript" message to the client, that executes its content.
In this particular code, a notification message is shown
"""
code = """
var options = {
body: "%(content)s",
icon: "%(icon)s"
}
if (!("Notification" in window)) {
alert("%(content)s");
}else if (Notification.permission === "granted") {
var notification = new Notification("%(title)s", options);
}else if (Notification.permission !== 'denied') {
Notification.requestPermission(function (permission) {
if (permission === "granted") {
var notification = new Notification("%(title)s", options);
}
});
}
""" % {'title': title, 'content': content, 'icon': icon}
self.execute_javascript(code)
def do_POST(self):
self._instance()
file_data = None
# listener_widget = None
# listener_function = None
try:
# Parse the form data posted
filename = self.headers['filename']
listener_widget = runtimeInstances[self.headers['listener']]
listener_function = self.headers['listener_function']
form = cgi.FieldStorage(fp=self.rfile,
headers=self.headers,
environ={'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': self.headers['Content-Type']})
# Echo back information about what was posted in the form
for field in form.keys():
field_item = form[field]
if field_item.filename:
# The field contains an uploaded file
file_data = field_item.file.read()
file_len = len(file_data)
self._log.debug('post: uploaded %s as "%s" (%d bytes)\n' % (field, field_item.filename, file_len))
get_method_by_name(listener_widget, listener_function)(file_data, filename)
else:
# Regular form value
self._log.debug('post: %s=%s\n' % (field, form[field].value))
if file_data is not None:
# the filedata is sent to the listener
self._log.debug('GUI - server.py do_POST: fileupload name= %s' % (filename))
self.send_response(200)
except Exception:
self._log.error('post: failed', exc_info=True)
self.send_response(400)
self.send_header('Content-type', 'text/plain')
self.end_headers()
def do_HEAD(self):
self.send_response(200)
self.end_headers()
def do_AUTHHEAD(self):
self.send_response(401)
self.send_header('WWW-Authenticate', 'Basic realm=\"Protected\"')
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_GET(self):
# check here request header to identify the type of req, if http or ws
# if this is a ws req, instance a ws handler, add it to App's ws list, return
if "Upgrade" in self.headers:
if self.headers['Upgrade'] == 'websocket':
#passing arguments to the websocket handler, otherwise it would lose the last message
# and be unable to handshake
ws = WebSocketsHandler(self.headers, self.request, self.client_address, self.server)
return
"""Handler for the GET requests."""
do_process = False
if self.server.auth is None:
do_process = True
else:
if not ('Authorization' in self.headers) or self.headers['Authorization'] is None:
self._log.info("Authenticating")
self.do_AUTHHEAD()
self.wfile.write(encode_text('no auth header received'))
elif self.headers['Authorization'] == 'Basic ' + self.server.auth.decode():
do_process = True
else:
self.do_AUTHHEAD()
self.wfile.write(encode_text(self.headers['Authorization']))
self.wfile.write(encode_text('not authenticated'))
if do_process:
path = str(unquote(self.path))
# noinspection PyBroadException
try:
self._instance()
# build the page (call main()) in user code, if not built yet
with self.update_lock:
# build the root page once if necessary
if not 'root' in self.page.children['body'].children.keys():
self._log.info('built UI (path=%s)' % path)
self.set_root_widget(self.main(*self.server.userdata))
self._process_all(path)
except:
self._log.error('error processing GET request', exc_info=True)
def _get_static_file(self, filename):
filename = filename.replace("..", "") #avoid directory traversal via ".."
__i = filename.find(':')
if __i < 0:
return None
key = filename[:__i]
path = filename[__i+1:]
key = key.replace("/","")
paths = {'res': os.path.join(os.path.dirname(__file__), "res")}
static_paths = self._app_args.get('static_file_path', {})
if not type(static_paths)==dict:
self._log.error("App's parameter static_file_path must be a Dictionary.", exc_info=False)
static_paths = {}
paths.update(static_paths)
if not key in paths:
return None
return os.path.join(paths[key], path)
def _process_all(self, func):
self._log.debug('get: %s' % func)
static_file = self.re_static_file.match(func)
attr_call = self.re_attr_call.match(func)
if (func == '/') or (not func):
self.send_response(200)
self.send_header("Set-Cookie", "remi_session=%s"%(self.session))
self.send_header('Content-type', 'text/html')
self.end_headers()
with self.update_lock:
# render the HTML
page_content = self.page.repr()
self.wfile.write(encode_text("<!DOCTYPE html>\n"))
self.wfile.write(encode_text(page_content))
elif static_file:
filename = self._get_static_file(static_file.groups()[0])
if not filename:
self.send_response(404)
return
mimetype, encoding = mimetypes.guess_type(filename)
self.send_response(200)
self.send_header('Content-type', mimetype if mimetype else 'application/octet-stream')
if self.server.enable_file_cache:
self.send_header('Cache-Control', 'public, max-age=86400')
self.end_headers()
with open(filename, 'rb') as f:
content = f.read()
self.wfile.write(content)
elif attr_call:
with self.update_lock:
param_dict = parse_qs(urlparse(func).query)
# parse_qs returns parameters as lists; here we take the first element
for k in param_dict:
param_dict[k] = param_dict[k][0]
widget, func = attr_call.group(1, 2)
try:
content, headers = get_method_by_name(get_method_by_id(widget), func)(**param_dict)
if content is None:
self.send_response(503)
return
self.send_response(200)
except IOError:
self._log.error('attr %s/%s call error' % (widget, func), exc_info=True)
self.send_response(404)
return
except (TypeError, AttributeError):
self._log.error('attr %s/%s not available' % (widget, func))
self.send_response(503)
return
for k in headers:
self.send_header(k, headers[k])
self.end_headers()
try:
self.wfile.write(content)
except TypeError:
self.wfile.write(encode_text(content))
def close(self):
""" Command to initiate an App to close
"""
self._log.debug('shutting down...')
self.server.server_starter_instance.stop()
def on_close(self):
""" Called by the server when the App have to be terminated
"""
self._stop_update_flag = True
for ws in self.websockets:
ws.close()
def onload(self, emitter):
""" WebPage Event that occurs on webpage loaded
"""
self._log.debug('App.onload event occurred')
def onerror(self, emitter, message, source, lineno, colno):
""" WebPage Event that occurs on webpage errors
"""
self._log.debug("""App.onerror event occurred in webpage:
\nMESSAGE:%s\nSOURCE:%s\nLINENO:%s\nCOLNO:%s\n"""%(message, source, lineno, colno))
def ononline(self, emitter):
""" WebPage Event that occurs on webpage goes online after a disconnection
"""
self._log.debug('App.ononline event occurred')
def onpagehide(self, emitter):
""" WebPage Event that occurs on webpage when the user navigates away
"""
self._log.debug('App.onpagehide event occurred')
def onpageshow(self, emitter):
""" WebPage Event that occurs on webpage gets shown
"""
self._log.debug('App.onpageshow event occurred')
def onresize(self, emitter, width, height):
""" WebPage Event that occurs on webpage gets resized
"""
self._log.debug('App.onresize event occurred. Width:%s Height:%s'%(width, height))
class ThreadedHTTPServer(socketserver.ThreadingMixIn, HTTPServer):
daemon_threads = False
# noinspection PyPep8Naming
def __init__(self, server_address, RequestHandlerClass,
auth, multiple_instance, enable_file_cache, update_interval,
websocket_timeout_timer_ms, pending_messages_queue_length,
title, server_starter_instance, certfile, keyfile, ssl_version, *userdata):
HTTPServer.__init__(self, server_address, RequestHandlerClass)
self.auth = auth
self.multiple_instance = multiple_instance
self.enable_file_cache = enable_file_cache
self.update_interval = update_interval
self.websocket_timeout_timer_ms = websocket_timeout_timer_ms
self.pending_messages_queue_length = pending_messages_queue_length
self.title = title
self.server_starter_instance = server_starter_instance
self.userdata = userdata
self.certfile = certfile
self.keyfile = keyfile
self.ssl_version = ssl_version
if self.ssl_version!=None:
self.socket = ssl.wrap_socket(self.socket, keyfile=self.keyfile, certfile=self.certfile, server_side=True, ssl_version=self.ssl_version, do_handshake_on_connect=True)
class Server(object):
# noinspection PyShadowingNames
def __init__(self, gui_class, title='', start=True, address='127.0.0.1', port=0, username=None, password=None,
multiple_instance=False, enable_file_cache=True, update_interval=0.1, start_browser=True,
websocket_timeout_timer_ms=1000, pending_messages_queue_length=1000,
certfile=None, keyfile=None, ssl_version=None, userdata=()):
self._gui = gui_class
self._title = title or gui_class.__name__
self._sserver = None
self._sth = None
self._base_address = ''
self._address = address
self._sport = port
self._multiple_instance = multiple_instance
self._enable_file_cache = enable_file_cache
self._update_interval = update_interval
self._start_browser = start_browser
self._websocket_timeout_timer_ms = websocket_timeout_timer_ms
self._pending_messages_queue_length = pending_messages_queue_length
self._certfile = certfile
self._keyfile = keyfile
self._ssl_version = ssl_version
self._userdata = userdata
if username and password:
self._auth = base64.b64encode(encode_text("%s:%s" % (username, password)))
else:
self._auth = None
if not isinstance(userdata, tuple):
raise ValueError('userdata must be a tuple')
self._log = logging.getLogger('remi.server')
self._alive = True
if start:
self._myid = threading.Thread.ident
self.start()
self.serve_forever()
@property
def title(self):
return self._title
@property
def address(self):
return self._base_address
def start(self):
# Create a web server and define the handler to manage the incoming
# request
self._sserver = ThreadedHTTPServer((self._address, self._sport), self._gui, self._auth,
self._multiple_instance, self._enable_file_cache,
self._update_interval, self._websocket_timeout_timer_ms,
self._pending_messages_queue_length, self._title,
self, self._certfile, self._keyfile, self._ssl_version, *self._userdata)
shost, sport = self._sserver.socket.getsockname()[:2]
self._log.info('Started httpserver http://%s:%s/'%(shost,sport))
# when listening on multiple net interfaces the browsers connects to localhost
if shost == '0.0.0.0':
shost = '127.0.0.1'
self._base_address = 'http://%s:%s/' % (shost,sport)
if self._start_browser:
try:
import android
android.webbrowser.open(self._base_address)
except ImportError:
# use default browser instead of always forcing IE on Windows
if os.name == 'nt':
webbrowser.get('windows-default').open(self._base_address)
else:
webbrowser.open(self._base_address)
self._sth = threading.Thread(target=self._sserver.serve_forever)
self._sth.daemon = False
self._sth.start()
def serve_forever(self):
# we could join on the threads, but join blocks all interrupts (including
# ctrl+c), so we just spin here
# noinspection PyBroadException
try:
def sig_manager(sig, callstack):
self.stop()
self._log.info('*** signal %d received.' % sig)
return signal.SIG_IGN
prev_handler = signal.signal(signal.SIGINT, sig_manager)
except Exception:
# signal.pause() is missing for Windows; wait 1ms and loop instead
pass
except KeyboardInterrupt:
pass
while self._alive:
try:
time.sleep(1)
except:
self._alive = False
self._log.debug(' ** serve_forever() quitting')
def stop(self):
global clients
self._alive = False
self._sserver.shutdown()
for client in clients.values():
client.on_close()
class StandaloneServer(Server):
def __init__(self, gui_class, title='', width=800, height=600, resizable=True, fullscreen=False, start=True,
userdata=()):
Server.__init__(self, gui_class, title=title, start=False, address='127.0.0.1', port=0, username=None,
password=None,
multiple_instance=False, enable_file_cache=True, update_interval=0.1, start_browser=False,
websocket_timeout_timer_ms=1000, pending_messages_queue_length=1000, userdata=userdata)
self._application_conf = {'width': width, 'height': height, 'resizable': resizable, 'fullscreen': fullscreen}
if start:
self.serve_forever()
def serve_forever(self):
try:
import webview
except ImportError:
raise ImportError('PyWebView is missing. Please install it by:\n '
'pip install pywebview\n '
'more info at https://github.com/r0x0r/pywebview')
else:
Server.start(self)
webview.create_window(self.title, self.address, **self._application_conf)
Server.stop(self)
def start(main_gui_class, **kwargs):
"""This method starts the webserver with a specific App subclass."""
debug = kwargs.pop('debug', False)
standalone = kwargs.pop('standalone', False)
logging.basicConfig(level=logging.DEBUG if debug else logging.INFO,
format='%(name)-16s %(levelname)-8s %(message)s')
logging.getLogger('remi').setLevel(
level=logging.DEBUG if debug else logging.INFO)
if standalone:
s = StandaloneServer(main_gui_class, start=True, **kwargs)
else:
s = Server(main_gui_class, start=True, **kwargs)
|
prep_data.py
|
import os
import numpy as np
import pandas as pd
from collections import Counter
from sklearn.model_selection import train_test_split
from sklearn.externals import joblib
import json
import cv2
from time import time
import threading
import math
DATASET={'CCT':'iWildCam_2019_CCT','iNat':'iWildCam_2019_iNat_Idaho','IDFG':'iWildCam_IDFG'} #_images_small
DATA_DIR='./data/'
ANNOTATION_DIR =DATA_DIR+ 'iWildCam_2019_Annotations/'
def rewrite_train_data_json(dataset='CCT'):
json_path=ANNOTATION_DIR+DATASET[dataset]+'.json'
json_data = json.load(open(json_path,'r'))
images = json_data['images']
annotations = json_data['annotations']
csv_data={'category_id':[],'date_captured':[],'id':[],'file_name':[],
'rights_holder':[],'width':[],'height':[],'location':[]}
print('len of data:',dataset,len(images))
for ii,(img, annot) in enumerate(zip(images,annotations)):
if img['id'] != annot['image_id']:
print('image/annotation id mismatch at index',ii,img['id'],annot['image_id'])
if 'date_captured' in img:
date=img['date_captured']
elif 'datetime' in img:
date = img['datetime']
else:
date = json_data['info']['date_created']
csv_data['date_captured'] += [date]
csv_data['category_id'] += [annot['category_id']]
csv_data['file_name'] += [img['file_name']]
csv_data['rights_holder'] += [img['rights_holder']]
csv_data['id'] += [img['id']]
csv_data['width'] += [img['width']]
csv_data['height'] += [img['height']]
if 'location' in img:
locat = img['location']
else:
locat=-1
csv_data['location'] += [locat]
csv_data = pd.DataFrame(csv_data)
csv_data.to_csv(ANNOTATION_DIR+DATASET[dataset]+'.csv',index=False)
def split_train_dev(CCT=True,iNat=True):
columns=['category_id','date_captured','id','file_name',
'rights_holder','width','height','location']
train=pd.DataFrame()
if CCT:
temp=pd.read_csv(ANNOTATION_DIR+DATASET['CCT']+'.csv')[columns]
temp['dataset'] = 'CCT'
temp['file_name'] = temp['file_name'].map(lambda x:'iWildCam_2019_CCT_images_small/'+x)
print('use CCT data',temp.shape)
train=pd.concat([train,temp])
if iNat:
temp=pd.read_csv(ANNOTATION_DIR+DATASET['iNat']+'.csv')[columns]
temp['dataset'] = 'iNat'
temp['file_name'] = temp['file_name'].map(lambda x: 'iWildCam_2019_iNat_Idaho/' + x)
print('use iNat data',temp.shape)
train=pd.concat([train,temp])
print('train shape',train.shape)
#train=train.sample(frac=1,random_state=0).reset_index(drop=True)
dev_file = train[train['location'] == 46] # 46
train_file = train[train['location'] != 46]
train_file.to_csv(DATA_DIR+'train_file.csv',index=False)
dev_file.to_csv(DATA_DIR+'dev_file.csv',index=False)
print('category ratio for train data:')
cnt = Counter(train_file['category_id'].values)
L = len(train_file)
for ii in range(23):
print(ii, cnt[ii], cnt[ii] / L)
print('category ratio for dev data:')
cnt = Counter(dev_file['category_id'].values)
L = len(dev_file)
for ii in range(23):
print(ii, cnt[ii], cnt[ii] / L)
def save_test():
columns=['date_captured','id','file_name',
'rights_holder','width','height','location']
test = pd.read_csv(DATA_DIR+'test.csv')[columns]
test['dataset'] = 'test'
test['category_id'] = -1
test['file_name'] = test['file_name'].map(lambda x:'test_images/'+x)
print('test shape',test.shape) #153730
test.to_csv(DATA_DIR+'test_file.csv',index=False)
full_data_dir='data/raw_data/iWildCam_2019_IDFG/iWildCam_IDFG_images/'
def get_test_orig_size_split(test_file,name=0):
name=str(name)
print('get_test_orig_size_split for thread',name,test_file.shape)
file_names= test_file['file_name'].values
width,height=[],[]
t1=time()
for ii,fname in enumerate(file_names):
mod_name =full_data_dir + fname.split('/')[-1]
image = cv2.imread(mod_name)
s = image.shape
# cv2 image.shape is (height, width, channels)
height.append(s[0])
width.append(s[1])
if ii%100==0:
print('threads %s, index %d, time-cost %f min'%(name,ii,(time()-t1)/60))
if ii % 1000 == 0:
joblib.dump([ii,width,height],DATA_DIR+'raw_data/test_size_temp_{}.pkl'.format(name))
test_file['width']=width
test_file['height'] = height
print(name,'test shape',test_file.shape) #153730
test_file.to_csv(DATA_DIR+'raw_data/test_file_orig_{}.csv'.format(name),index=False)
def get_test_size_multi_thread(thread_num=1):
test_file = pd.read_csv(DATA_DIR+'test_file.csv')
test_file['small_width']=test_file['width']
test_file['small_height'] = test_file['height']
chunk=math.ceil(len(test_file)/thread_num)
thread_list=[]
for ii in range(thread_num):
sup_file=test_file.iloc[ii*chunk:(ii+1)*chunk]
thr=threading.Thread(target=get_test_orig_size_split,args=(sup_file,ii))
thread_list.append(thr)
for t in thread_list:
t.setDaemon(True)
t.start()
for t in thread_list:
t.join()
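# Worked example: with the ~153730 test rows noted above and thread_num=10,
# chunk = math.ceil(153730 / 10) = 15373, so thread ii handles rows
# [ii*15373, (ii+1)*15373) and writes test_file_orig_{ii}.csv for later merging.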
def merge_test_size_file():
data=pd.DataFrame()
for name in range(10):
data_path=DATA_DIR + 'raw_data/test_file_orig_{}.csv'.format(str(name))
temp=pd.read_csv(data_path)
data=pd.concat([data,temp])
print(name,data.shape)
data.to_csv(DATA_DIR + 'raw_data/test_file.csv',index=False)
def main(CCT=True,iNat=True):
if CCT:
rewrite_train_data_json('CCT')
if iNat:
rewrite_train_data_json('iNat')
split_train_dev(CCT=CCT,iNat=iNat)
save_test()
if __name__=='__main__':
main()
#get_test_size_multi_thread(thread_num=10)
#merge_test_size_file()
|
test_executor.py
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import threading
import time
import mesos.interface
from mesos.interface import mesos_pb2
import mesos.native
class MyExecutor(mesos.interface.Executor):
def launchTask(self, driver, task):
# Create a thread to run the task. Tasks should always be run in new
# threads or processes, rather than inside launchTask itself.
def run_task():
print "Running task %s" % task.task_id.value
update = mesos_pb2.TaskStatus()
update.task_id.value = task.task_id.value
update.state = mesos_pb2.TASK_RUNNING
update.data = 'data with a \0 byte'
driver.sendStatusUpdate(update)
# This is where one would perform the requested task.
print "Sending status update..."
update = mesos_pb2.TaskStatus()
update.task_id.value = task.task_id.value
update.state = mesos_pb2.TASK_FINISHED
update.data = 'data with a \0 byte'
driver.sendStatusUpdate(update)
print "Sent status update"
thread = threading.Thread(target=run_task)
thread.start()
def frameworkMessage(self, driver, message):
# Send it back to the scheduler.
driver.sendFrameworkMessage(message)
if __name__ == "__main__":
print "Starting executor"
driver = mesos.native.MesosExecutorDriver(MyExecutor())
sys.exit(0 if driver.run() == mesos_pb2.DRIVER_STOPPED else 1)
|
asp_solver.py
|
import sys
import clingo
import json
import time
import traceback
import signal
from threading import Thread
from time import sleep
import os
class IncrementalSolver:
'''
solv_type:
0: two solver calls using the theoretical delta
1: continuous calls on the theoretical delta until solved
2: one solver call with the optimal makespan
'''
def __init__(self, name, minimum_time, num_agents, min_sum, total_cost, solv_type, only_first):
self.program_name = name
self.minimum_time = minimum_time
self.min_sum = min_sum
self.total_cost = total_cost
self.resp = []
self.num_agents = num_agents
self.sol_time = -1
for a in range(self.num_agents):
self.resp.append([])
self.stats = None
self.solved = False
self.final_solved = False
self.moved_on_goal = False
self.first_makespan = -1
self.opt_makespan = -1
self.theoric_makespan = -1
self.makespan = -1
self.solv_type = solv_type
self.first_runtime = 0
self.only_first = only_first
self.sol_cost = -1
self.ground_time = 0
def main(self, ctl, files):
if self.solv_type == 0:
self.run_constant_delta(ctl, files)
elif self.solv_type == 1:
self.run_continuous_delta(ctl, files)
elif self.solv_type == 2:
self.run_opt_bound(ctl, files)
elif self.solv_type == 3:
self.run_extra(ctl, files)
elif self.solv_type == 4:
self.run_standard(ctl, files)
def run_standard(self, ctl, files):
if len(files) > 0:
for f in files:
ctl.load(f)
else:
ctl.load("-")
#self.resp = []
step = 0
while (step <= self.minimum_time):
for a in range(self.num_agents):
self.resp[a].append((0,0))
step+=1
init_time = time.time()
#signal.signal(signal.SIGINT, self.signal_handler)
'''
catchable_sigs = set(signal.Signals)
for sig in catchable_sigs:
try:
signal.signal(sig, self.signal_handler)
print("Setting ",sig)
print ("value {}".format(sig))
except (ValueError, OSError, RuntimeError) as m:
print(m)
print("Skipping ",sig)
print ("Value {}".format(sig))
'''
#thread = Thread(target=self.ground, args = [ctl])
#thread.start()
#ctl.interrupt()
#thread.join(10)
ctl.ground([("base", [])])
self.ground_time = time.time() - init_time
print('grounded: {0}'.format(self.ground_time))
ret = ctl.solve()
if ret.satisfiable:
self.stats = ctl.statistics
#self.first_stats = ctl.statistics
print(json.dumps(self.stats, sort_keys=True, indent=4, separators=(',', ': ')))
self.sol_cost = self.minimum_time * self.num_agents + ctl.statistics['summary']['costs'][0]
#self.sol_cost = ctl.statistics['summary']['costs'][0]
print('sic:', self.total_cost, 'optimization:',ctl.statistics['summary']['costs'][0], 'sol_cost:',self.sol_cost,'makespan:', self.minimum_time, 'agents:', self.num_agents)
#self.sol_cost = ctl.statistics['summary']['costs'][0]
#print(self.sol_cost)
#print(self.sol_cost)
#delta = self.sol_cost - self.minimum_time - self.min_sum
imax = self.minimum_time + self.sol_cost - 1 - self.total_cost
if imax < self.minimum_time:
imax = self.minimum_time
#imax = self.minimum_time + delta
self.theoric_makespan = imax
#print(self.resp)
def run_constant_delta(self, ctl, files):
if len(files) > 0:
for f in files:
ctl.load(f)
else:
ctl.load("-")
#ctl.ground([("base", [])])
imin = self.minimum_time
step, ret = 0, None
imax = 100
init_time = time.time()
while (step < imin):
for a in range(self.num_agents):
self.resp[a].append((0,0))
parts = []
#parts.append(("check", [step]))
parts.append(("step", [step]))
if step > 0:
parts.append(("evolution",[step]))
else:
parts.append(("base", []))
ctl.ground(parts)
step += 1
while (step <= imax):
for a in range(self.num_agents):
self.resp[a].append((0,0))
parts = []
parts.append(("check", [step]))
parts.append(("step", [step]))
if step > 0:
#ctl.release_external(clingo.Function("query", [step-1]))
parts.append(("evolution",[step]))
ctl.cleanup()
else:
parts.append(("base", []))
if step > imin:
ctl.release_external(clingo.Function("query", [step-1]))
ctl.cleanup()
ctl.ground(parts)
ctl.assign_external(clingo.Function("query", [step]), True)
if not self.solved or step == imax:
print('solving on makespan --- {0}'.format(step))
ret = ctl.solve(on_model=self.on_model)
if ret.satisfiable:
self.stats = ctl.statistics
self.makespan = step
self.sol_cost = self.total_cost + self.stats['summary']['costs'][0]
print("----")
print(self.sol_cost)
#self.sol_cost = self.stats['summary']['costs'][0]
#print(self.stats['summary']['costs'][0])
if not self.solved:
delta = self.sol_cost - self.makespan - self.min_sum
imax = step + delta
self.theoric_makespan = imax
print(self.min_sum)
print("----")
self.solved = True
self.first_stats = self.stats
self.first_makespan = step
print('found new makespan bound: {0}'.format(imax))
self.first_runtime = time.time() - init_time
if step == imax:
self.opt_makespan = step
self.final_solved = True
if self.only_first:
break
else:
self.opt_makespan = step
self.final_solved = True
step += 1
def run_continuous_delta(self, ctl, files):
if len(files) > 0:
for f in files:
ctl.load(f)
else:
ctl.load("-")
#ctl.ground([("base", [])])
imin = self.minimum_time
step, ret = 0, None
imax = 100
init_time = time.time()
while (step < imin):
for a in range(self.num_agents):
self.resp[a].append((0,0))
parts = []
parts.append(("check", [step]))
parts.append(("step", [step]))
if step > 0:
parts.append(("evolution",[step]))
else:
parts.append(("base", []))
ctl.ground(parts)
step += 1
while (step <= imax):
for a in range(self.num_agents):
self.resp[a].append((0,0))
parts = []
parts.append(("check", [step]))
parts.append(("step", [step]))
if step > 0:
parts.append(("evolution",[step]))
else:
parts.append(("base", []))
#parts.append(("step",[step]))
if step > imin:
ctl.release_external(clingo.Function("query", [step-1]))
ctl.cleanup()
ctl.ground(parts)
ctl.assign_external(clingo.Function("query", [step]), True)
ret = ctl.solve(on_model=self.on_model)
print('solving on makespan {0}'.format(step))
if ret.satisfiable:
self.stats = ctl.statistics
self.makespan = step
self.sol_cost = self.stats['summary']['costs'][0]
delta = self.sol_cost - self.makespan - self.min_sum
new_max = step + delta
if new_max < imax:
imax = new_max
self.theoric_makespan = imax
print('found new makespan bound: {0}'.format(imax))
if not self.solved:
self.solved = True
self.first_stats = self.stats
self.first_makespan = step
self.first_runtime = time.time() - init_time
else:
self.opt_makespan = step
if step == imax:
self.final_solved = True
step += 1
def run_opt_bound(self, ctl, files):
if len(files) > 0:
for f in files:
ctl.load(f)
else:
ctl.load("-")
#ctl.ground([("base", [])])
imin = self.opt_makespan
step, ret = 0, None
imax = 100
while (step < imin):
for a in range(self.num_agents):
self.resp[a].append((0,0))
parts = []
parts.append(("check", [step]))
parts.append(("step", [step]))
if step > 0:
parts.append(("evolution",[step]))
else:
parts.append(("base", []))
ctl.ground(parts)
step += 1
#Last step:
for a in range(self.num_agents):
self.resp[a].append((0,0))
parts = []
parts.append(("check", [step]))
parts.append(("step", [step]))
if step > 0:
parts.append(("evolution",[step]))
else:
parts.append(("base", []))
print(parts)
ctl.ground(parts)
ctl.assign_external(clingo.Function("query", [step]), True)
ret = ctl.solve(on_model=self.on_model)
if ret.satisfiable:
self.stats = ctl.statistics
self.makespan = step
self.sol_cost = self.total_cost + self.stats['summary']['costs'][0]
print(self.sol_cost)
delta = self.sol_cost - self.makespan - self.min_sum
self.theoric_makespan = imin
if not self.solved:
self.solved = True
self.first_stats = self.stats
self.first_makespan = step
self.final_solved = True
self.current_makespan = step
def on_model(self,m):
#print(m.symbols(shown=True))
self.moved_on_goal = False
return
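        # NOTE: the early return above disables the model inspection below; remove it to
        # re-enable extraction of the "on"/"current_landmark" atoms into self.resp.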
for sym in m.symbols(shown=True):
if sym.name == "current_landmark":
args = sym.arguments
#print(args)
print(args[0].number,args[1].number,args[2].number)
#robot = int(args[0].number)
#self.resp[robot][args[3].number] = (args[1].number,args[2].number)
if sym.name == "on":
args = sym.arguments
#print(args)
robot = int(args[0].number)
self.resp[robot][args[3].number] = (args[1].number,args[2].number)
#if sym.name == "moved_on_goal":
# self.moved_on_goal = True
#if sym.name == "dijkstra":
# print(sym)
#if sym.name == "dijkstra2":
# print(sym)
#if sym.name == "exec":
# args = sym.arguments
# robot = int(args[0].name[-1:])-1
#if sym.name == "penalty":
# args = sym.arguments
# robot = int(args[0].name[-1:])-1
# if robot == 4:
# print(sym)
#print(self.resp)
class ConstantSolver:
def __init__(self, name, minimum_time):
self.program_name = name
self.minimum_time = minimum_time
def main(self, ctl, files):
if len(files) > 0:
for f in files:
ctl.load(f)
else:
ctl.load("-")
ctl.ground([("base", [])])
ret = ctl.solve()
print(json.dumps(ctl.statistics, sort_keys=True, indent=4, separators=(',', ': ')))
if __name__ == '__main__':
try:
print(sys.argv)
print(clingo.__version__)
print("hola")
except:
traceback.print_exc()
x = input()
'''
app = IncrementalSolver(sys.argv[0], 10, 10)
clingo.clingo_main(app, sys.argv[1:])
'''
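    # A call matching the current constructor signature would look like this (illustrative values only):
    #   app = IncrementalSolver(sys.argv[0], minimum_time=10, num_agents=4, min_sum=20,
    #                           total_cost=30, solv_type=1, only_first=False)
    #   clingo.clingo_main(app, sys.argv[1:])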
|
video.py
|
import os
import sys
import dill
from vipy.globals import print
from vipy.util import remkdir, tempMP4, isurl, \
isvideourl, templike, tempjpg, filetail, tempdir, isyoutubeurl, try_import, isnumpy, temppng, \
istuple, islist, isnumber, tolist, filefull, fileext, isS3url, totempdir, flatlist, tocache, premkdir, writecsv, iswebp, ispng, isgif, filepath, Stopwatch, toextension, isjsonfile, isRTSPurl, isRTMPurl
from vipy.image import Image
import vipy.geometry
import vipy.math
import vipy.image
import vipy.downloader
import copy
import numpy as np
import ffmpeg
import urllib.request
import urllib.error
import urllib.parse
import http.client as httplib
import io
import matplotlib.pyplot as plt
import PIL.Image
import warnings
import shutil
import types
import uuid
import platform
import time
from io import BytesIO
import itertools
import vipy.globals
import vipy.activity
import hashlib
from pathlib import PurePath
import queue
import threading
from concurrent.futures import ThreadPoolExecutor
import collections
try:
import ujson as json # faster
except ImportError:
import json
ffmpeg_exe = shutil.which('ffmpeg')
has_ffmpeg = ffmpeg_exe is not None and os.path.exists(ffmpeg_exe)
ffprobe_exe = shutil.which('ffprobe')
has_ffprobe = ffprobe_exe is not None and os.path.exists(ffprobe_exe)
ffplay_exe = shutil.which('ffplay')
has_ffplay = ffplay_exe is not None and os.path.exists(ffplay_exe)
class Stream(object):
"""vipy.video.Stream class.
* This is designed to be accessed as `vipy.video.Video.stream`.
"""
def __init__(self, v, queuesize, write, overwrite, bitrate=None, buffered=False, buflen=16, rebuffered=False):
self._video = v # do not clone
self._write_pipe = None
self._vcodec = 'libx264'
self._bitrate = bitrate # e.g. '2000k', recommended settings for live streaming
self._framerate = self._video.framerate()
self._outfile = self._video.filename()
self._write = write or overwrite
assert self._write is False or (overwrite is True or not os.path.exists(self._outfile)), "Output file '%s' exists - Writable stream cannot overwrite existing video file unless overwrite=True" % self._outfile
if overwrite and os.path.exists(self._outfile):
os.remove(self._outfile)
self._shape = self._video.shape() if (not self._write) or (self._write and self._video.canload()) else None # shape for write can be defined by first frame
assert (write is True or overwrite is True) or self._shape is not None, "Invalid video '%s'" % (str(v))
self._queuesize = queuesize
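        # Stream buffer length in frames; buflen appears to be specified in seconds of video
        # at the source framerate (assumption based on the computation below).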
self._bufsize = int(buflen*v.framerate())
self._buffered = buffered
self._rebuffered = rebuffered
self._bufowner = False
assert self._bufsize >= 1
def __enter__(self):
"""Write pipe context manager"""
assert self._write, "invalid parameters for write only context manager"
if self._shape is not None:
(height, width) = self._shape
outfile = self._outfile if self._outfile is not None else self._url # may be youtube/twitch live stream
outrate = 30 if vipy.util.isRTMPurl(outfile) else self._video.framerate()
fiv = (ffmpeg.input('pipe:', format='rawvideo', pix_fmt='rgb24', s='{}x{}'.format(width, height), r=self._video.framerate())
.filter('pad', 'ceil(iw/2)*2', 'ceil(ih/2)*2'))
fi = ffmpeg.concat(fiv.filter('fps', fps=30, round='up'), ffmpeg.input('anullsrc', f='lavfi'), v=1, a=1) if isRTMPurl(outfile) else fiv # empty audio for youtube-live
kwargs = {'video_bitrate':self._bitrate} if self._bitrate is not None else {}
fo = (fi.output(filename=self._outfile if self._outfile is not None else self._url,
pix_fmt='yuv420p',
vcodec=self._vcodec,
f='flv' if vipy.util.isRTMPurl(outfile) else vipy.util.fileext(outfile, withdot=False),
g=2*outrate,
**kwargs)
.overwrite_output()
.global_args('-cpuflags', '0', '-loglevel', 'quiet' if not vipy.globals.isdebug() else 'debug'))
self._write_pipe = fo.run_async(pipe_stdin=True)
self._writeindex = 0
return self
def __exit__(self, type, value, tb):
"""Write pipe context manager
..note:: This is triggered on ctrl-c as the last step for cleanup
"""
if self._write_pipe is not None:
self._write_pipe.stdin.close()
self._write_pipe.wait()
del self._write_pipe
self._write_pipe = None
if type is not None:
raise
return self
def __call__(self, im):
"""alias for write()"""
return self.write(im)
def _read_pipe(self):
if not self._video.isloaded():
p = self._video._ffmpeg.output('pipe:', format='rawvideo', pix_fmt='rgb24').global_args('-nostdin', '-loglevel', 'debug' if vipy.globals.isdebug() else 'quiet').run_async(pipe_stdout=True, pipe_stderr=True)
assert p is not None, "Invalid read pipe"
p.poll()
return p
else:
return None
def framerate(self):
return self._video.framerate()
def __iter__(self):
"""Stream individual video frames.
This iterator is the primary mechanism for streaming frames and clips from long videos or live video streams.
- The stream is constructed from a shared underlying video in self._video.
- As the video is updated with annotations, the stream can generate frames and clips that contain these annotations
- The shared underlying video allows for multiple iterators all sourced from the same video, iterating over different frames and rates
- The iterator leverages a pipe to FFMPEG, reading numpy frames from the video filter chain.
- The pipe is written from a thread which is dedicated to reading frames from ffmpeg
        - Each numpy frame is added to a queue, with a null terminator when the end of the stream is reached
- The iterator then reads from the queue, and returns annotated frames
This iterator can also be used as a buffered stream. Buffered streams have a primary iterator which saves a fixed stream buffer
of frames so that subsequent iterators can pull temporally aligned frames. This is useful to avoid having multiple FFMPEG pipes
open simultaneously, and can allow for synchronized access to video streams without timestamping.
- The primary iterator is the first iterator over the video with stream(buffered=True)
- The primary iterator creates a private attribute self._video.attributes['__stream_buffer'] which caches frames
- The stream buffer saves numpy arrays from the iterator with a fixed buffer length (number of frames)
- The secondary iterator (e.g. any iterator that accesses the video after the primary iterator is initially created) will read from the stream buffer
- All iterators share the underlying self._video object in the stream so that if the video annotations are updated by an iterator, the annotated frames are accessible in the iterators
- The secondary iterators are synchronized to the stream buffer that is read by the primary iterator. This is useful for synchronizing streams for live camera streams without absolute timestamps.
- There can be an unlimited number of secondary iterators, without incurring a penalty on frame access
This iterator can iterate over clips, frames or batches.
- A clip is a sequence of frames such that each clip is separated by a fixed number of frames.
- Clips are useful for temporal encoding of short atomic activities
- A batch is a sequence of n frames with a stride of n.
- A batch is useful for iterating over groups of frames that are operated in parallel on a GPU
        >>> for (im1, im2, v3) in zip(v.stream(buffered=True), v.stream(buffered=True).frame(delay=30), v.stream(buffered=True).clip(n=16, m=1)):
        >>> # im1: `vipy.image.Scene` at frame index k
        >>> # im2: `vipy.image.Scene` at frame index k-30
        >>> # v3: `vipy.video.Scene` with frames in the range [k-16, k]
"""
try:
if self._video.isloaded():
# For loaded video, just use the existing iterator for in-memory video
for k in range(len(self._video)):
yield self._video[k]
elif not self._buffered or self._rebuffered or (self._buffered and not self._video.hasattribute('__stream_buffer')):
# First stream iterator: read from video and store in stream buffer for all other iterators to access
if self._rebuffered or (self._buffered and not self._video.hasattribute('__stream_buffer')):
self._video.attributes['__stream_buffer'] = []
self._bufowner = True # track which iterator created the stream buffer for cleanup in 'finally'
# Video pipe thread: read numpy frames from the ffmpeg filter chain via a pipe
                # store the resulting frames in a queue, with a null terminator when the stream ends
p = self._read_pipe()
q = queue.Queue(self._queuesize)
(h, w) = self._shape
def _f_threadloop(pipe, queue, height, width, event):
assert pipe is not None, "Invalid pipe"
assert queue is not None, "invalid queue"
while True:
in_bytes = pipe.stdout.read(height * width * 3)
if not in_bytes:
queue.put(None)
pipe.poll()
pipe.wait()
if pipe.returncode != 0:
raise ValueError('Stream iterator exited with returncode %d' % (pipe.returncode))
event.wait()
break
else:
queue.put(np.frombuffer(in_bytes, np.uint8).reshape([height, width, 3]))
e = threading.Event()
t = threading.Thread(target=_f_threadloop, args=(p, q, h, w, e), daemon=True)
t.start()
# Primary iterator:
# -read frames from the queue and store in the stream buffer
# -Frames are also yielded for the primary iterator
# -The stream buffer is always n frames long, and if the newest frame from the pipe is frame k, the oldest frame is k-n which is yielded first
                # -If the stream is unbuffered, just read from the queue directly and yield the numpy frame
k = 0
b = self._video.attributes['__stream_buffer'] if (self._buffered or self._rebuffered) else None
while True:
if b is not None:
while len(b) < self._bufsize and ((len(b) == 0) or (len(b) > 0 and b[-1][1] is not None)):
b.append( (k, q.get()) ) # re-fill stream buffer
k += 1
(f,img) = b[0] # read oldest element
else:
(f,img) = (k,q.get()) # read from thread queue, unbuffered
k += 1
if img is not None:
                        yield self._video.frame(f, img) # yield a vipy.image.Scene object with annotations at frame f, using the latest annotations from the shared video object
if b is not None:
b.pop(0) # remove from filled buffer after secondary iterators have seen it
else:
e.set()
break # termination
elif self._buffered and self._video.hasattribute('__stream_buffer'):
# Secondary iterators: read frames from the stream buffer
                # -The stream buffer is a simple list stored in self._video as a private attribute. This is so that the video can be serialized (e.g. no Queues)
# -The secondary iterators search the stream buffer for a matching frame index to yield.
# -The stream buffer is filled by the primary iterator, so that index 0 is the oldest frame and index n is the newest frame
# -The secondary iterators sleep in order to release the GIL before searching the frame buffer for the next frame to yield.
# -This is a bit ugly, but is a compromise to preserve pickleability of the stream buffer.
k = 0
while '__stream_buffer' in self._video.attributes and len(self._video.attributes['__stream_buffer']) > 0:
for (f, img) in self._video.attributes['__stream_buffer']:
if f == k and img is not None:
yield self._video.frame(f, img) # yield a vipy.image.Scene object with annotations at frame f, using the latest annotations from the shared video object
k += 1
break
elif f ==k and img is None:
return
time.sleep(0.01) # release GIL, yuck ...
else:
raise # should never get here
except:
raise
finally:
if self._bufowner:
self._video.delattribute('__stream_buffer') # cleanup, or force a reinitialization by passing the rebuffered=True to the primary iterator
def __getitem__(self, k):
"""Retrieve individual frame index - this is inefficient, use __iter__ instead"""
return self._video.preview(frame=k) # this is inefficient
def write(self, im, flush=False):
"""Write individual frames to write stream"""
assert isinstance(im, vipy.image.Image)
if self._shape is None:
self._shape = im.shape()
assert im.channels() == 3, "RGB frames required"
self.__enter__()
assert self._write_pipe is not None, "Write stream cannot be initialized"
assert im.shape() == self._shape, "Shape cannot change during writing"
self._write_pipe.stdin.write(im.array().astype(np.uint8).tobytes())
if flush:
self._write_pipe.stdin.flush() # do we need this?
if isinstance(im, vipy.image.Scene) and len(im.objects()) > 0 and isinstance(self._video, vipy.video.Scene):
for obj in im.objects():
self._video.add(obj, frame=self._writeindex, rangecheck=False)
self._writeindex += 1 # assumes that the source image is at the appropriate frame rate for this video
def clip(self, n, m=1, continuous=False, tracks=True, activities=True, delay=0, ragged=False):
"""Stream clips of length n such that the yielded video clip contains frame(0+delay) to frame(n+delay), and next contains frame(m+delay) to frame(n+m+delay).
Usage examples:
>>> for vc in v.stream().clip(n=16, m=2):
>>> # yields video vc with frames [0,16] from v
>>> # then video vc with frames [2,18] from v
>>> # ... finally video with frames [len(v)-n-1, len(v)-1]
Introducing a delay so that the clips start at a temporal offset from v
>>> for vc in v.stream().clip(n=8, m=3, delay=1):
>>> # yields video vc with frames [1,9]
>>> # then video vc with frames [4,12] ...
Args:
n: [int] the length of the clip in frames
m: [int] the stride between clips in frames
delay: [int] The temporal delay in frames for the clip, must be less than n and >= 0
continuous: [bool] if true, then yield None for the sequential frames not aligned with a stride so that a clip is yielded on every frame
activities: [bool] if false, then activities from the source video are not copied into the clip
tracks: [bool] if false, then tracks from the source video are not copied into the clip
Returns:
An iterator that yields `vipy.video.Video` objects each of length n with startframe += m, starting at frame=delay, such that each video contains the tracks and activities (if requested) for this clip sourced from the shared stream video.
"""
assert isinstance(n, int) and n>0, "Clip length must be a positive integer"
assert isinstance(m, int) and m>0, "Clip stride must be a positive integer"
        assert isinstance(delay, int) and delay >= 0 and delay < n, "Clip delay must be a non-negative integer less than n"
vc = self._video.clone(flushfilter=True).clear().nourl().nofilename()
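        # f_copy_annotations builds the clip from the buffered frames and copies in only the
        # tracks/activities that overlap the clip's source frame window [k-(n-1), k], shifting
        # them to clip-local frame indices (offset by -(k-(n-1)) and truncate to [0, n-1]).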
f_copy_annotations = lambda v, fr, k, n: (v.clear().clone(shallow=True).fromframes(fr)
.activities([a.clone().offset(-(k-(n-1))).truncate(0,n-1) for (ak,a) in self._video.activities().items() if a.during_interval(k-(n-1), k, inclusive=False)] if activities else [])
.tracks([t.clone(k-(n-1), k).offset(-(k-(n-1))).truncate(0,n-1) for (tk,t) in self._video.tracks().items() if t.during_interval(k-(n-1), k)] if tracks else [])
if (v is not None and isinstance(v, vipy.video.Scene)) else
v.clear().clone(shallow=True).fromframes(fr))
(frames, newframes) = ([], [])
for (k,im) in enumerate(self):
newframes.append(im)
if len(newframes) >= m and len(frames)+len(newframes) >= n:
# Use frameindex+1 so that we include (0,1), (1,2), (2,3), ... for n=2, m=1
# The delay shifts the clip +delay frames (1,2,3), (3,4,5), ... for n=3, m=2, delay=1
frames.extend(newframes)
(frames, newframes) = (frames[-n:], [])
yield f_copy_annotations(vc, frames, k, n)
elif continuous:
yield None
if ragged and len(newframes) > 0:
yield f_copy_annotations(vc, newframes, k, len(newframes))
def batch(self, n):
"""Stream batches of length n such that each batch contains frames [0,n], [n+1, 2n], ... Last batch will be ragged.
The primary use case for batch() is to provide a mechanism for parallel batch processing on a GPU.
>>> for (im, im_gpu) in zip(vi, myfunc(vi.stream().batch(16))):
>>>
>>> def myfunc(gen):
>>> for vb in gen:
>>> # process the batch vb (length n) in parallel by encoding on a GPU with batchsize=n
>>> for im in f_gpu(vb):
        >>> yield im_gpu
        This will then yield the GPU batch-processed image im_gpu zipped with the original image im.
"""
return self.clip(n=n, m=n, continuous=False, ragged=True)
def frame(self, delay=0):
"""Stream individual frames of video with negative offset n frames to the stream head. If delay=30, this will return a frame 30 frames ago"""
assert isinstance(delay, int) and delay >= 0, "Frame delay must be non-positive integer"
n = -delay
frames = []
i = 0
for (k,im) in enumerate(self):
frames.append( (k,im) )
(kout, imout) = frames[0]
frames.pop(0) if len(frames) > abs(n) else None
i = k
yield self._video.frame(kout, imout.array()) if len(frames) == delay else None # refetch for track interpolation
class Video(object):
""" vipy.video.Video class
The vipy.video class provides a fluent, lazy interface for representing, transforming and visualizing videos.
The following constructors are supported:
>>> vid = vipy.video.Video(filename='/path/to/video.ext')
    Valid video extensions are those that are supported by ffmpeg ['.avi', '.mp4', '.mov', '.wmv', '.mpg', '.mkv', '.webm'].
>>> vid = vipy.video.Video(url='https://www.youtube.com/watch?v=MrIN959JuV8')
>>> vid = vipy.video.Video(url='http://path/to/video.ext', filename='/path/to/video.ext')
Youtube URLs are downloaded to a temporary filename, retrievable as vid.download().filename(). If the environment
variable 'VIPY_CACHE' is defined, then videos are saved to this directory rather than the system temporary directory.
If a filename is provided to the constructor, then that filename will be used instead of a temp or cached filename.
URLs can be defined as an absolute URL to a video file, or to a site supported by 'youtube-dl' (https://ytdl-org.github.io/youtube-dl/supportedsites.html)
>>> vid = vipy.video.Video(url='s3://BUCKET.s3.amazonaws.com/PATH/video.ext')
If you set the environment variables VIPY_AWS_ACCESS_KEY_ID and VIPY_AWS_SECRET_ACCESS_KEY, then this will download videos directly from S3 using boto3 and store in VIPY_CACHE.
Note that the URL protocol should be 's3' and not 'http' to enable keyed downloads.
>>> vid = vipy.video.Video(array=array, colorspace='rgb')
The input 'array' is an NxHxWx3 numpy array corresponding to an N-length list of HxWx3 uint8 numpy array which is a single frame of pre-loaded video
Note that some video transformations are only available prior to load(), and the array() is assumed immutable after load().
>>> frames = [im for im in vipy.video.RandomVideo()]
>>> vid = vipy.video.Video(frames=frames)
The input can be an RTSP video stream. Note that streaming is most efficiently performed using `vipy.video.Scene`. The URL must contain the 'rtsp://' url scheme.
You can experiment with this using the free Periscope H.264 RTSP App (https://apps.apple.com/us/app/periscope-hd-h-264-rtsp-cam/id1095600218)
>>> vipy.video.Scene(url='rtsp://127.0.0.1:8554/live.sdp').show()
>>> for im in vipy.video.Scene(url='rtsp://127.0.0.1:8554/live.sdp').stream():
>>> print(im)
See also 'pip install heyvi'
Args:
filename: [str] The path to a video file.
url: [str] The URL to a video file. If filename is not provided, then a random filename is assigned in VIPY_CACHE on download
framerate: [float] The framerate of the video file. This is required. You can introspect this using ffprobe.
attributes: [dict] A user supplied dictionary of metadata about this video.
        colorspace: [str] Must be in ['rgb', 'bgr', 'lum', 'float']
array: [numpy] An NxHxWxC numpy array for N frames each HxWxC shape
startframe: [int] A start frame to clip the video
endframe: [int] An end frame to clip the video
startsec: [float] A start time in seconds to clip the video (this requires setting framerate)
endsec: [float] An end time in seconds to clip the video (this requires setting framerate)
frames: [list of `vipy.image.Image`] A list of frames in the video
probeshape: [bool] If true, then probe the shape of the video from ffprobe to avoid an explicit preview later. This can speed up loading in some circumstances.
"""
def __init__(self, filename=None, url=None, framerate=30.0, attributes=None, array=None, colorspace=None, startframe=None, endframe=None, startsec=None, endsec=None, frames=None, probeshape=False):
self._url = None
self._filename = None
self._array = None
self._colorspace = None
self._ffmpeg = None
self._framerate = None
self.attributes = attributes if attributes is not None else {}
assert isinstance(self.attributes, dict), "Attributes must be a python dictionary"
assert filename is not None or url is not None or array is not None or frames is not None, 'Invalid constructor - Requires "filename", "url" or "array" or "frames"'
assert not isurl(filename)
# FFMPEG installed?
if not has_ffmpeg:
warnings.warn('"ffmpeg" executable not found on path, this is required for vipy.video - Install from http://ffmpeg.org/download.html')
# Constructor clips
startframe = startframe if startframe is not None else (0 if endframe is not None else startframe)
assert (startsec is not None and endsec is not None) or (startsec is None and endsec is None), "Invalid input - (startsec,endsec) are both required"
(self._startframe, self._endframe) = (None, None) # __repr__ only
(self._startsec, self._endsec) = (None, None) # __repr__ only (legacy, no longer used)
# Input filenames
if url is not None:
assert isurl(url), 'Invalid URL "%s" ' % url
self._url = url
if filename is not None:
self._filename = os.path.normpath(os.path.expanduser(filename))
elif self._url is not None:
if isS3url(self._url):
self._filename = totempdir(self._url) # Preserve S3 Object ID
elif isRTSPurl(self._url) or isRTMPurl(self._url):
# https://ffmpeg.org/ffmpeg-protocols.html#rtsp
self._filename = self._url
elif isvideourl(self._url):
self._filename = templike(self._url)
elif isyoutubeurl(self._url):
self._filename = os.path.join(tempdir(), '%s' % self._url.split('?')[1].split('&')[0])
else:
self._filename = totempdir(self._url)
if vipy.globals.cache() is not None and self._filename is not None and not isRTSPurl(self._filename) and not isRTMPurl(self._filename):
self._filename = os.path.join(remkdir(vipy.globals.cache()), filetail(self._filename))
# Initial video shape: useful to avoid preview()
self._ffmpeg = ffmpeg.input(self.filename()) # restore, no other filters
if probeshape and (frames is None and array is None) and has_ffprobe and self.hasfilename():
self.shape(self.probeshape())
else:
self._shape = None # preview() on shape()
# Video filter chain
if framerate is not None:
if array is None and frames is None:
self.framerate(framerate)
self._framerate = framerate
if startframe is not None:
self.clip(startframe, endframe)
if startsec is not None:
# WARNING: if the user does not supply the correct framerate for the video, then this will be wrong since these are converted to frames
            self.clip(int(round(startsec*self.framerate())), int(round(endsec*self.framerate())) if endsec is not None else None)
# Array input
assert not (array is not None and frames is not None)
if array is not None:
self.array(array)
self.colorspace(colorspace)
elif frames is not None and (isinstance(frames, list) or isinstance(frames, tuple)) and all([isinstance(im, vipy.image.Image) for im in frames]):
self.fromframes(frames)
elif frames is not None and (isinstance(frames, list) or isinstance(frames, tuple)) and all([isinstance(im, str) and os.path.exists(im) for im in frames]):
self.fromframes([vipy.image.Image(filename=f) for f in frames])
elif frames is not None and (isinstance(frames, str) and os.path.isdir(frames)):
self.fromdirectory(frames)
@classmethod
def cast(cls, v):
"""Cast a conformal video object to a `vipy.video.Video` object.
This is useful for downcasting superclasses.
>>> vs = vipy.video.RandomScene()
>>> v = vipy.video.Video.cast(vs)
"""
assert isinstance(v, vipy.video.Video), "Invalid input - must be derived from vipy.video.Video"
v.__class__ = vipy.video.Video
return v
@classmethod
def from_json(cls, s):
"""Import a json string as a `vipy.video.Video` object.
This will perform a round trip from a video to json and back to a video object.
This same operation is used for serialization of all vipy objects to JSON for storage.
>>> v = vipy.video.Video.from_json(vipy.video.RandomVideo().json())
"""
d = json.loads(s) if not isinstance(s, dict) else s
v = cls(filename=d['_filename'],
url=d['_url'],
framerate=d['_framerate'],
array=np.array(d['_array']) if d['_array'] is not None else None,
colorspace=d['_colorspace'],
attributes=d['attributes'],
startframe=d['_startframe'],
endframe=d['_endframe'],
startsec=d['_startsec'],
endsec=d['_endsec'])
v._ffmpeg = v._from_ffmpeg_commandline(d['_ffmpeg'])
return v.filename(d['_filename']) if d['_filename'] is not None else v.nofilename()
def __repr__(self):
strlist = []
if self.isloaded():
strlist.append("height=%d, width=%d, frames=%d, color=%s" % (self.height(), self.width(), len(self), self.colorspace()))
if self.filename() is not None:
strlist.append('filename="%s"' % self.filename())
if self.hasurl():
strlist.append('url="%s"' % self.url())
if not self.isloaded() and self._startframe is not None and self._endframe is not None:
strlist.append('clip=(%d,%d)' % (self._startframe, self._endframe))
if not self.isloaded() and self._startframe is not None and self._endframe is None:
strlist.append('clip=(%d,)' % (self._startframe))
if self._framerate is not None:
strlist.append('fps=%1.1f' % float(self._framerate))
return str('<vipy.video: %s>' % (', '.join(strlist)))
def __len__(self):
"""Number of frames in the video if loaded, else zero.
.. notes:: Do not automatically trigger a load, since this can interact in unexpected ways with other tools that depend on fast __len__()
"""
if not self.isloaded():
warnings.warn('Load() video to see number of frames - Returning zero') # should this just throw an exception?
return len(self.array()) if self.isloaded() else 0
def __getitem__(self, k):
"""Alias for `vipy.video.Video.frame`"""
return self.frame(k)
def metadata(self):
"""Return a dictionary of metadata about this video.
This is an alias for the 'attributes' dictionary.
"""
return self.attributes
def sanitize(self):
"""Remove all private keys from the attributes dictionary.
        The attributes dictionary is useful storage for arbitrary (key,value) pairs. However, this storage may contain sensitive information that should be scrubbed from the video before serialization. As a general rule, any key that is of the form '__keyname' prepended by two underscores is a private key. This is analogous to private or reserved attributes in the python language. Users should reserve these keynames for those keys that should be sanitized and removed before any serialization of this object.
>>> assert self.setattribute('__mykey', 1).sanitize().hasattribute('__mykey') == False
"""
if self._has_private_attribute():
self.attributes = {k:v for (k,v) in self.attributes.items() if not k.startswith('__')}
return self
def videoid(self, newid=None):
"""Return a unique video identifier for this video, as specified in the 'video_id' attribute, or by SHA1 hash of the `vipy.video.Video.filename` and `vipy.video.Video.url`.
Args:
newid: [str] If not None, then update the video_id as newid.
Returns:
The video ID if newid=None else self
.. note::
- If the video filename changes (e.g. from transformation), and video_id is not set in self.attributes, then the video ID will change.
- If a video does not have a filename or URL or a video ID in the attributes, then this will return None
- To preserve a video ID independent of transformations, set self.setattribute('video_id', ${MY_ID}), or pass in newid
"""
if newid is not None:
self.setattribute('video_id', newid)
return self
else:
return self.attributes['video_id'] if 'video_id' in self.attributes else (hashlib.sha1(str(str(self.filename())+str(self.url())).encode("UTF-8")).hexdigest() if (self.filename() is not None or self.url() is not None) else None)
def frame(self, k, img=None):
"""Return the kth frame as an `vipy.image Image` object"""
assert isinstance(k, int) and k>=0, "Frame index must be non-negative integer"
return Image(array=img if img is not None else (self._array[k] if self.isloaded() else self.preview(k).array()), colorspace=self.colorspace())
def __iter__(self):
"""Iterate over video, yielding read only frames.
>>> for im in vipy.video.RandomScene():
>>> print(im)
"""
return self.stream().__iter__()
def store(self):
"""Store the current video file as an attribute of this object.
Useful for archiving an object to be fully self contained without any external references.
>>> v == v.store().restore(v.filename())
.. note::
-Remove this stored video using unstore()
-Unpack this stored video and set up the video chains using restore()
-This method is more efficient than load() followed by pkl(), as it stores the encoded video as a byte string.
-Useful for creating a single self contained object for distributed processing.
"""
assert self.hasfilename(), "Video file not found"
with open(self.filename(), 'rb') as f:
self.attributes['__video__'] = f.read()
return self
def unstore(self):
"""Delete the currently stored video from `vipy.video.Video.store"""
return self.delattribute('__video__')
def restore(self, filename):
"""Save the currently stored video as set using `vipy.video.Video.store` to filename, and set up filename"""
assert self.hasattribute('__video__'), "Video not stored"
with open(filename, 'wb') as f:
f.write(self.attributes['__video__'])
return self.filename(filename)
@classmethod
def concatenate(cls, videos, outfile, framerate=30, youtube_chapters=None):
"""Temporally concatenate a sequence of videos into a single video stored in outfile.
>>> (v1, v2, v3) = (vipy.video.RandomVideo(128,128,32), vipy.video.RandomVideo(128,128,32), vipy.video.RandomVideo(128,128,32))
>>> vc = vipy.video.Video.concatenate((v1, v2, v3), 'concatenated.mp4', youtube_chapters=lambda v: v.category())
        In this example, vc will point to concatenated.mp4 which will contain (v1,v2,v3) concatenated temporally.
Input:
videos: a single video or an iterable of videos of type `vipy.video.Video` or an iterable of video files
outfile: the output filename to store the concatenation.
            youtube_chapters [bool, callable]: If true, output a string that can be used to define the start and end times of chapters if this video is uploaded to youtube. The string output should be copied to the youtube video description in order to enable chapters on playback. This argument will default to the string representation of the video, but you may also pass a callable of the form: 'youtube_chapters=lambda v: str(v)' which will output the provided string for each video chapter. A useful lambda is 'youtube_chapters=lambda v: v.category()'
framerate [float]: The output frame rate of outfile
Returns:
A `vipy.video.Video` object with filename()=outfile, such that outfile contains the temporal concatenation of pixels in (self, videos).
.. note::
-self will not be modified, this will return a new `vipy.video.Video` object.
-All videos must be the same shape(). If the videos are different shapes, you must pad them to a common size equal to self.shape(). Try `vipy.video.Video.zeropadlike`.
-The output video will be at the framerate of self.framerate().
-if you want to concatenate annotations, call `vipy.video.Scene.annotate` first on the videos to save the annotations into the pixels, then concatenate.
"""
assert len(tolist(videos))>0 and (all([isinstance(v, vipy.video.Video) for v in tolist(videos)]) or all([os.path.exists(f) and vipy.util.isvideofile(f) for f in tolist(videos)]))
vi = tolist(videos) if all([isinstance(v, vipy.video.Video) for v in tolist(videos)]) else [cls(filename=f) for f in tolist(videos)]
        assert all([vij.shape() == vik.shape() for vij in vi for vik in vi]), "Video shapes must all be the same, try padding"
vo = cls(filename=outfile, framerate=vi[0].framerate())
with vo.stream(overwrite=True) as s:
for v in vi:
for im in v.clone().framerate(framerate).stream():
s.write(im)
if youtube_chapters is not None:
f = youtube_chapters if callable(youtube_chapters) else lambda v: str(v).replace('<','').replace('>','') # angle brackets not allowed
print('[vipy.video.concatenate]: Copy the following into the video Description after uploading the videofile "%s" to YouTube to enable chapters on playback.\n' % outfile)
print('\n'.join(['%s %s' % (vipy.util.seconds_to_MMSS_colon_notation(int(s)), str(f(v))) for (s,v) in zip(np.cumsum([0] + [v.duration() for v in vi][:-1]), vi)])); print('\n')
if any([v.duration() < 10 for v in vi]):
warnings.warn('YouTube chapters must be a minimum duration of 10 seconds')
return vo
def stream(self, write=False, overwrite=False, queuesize=1024, bitrate=None, buffered=False, rebuffered=False):
"""Iterator to yield groups of frames streaming from video.
A video stream is a real time iterator to read or write from a video. Streams are useful to group together frames into clips that are operated on as a group.
The following use cases are supported:
>>> v = vipy.video.RandomScene()
Stream individual video frames lagged by 10 frames and 20 frames
        >>> for (im1, im2) in zip(v.stream().frame(delay=10), v.stream().frame(delay=20)):
>>> print(im1, im2)
Stream overlapping clips such that each clip is a video n=16 frames long and starts at frame i, and the next clip is n=16 frames long and starts at frame i=i+m
>>> for vc in v.stream().clip(n=16, m=4):
>>> print(vc)
Stream non-overlapping batches of frames such that each clip is a video of length n and starts at frame i, and the next clip is length n and starts at frame i+n
>>> for vb in v.stream().batch(n=16):
>>> print(vb)
Create a write stream to incrementally add frames to long video.
>>> vi = vipy.video.Video(filename='/path/to/output.mp4')
>>> vo = vipy.video.Video(filename='/path/to/input.mp4')
>>> with vo.stream(write=True) as s:
>>> for im in vi.stream():
>>> s.write(im) # manipulate pixels of im, if desired
Create a 480p YouTube live stream from an RTSP camera at 5Hz
>>> vo = vipy.video.Scene(url='rtmp://a.rtmp.youtube.com/live2/$SECRET_STREAM_KEY')
>>> vi = vipy.video.Scene(url='rtsp://URL').framerate(5)
>>> with vo.framerate(5).stream(write=True, bitrate='1000k') as s:
>>> for im in vi.framerate(5).resize(cols=854, rows=480):
>>> s.write(im)
Args:
write: [bool] If true, create a write stream
overwrite: [bool] If true, and the video output filename already exists, overwrite it
            queuesize: [int] The maximum queue size for the pipe thread.
            buffered: [bool] If true, share a stream buffer between iterators over this video so that multiple iterators stay temporally aligned (see `Stream.__iter__`)
            rebuffered: [bool] If true, force reinitialization of the shared stream buffer
bitrate: [str] The ffmpeg bitrate of the output encoder for writing, written like '2000k'
Returns:
A Stream object
..note:: Using this iterator may affect PDB debugging due to stdout/stdin redirection. Use ipdb instead.
"""
return Stream(self, queuesize=queuesize, write=write, overwrite=overwrite, bitrate=bitrate, buffered=buffered, rebuffered=rebuffered) # do not clone
def clear(self):
"""no-op for `vipy.video.Video` object, used only for `vipy.video.Scene`"""
return self
def bytes(self):
"""Return a bytes representation of the video file"""
assert self.hasfilename(), "Invalid filename"
with open(self.filename(), 'rb') as f:
data = io.BytesIO(f.read())
return str(data.read()).encode('UTF-8')
def frames(self):
"""Alias for __iter__()"""
return self.__iter__()
def framelist(self):
return list(self.frames())
def _update_ffmpeg_seek(self, timestamp_in_seconds=0, offset=0):
if timestamp_in_seconds == 0 and offset == 0:
return self
nodes = ffmpeg.nodes.get_stream_spec_nodes(self._ffmpeg)
sorted_nodes, outgoing_edge_maps = ffmpeg.dag.topo_sort(nodes)
for n in sorted_nodes:
if 'input' == n.__dict__['name']:
if 'ss' not in n.__dict__['kwargs']:
n.__dict__['kwargs']['ss'] = 0
if timestamp_in_seconds == 0:
n.__dict__['kwargs']['ss'] = n.__dict__['kwargs']['ss'] + offset
else:
n.__dict__['kwargs']['ss'] = timestamp_in_seconds + offset
return self
raise ValueError('invalid ffmpeg argument "%s" -> "%s"' % ('ss', timestamp_in_seconds))
def _update_ffmpeg(self, argname, argval, node_name=None):
"""Update the ffmpeg filter chain to overwrite the (argname, argval) elements.
        Useful for fine-tuning a filter chain without rewiring the whole thing.
"""
nodes = ffmpeg.nodes.get_stream_spec_nodes(self._ffmpeg)
sorted_nodes, outgoing_edge_maps = ffmpeg.dag.topo_sort(nodes)
for n in sorted_nodes:
if argname in n.__dict__['kwargs'] or node_name == n.__dict__['name']:
n.__dict__['kwargs'][argname] = argval
return self
raise ValueError('invalid ffmpeg argument "%s" -> "%s"' % (argname, argval))
def _ffmpeg_commandline(self, f=None):
"""Return the ffmpeg command line string that will be used to process the video"""
cmd = f.compile() if f is not None else self._ffmpeg.output('dummyfile').compile()
for (k,c) in enumerate(cmd):
if c is None:
cmd[k] = str(c)
elif 'filter' in c:
cmd[k+1] = '"%s"' % str(cmd[k+1])
elif 'map' in c:
cmd[k+1] = '"%s"' % str(cmd[k+1])
return str(' ').join(cmd)
def commandline(self):
"""Return the equivalent ffmpeg command line string that will be used to transcode the video.
This is useful for introspecting the complex filter chain that will be used to process the video. You can try to run this command line yourself for debugging purposes, by replacing 'dummyfile' with an appropriately named output file.
"""
return self._ffmpeg_commandline()
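    # Example (sketch, hypothetical path): introspect the generated ffmpeg invocation.
    # >>> v = vipy.video.Video(filename='/path/to/video.mp4').framerate(5)
    # >>> print(v.commandline())  # prints an "ffmpeg -i ... -filter_complex ... dummyfile" command,
    # # which can be run manually for debugging by replacing 'dummyfile' with a real output file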
def _from_ffmpeg_commandline(self, cmd, strict=False):
"""Convert the ffmpeg command line string (e.g. from `vipy.video.Video.commandline`) to the corresponding ffmpeg-python filter chain and update self"""
args = copy.copy(cmd).replace(str(self.filename()), 'FILENAME').split(' ') # filename may contain spaces
        assert args[0] == 'ffmpeg', "Invalid FFMPEG command line '%s'" % cmd
        assert args[1] == '-i' or (args[3] == '-i' and args[1] == '-ss'), "Invalid FFMPEG command line '%s'" % cmd
        assert args[-1] == 'dummyfile', "Invalid FFMPEG command line '%s'" % cmd
        assert len(args) >= 4, "Invalid FFMPEG command line '%s'" % cmd
if args[1] == '-ss':
timestamp_in_seconds = float(args[2])
timestamp_in_seconds = int(timestamp_in_seconds) if timestamp_in_seconds == 0 else timestamp_in_seconds # 0.0 -> 0
args = [args[0]] + args[3:]
f = ffmpeg.input(args[2].replace('FILENAME', self.filename()), ss=timestamp_in_seconds) # restore filename, set offset time
self._startframe = int(round(timestamp_in_seconds*self.framerate())) # necessary for clip() and __repr__
else:
f = ffmpeg.input(args[2].replace('FILENAME', self.filename())) # restore filename
if len(args) > 4:
            assert args[3] == '-filter_complex', "Invalid FFMPEG command line '%s'" % cmd
            assert args[4][0] == '"' and args[4][-1] == '"', "Invalid FFMPEG command line '%s'" % cmd
filterargs = args[4][1:-1].split(';')
for a in filterargs:
assert a.count(']') == 2 and a.count('[') == 2
kwstr = a.split(']', maxsplit=1)[1].split('[', maxsplit=1)[0]
if kwstr.count('=') == 0:
f = f.filter(kwstr)
else:
(a, kw) = ([], {})
(filtername, kwstr) = kwstr.split('=', maxsplit=1)
for s in kwstr.split(':'):
if s.count('=') > 0:
(k,v) = s.split('=')
kw[k] = v
else:
a.append(s)
if 'end' in kw:
self._endframe = (self._startframe if self._startframe is not None else 0) + int(round(float(kw['end'])*self.framerate())) # for __repr__
if 'start' in kw:
pass
if 'start_frame' in kw or 'end_frame' in kw:
f = f.setpts('PTS-STARTPTS') # reset timestamp to 0 before trim filter in seconds
if 'end_frame' in kw:
self._endframe = (self._startframe if self._startframe is not None else 0) + int(kw['end_frame']) # for __repr__
kw['end'] = int(kw['end_frame'])/self.framerate() # convert end_frame to end (legacy)
del kw['end_frame'] # use only end and not end frame
if 'start_frame' in kw:
self._startframe = (self._startframe if self._startframe is not None else 0) + int(kw['start_frame']) # for __repr__
kw['start'] = int(kw['start_frame'])/self.framerate() # convert start_frame to start (legacy)
del kw['start_frame'] # use only start and not start_frame
f = f.filter(filtername, *a, **kw)
if strict:
assert self._ffmpeg_commandline(f.output('dummyfile')) == cmd, "FFMPEG command line '%s' != '%s'" % (self._ffmpeg_commandline(f.output('dummyfile')), cmd)
return f
def _isdirty(self):
"""Has the FFMPEG filter chain been modified from the default? If so, then ffplay() on the video file will be different from self.load().play()"""
return '-filter_complex' in self._ffmpeg_commandline()
def probeshape(self):
"""Return the (height, width) of underlying video file as determined from ffprobe
.. warning:: this does not take into account any applied ffmpeg filters. The shape will be the (height, width) of the underlying video file.
"""
p = self.probe()
assert len(p['streams']) > 0
return (p['streams'][0]['height'], p['streams'][0]['width'])
def duration_in_seconds_of_videofile(self):
"""Return video duration of the source filename (NOT the filter chain) in seconds, requires ffprobe. Fetch once and cache.
.. notes:: This is the duration of the source video and NOT the duration of the filter chain. If you load(), this may be different duration depending on clip() or framerate() directives.
"""
filehash = hashlib.md5(str(self.filename()).encode()).hexdigest()
        if self.hasattribute('__duration_in_seconds_of_videofile') and self.attributes['__duration_in_seconds_of_videofile']['filehash'] == filehash:
return self.attributes['__duration_in_seconds_of_videofile']['duration']
else:
d = float(self.probe()['format']['duration'])
self.attributes['__duration_in_seconds_of_videofile'] = {'duration':d, 'filehash':filehash} # for next time, private attribute
return d
def duration_in_frames_of_videofile(self):
"""Return video duration of the source video file (NOT the filter chain) in frames, requires ffprobe.
.. notes:: This is the duration of the source video and NOT the duration of the filter chain. If you load(), this may be different duration depending on clip() or framerate() directives.
"""
return int(np.floor(self.duration_in_seconds_of_videofile()*self.framerate_of_videofile()))
def duration(self, frames=None, seconds=None, minutes=None):
"""Return a video clipped with frame indexes between (0, frames) or (0,seconds*self.framerate()) or (0,minutes*60*self.framerate(). Return duration in seconds if no arguments are provided."""
if frames is None and seconds is None and minutes is None:
return self.duration_in_seconds_of_videofile() if not self.isloaded() else (len(self) / self.framerate())
assert frames is not None or seconds is not None or minutes is not None
frames = frames if frames is not None else ((int(seconds*self.framerate()) if seconds is not None else 0) + (int(minutes*60*self.framerate()) if minutes is not None else 0))
return self.clip(0, frames)
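    # Example (sketch): query or clip by duration.
    # >>> v.duration()           # duration of the source file in seconds (no arguments)
    # >>> v.duration(seconds=5)  # returns the video clipped to the first 5 seconds of frames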
def framerate_of_videofile(self):
"""Return video framerate in frames per second of the source video file (NOT the filter chain), requires ffprobe.
"""
p = self.probe()
        assert 'streams' in p and len(p['streams']) > 0
fps = p['streams'][0]['avg_frame_rate']
return float(fps) if '/' not in fps else (float(fps.split('/')[0]) / float(fps.split('/')[1])) # fps='30/1' or fps='30.0'
def resolution_of_videofile(self):
"""Return video resolution in (height, width) in pixels (NOT the filter chain), requires ffprobe.
"""
p = self.probe()
        assert 'streams' in p and len(p['streams']) > 0
(H,W) = (p['streams'][0]['height'], p['streams'][0]['width']) # (height, width) in pixels
return (W,H) if ('tags' in p['streams'][0] and 'rotate' in p['streams'][0]['tags'] and p['streams'][0]['tags']['rotate'] in ['90','270']) else (H,W)
def probe(self):
"""Run ffprobe on the filename and return the result as a dictionary"""
if not has_ffprobe:
raise ValueError('"ffprobe" executable not found on path, this is optional for vipy.video - Install from http://ffmpeg.org/download.html')
assert self.hasfilename(), "Invalid video file '%s' for ffprobe" % self.filename()
return ffmpeg.probe(self.filename())
def print(self, prefix='', verbose=True, sleep=None):
"""Print the representation of the video
This is useful for debugging in long fluent chains. Sleep is useful for adding in a delay for distributed processing.
Args:
prefix: prepend a string prefix to the video __repr__ when printing. Useful for logging.
verbose: Print out the video __repr__. Set verbose=False to just sleep
            sleep: Integer number of seconds to sleep before returning
fluent [bool]: If true, return self else return None. This is useful for terminating long fluent chains in lambdas that return None
Returns:
The video object after sleeping
"""
if verbose:
print(prefix+self.__repr__())
if sleep is not None:
            assert isinstance(sleep, int) and sleep > 0, "Sleep must be a positive integer number of seconds"
time.sleep(sleep)
return self
def __array__(self):
"""Called on np.array(self) for custom array container, (requires numpy >=1.16)"""
return self.numpy()
def dict(self):
"""Return a python dictionary containing the relevant serialized attributes suitable for JSON encoding."""
return self.json(encode=False)
def json(self, encode=True):
"""Return a json representation of the video.
Args:
encode: If true, return a JSON encoded string using json.dumps
Returns:
A JSON encoded string if encode=True, else returns a dictionary object
.. note:: If the video is loaded, then the JSON will not include the pixels. Try using `vipy.video.Video.store` to serialize videos, or call `vipy.video.Video.flush` first.
"""
if self.isloaded():
warnings.warn("JSON serialization of video requires flushed buffers, will not include the loaded video. Try store()/restore()/unstore() instead to serialize videos as standalone objects efficiently.")
d = {'_filename':self._filename,
'_url':self._url,
'_framerate':self._framerate,
'_array':None,
'_colorspace':self._colorspace,
'attributes':self.attributes,
'_startframe':self._startframe,
'_endframe':self._endframe,
'_endsec':self._endsec,
'_startsec':self._startsec,
'_ffmpeg':self._ffmpeg_commandline()}
return json.dumps(d) if encode else d
def take(self, n):
"""Return n frames from the clip uniformly spaced as numpy array
Args:
n: Integer number of uniformly spaced frames to return
Returns:
            A numpy array of shape (n,H,W,C)
.. warning:: This assumes that the entire video is loaded into memory (e.g. call `vipy.video.Video.load`). Use with caution.
"""
assert self.isloaded(), "load() is required before take()"
dt = int(np.round(len(self._array) / float(n))) # stride
return self._array[::dt][0:n]
def framerate(self, fps=None):
"""Change the input framerate for the video and update frame indexes for all annotations
Args:
fps: Float frames per second to process the underlying video
Returns:
If fps is None, return the current framerate, otherwise set the framerate to fps
"""
if fps is None:
return self._framerate
elif fps == self._framerate:
return self
else:
assert not self.isloaded(), "Filters can only be applied prior to load()"
if 'fps=' in self._ffmpeg_commandline():
self._update_ffmpeg('fps', fps) # replace fps filter, do not add to it
else:
self._ffmpeg = self._ffmpeg.filter('fps', fps=fps, round='up') # create fps filter first time
# if '-ss' in self._ffmpeg_commandline():
# No change is needed here. The seek is in seconds and is independent of the framerate
# if 'trim' in self._ffmpeg_commandline():
# No change is needed here. The trim is in units of seconds which is independent of the framerate
self._framerate = fps
return self
def colorspace(self, colorspace=None):
"""Return or set the colorspace as ['rgb', 'bgr', 'lum', 'float']"""
if colorspace is None:
return self._colorspace
elif self.isloaded():
assert str(colorspace).lower() in ['rgb', 'bgr', 'lum', 'float']
if self.array().dtype == np.float32:
assert str(colorspace).lower() in ['float']
elif self.array().dtype == np.uint8:
assert str(colorspace).lower() in ['rgb', 'bgr', 'lum']
if str(colorspace).lower() in ['lum']:
assert self.channels() == 1, "Luminance colorspace must be one channel uint8"
elif str(colorspace).lower() in ['rgb', 'bgr']:
assert self.channels() == 3, "RGB or BGR colorspace must be three channel uint8"
else:
raise ValueError('Invalid array() type "%s" - only np.float32 or np.uint8 allowed' % str(self.array().dtype))
self._colorspace = str(colorspace).lower()
return self
def nourl(self):
"""Remove the `vipy.video.Video.url` from the video"""
(self._url, self._urluser, self._urlpassword, self._urlsha1) = (None, None, None, None)
return self
def url(self, url=None, username=None, password=None, sha1=None):
"""Video URL and URL download properties"""
if url is not None:
self._url = url # note that this does not change anything else, better to use the constructor for this
if url is not None and (isRTSPurl(url) or isRTMPurl(url)):
self.filename(self._url)
if username is not None:
self._urluser = username # basic authentication
if password is not None:
self._urlpassword = password # basic authentication
if sha1 is not None:
self._urlsha1 = sha1 # file integrity
if url is None and username is None and password is None and sha1 is None:
return self._url
else:
return self
def isloaded(self):
"""Return True if the video has been loaded"""
return self._array is not None
def isloadable(self, flush=True):
"""Return True if the video can be loaded successfully.
This is useful for filtering bad videos or filtering videos that cannot be loaded using your current FFMPEG version.
Args:
flush: [bool] If true, flush the video after it loads. This will clear the video pixel buffer
Returns:
True if load() can be called without FFMPEG exception.
If flush=False, then self will contain the loaded video, which is helpful to avoid load() twice in some conditions
.. warning:: This requires loading and flushing the video. This is an expensive operation when performed on many videos and may result in out of memory conditions with long videos. Use with caution! Try `vipy.video.Video.canload` to test if a single frame can be loaded as a less expensive alternative.
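A typical use is filtering a list of videos (here `videos` is an assumed list of `vipy.video.Video` objects):
>>> loadable = [v for v in videos if v.isloadable()]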
"""
if not self.isloaded():
try:
self.load() # try to load the whole thing
if flush:
self.flush()
return True
except:
return False
else:
return True
def canload(self, frame=0):
"""Return True if the video can be previewed at frame=k successfully.
This is useful for filtering bad videos or filtering videos that cannot be loaded using your current FFMPEG version.
.. notes:: This will only try to preview a single frame. This will not check if the entire video is loadable. Use `vipy.video.Video.isloadable` in this case
"""
if not self.isloaded():
try:
self.preview(framenum=frame) # try to preview
return True
except:
return False
else:
return True
def iscolor(self):
"""Is the video a three channel color video as returned from `vipy.video.Video.channels`?"""
return self.channels() == 3
def isgrayscale(self):
"""Is the video a single channel as returned from `vipy.video.Video.channels`?"""
return self.channels() == 1
def hasfilename(self):
"""Does the filename returned from `vipy.video.Video.filename` exist?"""
return self._filename is not None and (os.path.exists(self._filename) or isRTSPurl(self._filename) or isRTMPurl(self._filename))
def isdownloaded(self):
"""Does the filename returned from `vipy.video.Video.filename` exist, meaning that the url has been downloaded to a local file?"""
return self._filename is not None and os.path.exists(self._filename)
def hasurl(self):
"""Is the url returned from `vipy.video.Video.url` a well formed url?"""
return self._url is not None and isurl(self._url)
def array(self, array=None, copy=False):
"""Set or return the video buffer as a numpy array.
Args:
array: [np.array] A numpy array of size NxHxWxC = (frames, height, width, channels) of type uint8 or float32.
copy: [bool] If true, copy the buffer by value instead of by reference. Copied buffers do not share pixels.
Returns:
if array=None, return a reference to the pixel buffer as a numpy array, otherwise return the video object.
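For example (a sketch, assuming a loaded three channel video):
>>> buf = v.load().array()        # reference to the NxHxWxC pixel buffer
>>> v = v.array(buf, copy=True)   # replace the buffer with a copy
>>> v = v.colorspace('rgb')       # colorspace must be reset after array()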
"""
if array is None:
return self._array
elif isnumpy(array):
assert array.dtype == np.float32 or array.dtype == np.uint8, "Invalid input - array() must be type uint8 or float32"
assert array.ndim == 4, "Invalid input array() must be of shape NxHxWxC, for N frames, of size HxW with C channels"
self._array = np.copy(array) if copy else array
if copy:
self._array.setflags(write=True) # mutable iterators, triggers copy
self.colorspace(None) # must be set with colorspace() after array() before _convert()
return self
else:
raise ValueError('Invalid input - array() must be numpy array')
def fromarray(self, array):
"""Alias for self.array(..., copy=True), which forces the new array to be a copy"""
return self.array(array, copy=True)
def fromdirectory(self, indir, sortkey=None):
"""Create a video from a directory of frames stored as individual image filenames.
Given a directory with files:
framedir/image_0001.jpg
framedir/image_0002.jpg
>>> vipy.video.Video(frames='/path/to/framedir')
"""
return self.fromframes([vipy.image.Image(filename=f) for f in sorted(vipy.util.imlist(indir), key=sortkey)])
def fromframes(self, framelist, copy=True):
"""Create a video from a list of frames"""
assert all([isinstance(im, vipy.image.Image) for im in framelist]), "Invalid input"
return self.array(np.stack([im.load().array() if im.load().array().ndim == 3 else np.expand_dims(im.load().array(), 2) for im in framelist]), copy=copy).colorspace(framelist[0].colorspace())
def tonumpy(self):
"""Alias for numpy()"""
return self.numpy()
def mutable(self):
"""Return a video object with a writeable mutable frame array. Video must be loaded, triggers copy of underlying numpy array if the buffer is not writeable.
Returns:
This object with a mutable frame buffer in self.array() or self.numpy()
"""
assert self.isloaded()
self._array = np.copy(self._array) if not self._array.flags['WRITEABLE'] else self._array # triggers copy
self._array.setflags(write=True) # mutable iterators, torch conversion
return self
def numpy(self):
"""Convert the video to a writeable numpy array, triggers a load() and copy() as needed. Returns the numpy array."""
self.load()
self._array = np.copy(self._array) if not self._array.flags['WRITEABLE'] else self._array # triggers copy
self._array.setflags(write=True) # mutable iterators, torch conversion
return self._array
def zeros(self):
self._array = 0*self.load()._array
return self
def reload(self):
return self.clone(flush=True).load()
def nofilename(self):
self._filename = None
self._update_ffmpeg('filename', None)
return self
def filename(self, newfile=None, copy=False, symlink=False):
"""Update video Filename with optional copy from existing file to new file"""
if newfile is None:
return self._filename
newfile = os.path.normpath(os.path.expanduser(newfile))
# Copy or symlink from the old filename to the new filename (if requested)
if copy:
assert self.hasfilename(), "File not found for copy"
remkdir(filepath(newfile))
shutil.copyfile(self._filename, newfile)
elif symlink:
assert self.hasfilename(), "File not found for symlink"
remkdir(filepath(newfile))
if os.path.islink(newfile) and os.path.abspath(os.readlink(newfile)) == os.path.normpath(os.path.abspath(os.path.expanduser(self.filename()))):
pass # already points to the same place, nothing to do
else:
os.symlink(self._filename, newfile)
# Update ffmpeg filter chain with new input node filename (this file may not exist yet)
self._update_ffmpeg('filename', newfile)
self._filename = newfile
return self
def abspath(self):
"""Change the path of the filename from a relative path to an absolute path (not relocatable)"""
return self.filename(os.path.normpath(os.path.abspath(os.path.expanduser(self.filename()))))
def relpath(self, parent=None):
"""Replace the filename with a relative path to parent (or current working directory if none)"""
parent = parent if parent is not None else os.getcwd()
assert parent in os.path.expanduser(self.filename()), "Parent path '%s' not found in abspath '%s'" % (parent, self.filename())
return self.filename(PurePath(os.path.expanduser(self.filename())).relative_to(parent))
def rename(self, newname):
"""Move the underlying video file preserving the absolute path, such that self.filename() == '/a/b/c.ext' and newname='d.ext', then self.filename() -> '/a/b/d.ext', and move the corresponding file"""
newfile = os.path.join(filepath(self.filename()), newname)
shutil.move(self.filename(), newfile)
return self.filename(newfile)
def filesize(self):
"""Return the size in bytes of the filename(), None if the filename() is invalid"""
return os.path.getsize(self.filename()) if self.hasfilename() else None
def downloadif(self, ignoreErrors=False, timeout=10, verbose=True, max_filesize='350m'):
"""Download URL to filename if the filename has not already been downloaded"""
return self.download(ignoreErrors=ignoreErrors, timeout=timeout, verbose=verbose, max_filesize=max_filesize) if self.hasurl() and not self.isdownloaded() else self
def download(self, ignoreErrors=False, timeout=10, verbose=True, max_filesize='350m'):
"""Download URL to filename provided by constructor, or to temp filename.
Args:
ignoreErrors: [bool] If true, show a warning and return the video object, otherwise throw an exception
timeout: [int] An integer timeout in seconds for the download to connect
verbose: [bool] If true, show more verbose console output
max_filesize: [str] A string of the form 'NNNm' or 'NNNg' used for youtube downloads to limit the maximum download size, e.g. '350m' for 350MB or '12g' for 12GB.
Returns:
This video object with the video downloaded to the filename()
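For example (the URL is hypothetical):
>>> v = vipy.video.Video(url='https://example.com/video.mp4')
>>> v = v.download(verbose=True)   # fetches the URL to v.filename()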
"""
if self._url is None and self._filename is not None:
return self
if self._url is None:
raise ValueError('[vipy.video.download]: No URL to download')
elif not isurl(str(self._url)):
raise ValueError('[vipy.video.download]: Invalid URL "%s" ' % self._url)
try:
url_scheme = urllib.parse.urlparse(self._url)[0]
if isyoutubeurl(self._url):
f = self._filename if filefull(self._filename) is None else filefull(self._filename)
vipy.videosearch.download(self._url, f, writeurlfile=False, skip=ignoreErrors, verbose=verbose, max_filesize=max_filesize)
for ext in ['mkv', 'mp4', 'webm']:
f = '%s.%s' % (self.filename(), ext)
if os.path.exists(f):
self.filename(f) # change the filename to match the youtube extension
break
if not self.hasfilename():
raise ValueError('Downloaded file not found "%s.*"' % self.filename())
elif url_scheme in ['http', 'https'] and isvideourl(self._url):
vipy.downloader.download(self._url,
self._filename,
verbose=verbose,
timeout=timeout,
sha1=None,
username=None,
password=None)
elif url_scheme == 'file':
shutil.copyfile(self._url, self._filename)
elif url_scheme == 's3':
if self.filename() is None:
self.filename(totempdir(self._url))
if vipy.globals.cache() is not None:
self.filename(os.path.join(remkdir(vipy.globals.cache()), filetail(self._url)))
vipy.downloader.s3(self.url(), self.filename(), verbose=verbose)
elif url_scheme == 'scp':
if self.filename() is None:
self.filename(templike(self._url))
if vipy.globals.cache() is not None:
self.filename(os.path.join(remkdir(vipy.globals.cache()), filetail(self._url)))
vipy.downloader.scp(self._url, self.filename(), verbose=verbose)
elif not isvideourl(self._url) and vipy.videosearch.is_downloadable_url(self._url):
vipy.videosearch.download(self._url, filefull(self._filename), writeurlfile=False, skip=ignoreErrors, verbose=verbose, max_filesize=max_filesize)
for ext in ['mkv', 'mp4', 'webm']:
f = '%s.%s' % (self.filename(), ext)
if os.path.exists(f):
self.filename(f)
break
if not self.hasfilename():
raise ValueError('Downloaded file not found "%s.*"' % self.filename())
elif url_scheme == 'rtsp':
# https://ffmpeg.org/ffmpeg-protocols.html#rtsp
pass
else:
raise NotImplementedError(
'Invalid URL scheme "%s" for URL "%s"' %
(url_scheme, self._url))
except (httplib.BadStatusLine,
urllib.error.URLError,
urllib.error.HTTPError):
if ignoreErrors:
warnings.warn('[vipy.video][WARNING]: download failed - Ignoring Video')
self._array = None
else:
raise
except IOError:
if ignoreErrors:
warnings.warn('[vipy.video][WARNING]: IO error - Invalid video file, url or invalid write permissions "%s" - Ignoring video' % self.filename())
self._array = None
else:
raise
except KeyboardInterrupt:
raise
except Exception:
if ignoreErrors:
warnings.warn('[vipy.video][WARNING]: load error for video "%s"' % self.filename())
else:
raise
return self
def fetch(self, ignoreErrors=False):
"""Download only if hasfilename() is not found"""
return self.download(ignoreErrors=ignoreErrors) if not self.hasfilename() else self
def shape(self, shape=None, probe=False):
"""Return (height, width) of the frames, requires loading a preview frame from the video if the video is not already loaded, or providing the shape=(height,width) by the user"""
if probe:
return self.shape(self.probeshape(), probe=False)
elif shape is not None:
assert isinstance(shape, tuple), "shape=(height, width) tuple"
self._shape = shape
self._channels = self.channels()
#self._previewhash = hashlib.md5(str(self._ffmpeg_commandline()).encode()).hexdigest()
return self
elif not self.isloaded():
#previewhash = hashlib.md5(str(self._ffmpeg_commandline()).encode()).hexdigest()
#if not hasattr(self, '_previewhash') or previewhash != self._previewhash:
if self._shape is None or len(self._shape) == 0: # dirty filter chain
im = self.preview() # ffmpeg chain changed, load a single frame of video, triggers fetch
self._shape = (im.height(), im.width()) # cache the shape
self._channels = im.channels()
#self._previewhash = previewhash
return self._shape
else:
return (self._array.shape[1], self._array.shape[2])
def issquare(self):
"""Return true if the video has square dimensions (height == width), else false"""
s = self.shape()
return s[0] == s[1]
def channels(self):
"""Return integer number of color channels"""
if not self.isloaded():
self._channels = 3 # always color video
#previewhash = hashlib.md5(str(self._ffmpeg_commandline()).encode()).hexdigest()
#if not hasattr(self, '_previewhash') or previewhash != self._previewhash:
# im = self.preview() # ffmpeg chain changed, load a single frame of video
# self._shape = (im.height(), im.width()) # cache the shape
# self._channels = im.channels() # cache
# self._previewhash = previewhash
return self._channels # cached
else:
return 1 if self.load().array().ndim == 3 else self.load().array().shape[3]
def width(self):
"""Width (cols) in pixels of the video for the current filter chain"""
return self.shape()[1]
def height(self):
"""Height (rows) in pixels of the video for the current filter chain"""
return self.shape()[0]
def aspect_ratio(self):
"""The width/height of the video expressed as a fraction"""
return self.width() / self.height()
def preview(self, framenum=0):
"""Return selected frame of filtered video, return vipy.image.Image object. This is useful for previewing the frame shape of a complex filter chain or the frame contents at a particular location without loading the whole video"""
if self.isloaded():
return self[framenum]
elif self.hasurl() and not self.hasfilename():
self.download(verbose=True)
if not self.hasfilename():
raise ValueError('Video file not found')
# Convert frame to mjpeg and pipe to stdout, used to get dimensions of video
# - The MJPEG encoder will generally output lower quality than H.264 encoded frames
# - This means that frame indexing from preview() will generate slightly different images than streaming raw
# - Beware running convnets, as the pixels will be slightly different (~4 grey levels in uint8) ...
try:
# FFMPEG frame indexing is inefficient for large framenum. Need to add "-ss sec.msec" flag before input
# - the "ss" option must be provided before the input filename, and is supported by ffmpeg-python as ".input(in_filename, ss=time)"
# - Seek to the frame before the desired frame in order to pipe the next (desired) frame
timestamp_in_seconds = max(0.0, (framenum-1)/float(self.framerate()))
f_prepipe = self.clone(shallow=True)._update_ffmpeg_seek(offset=timestamp_in_seconds)._ffmpeg.filter('select', 'gte(n,{})'.format(0))
f = f_prepipe.output('pipe:', vframes=1, format='image2', vcodec='mjpeg')\
.global_args('-cpuflags', '0', '-loglevel', 'debug' if vipy.globals.isdebug() else 'error')
(out, err) = f.run(capture_stdout=True, capture_stderr=True)
except Exception as e:
raise ValueError('[vipy.video.load]: Video preview failed with error "%s"\n - Video: "%s"\n - FFMPEG command: \'sh> %s\'\n - Try manually running this ffmpeg command to see errors. This error usually means that the video is corrupted.' % (str(e), str(self), str(self._ffmpeg_commandline(f_prepipe.output('preview.jpg', vframes=1)))))
# [EXCEPTION]: UnidentifiedImageError: cannot identify image file, means usually that FFMPEG piped a zero length image
try:
return Image(array=np.array(PIL.Image.open(BytesIO(out))))
except Exception as e:
print('[vipy.video.Video.preview][ERROR]: %s' % str(e))
print(' - FFMPEG attempted to extract a single frame from the following video and failed:\n %s' % str(self))
print(' - This may occur after calling clip() with too short a duration, try increasing the clip to be > 1 sec')
print(' - This may occur after calling clip() with a startframe or endframe outside the duration of the video')
print(' - This may occur if requesting a frame number greater than the length of the video. At this point, we do not know the video length, and cannot fail gracefully')
print(' - This may occur when the framerate of the video from ffprobe (tbr) does not match that passed to fps filter, resulting in a zero length image preview piped to stdout')
print(' - This may occur if the filter chain fails for some unknown reason on this video. Try running this ffmpeg command manually and inspect the FFMPEG console output:\n sh> %s' % str(self._ffmpeg_commandline(f_prepipe.output('preview.jpg', vframes=1))))
raise
def thumbnail(self, outfile=None, frame=0):
"""Return annotated frame=k of video, save annotation visualization to provided outfile.
This is functionally equivalent to `vipy.video.Video.frame` with an additional outfile argument to easily save an annotated thumbnail image.
Args:
outfile: [str] an optional outfile to save the annotated frame
frame: [int >= 0] The frame to output the thumbnail
Returns:
A `vipy.image.Image` object for frame k.
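For example (the output path is hypothetical):
>>> im = v.thumbnail(outfile='/path/to/thumbnail.jpg', frame=0)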
"""
im = self.frame(frame, img=self.preview(frame).array())
return im.savefig(outfile) if outfile is not None else im
def load(self, verbose=False, ignoreErrors=False, shape=None):
"""Load a video using ffmpeg, applying the requested filter chain.
Args:
verbose: [bool] if True, then ffmpeg console output will be displayed.
ignoreErrors: [bool] if True, then all load errors are warned and skipped. Be sure to call isloaded() to confirm loading was successful.
shape: [tuple (height, width, channels)] If provided, use this shape for reading and reshaping the byte stream from ffmpeg. This is useful for efficient loading in some scenarios. Knowing the final output shape can speed up loads by avoiding a preview() of the filter chain to get the frame size
Returns:
this video object, with the pixels loaded in self.array()
.. warning:: Loading long videos can result in out of memory conditions. Try to call clip() first to extract a video segment to load().
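A common pattern is to clip before loading to bound memory use (a sketch; the filename is hypothetical):
>>> v = vipy.video.Video(filename='/path/to/video.mp4').clip(0, 100).load()
>>> frames = v.array()   # NxHxWxC numpy array of the first 100 frames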
"""
if self.isloaded():
return self
elif not self.hasfilename() and self.hasurl():
self.download(ignoreErrors=ignoreErrors)
elif not self.hasfilename() and not ignoreErrors:
raise ValueError('Invalid input - load() requires a valid URL, filename or array')
if not self.hasfilename() and ignoreErrors:
print('[vipy.video.load]: Video file "%s" not found - Ignoring' % self.filename())
return self
if verbose:
print('[vipy.video.load]: Loading "%s"' % self.filename())
# Load the video
#
# [EXCEPTION]: older ffmpeg versions may segfault on complex crop filter chains
# -On some versions of ffmpeg setting -cpuflags=0 fixes it, but the right solution is to rebuild from the head (30APR20)
try:
f_prepipe = copy.deepcopy(self._ffmpeg)
f = self._ffmpeg.output('pipe:', format='rawvideo', pix_fmt='rgb24')\
.global_args('-cpuflags', '0', '-loglevel', 'debug' if vipy.globals.isdebug() else 'quiet')
(out, err) = f.run(capture_stdout=True, capture_stderr=True)
except Exception as e:
if not ignoreErrors:
raise ValueError('[vipy.video.load]: Load failed with error "%s"\n\n - Video: "%s"\n - FFMPEG command: \'sh> %s\'\n - This error usually means that the video is corrupted or that you need to upgrade your FFMPEG distribution to the latest stable version.\n - Try running the output of the ffmpeg command for debugging.' % (str(e), str(self), str(self._ffmpeg_commandline(f_prepipe.output('preview.mp4')))))
else:
return self # Failed, return immediately, useful for calling canload()
# Video shape:
# - due to complex filter chains, we may not know the final video size without executing it
# - However, this introduces extra cost by calling preview() on each filter chain
# - If we know what the shape will be (e.g. we made the video square with a known size), then use it here directly
(height, width, channels) = (self.height(), self.width(), self.channels()) if shape is None else shape
# [EXCEPTION]: older ffmpeg versions may be off by one on the size returned from self.preview() which uses an image decoder vs. f.run() which uses a video decoder
# -Try to correct this manually by searching for a off-by-one-pixel decoding that works. The right way is to upgrade your FFMPEG version to the FFMPEG head (11JUN20)
# -We cannot tell which is the one that the end-user wanted, so we leave it up to the calling function to check dimensions (see self.resize())
if (len(out) % (height*width*channels)) != 0:
#warnings.warn('Your FFMPEG version is triggering a known bug that is being worked around in an inefficient manner. Consider upgrading your FFMPEG distribution.')
if (len(out) % ((height-1)*(width-1)*channels) == 0):
(newwidth, newheight) = (width-1, height-1)
elif (len(out) % ((height-1)*(width)*channels) == 0):
(newwidth, newheight) = (width, height-1)
elif (len(out) % ((height-1)*(width+1)*channels) == 0):
(newwidth, newheight) = (width+1, height-1)
elif (len(out) % ((height)*(width-1)*channels) == 0):
(newwidth, newheight) = (width-1, height)
elif (len(out) % ((height)*(width+1)*channels) == 0):
(newwidth, newheight) = (width+1, height)
elif (len(out) % ((height+1)*(width-1)*channels) == 0):
(newwidth, newheight) = (width-1, height+1)
elif (len(out) % ((height+1)*(width)*channels) == 0):
(newwidth, newheight) = (width, height+1)
elif (len(out) % ((height+1)*(width+1)*channels) == 0):
(newwidth, newheight) = (width+1, height+1)
else:
(newwidth, newheight) = (width, height)
is_loadable = (len(out) % (newheight*newwidth*channels)) == 0
if not is_loadable:
im = self.preview() # get the real shape...
(newheight, newwidth, newchannels) = (im.height(), im.width(), im.channels())
assert is_loadable or ignoreErrors, "Load failed for video '%s', and FFMPEG command line: '%s'" % (str(self), str(self._ffmpeg_commandline(f)))
self._array = np.frombuffer(out, np.uint8).reshape([-1, newheight, newwidth, channels]) if is_loadable else None # read-only
self.colorspace('rgb' if channels == 3 else 'lum')
self.resize(rows=height, cols=width) # Very expensive framewise resizing so that the loaded video is identical shape to preview
else:
self._array = np.frombuffer(out, np.uint8).reshape([-1, height, width, channels]) # read-only
self.colorspace('rgb' if channels == 3 else 'lum')
return self
def speed(self, s):
"""Change the speed by a multiplier s. If s=1, this will be the same speed, s=0.5 for half-speed (slower playback), s=2 for double-speed (faster playback)"""
assert s > 0, "Invalid input"
self._ffmpeg = self._ffmpeg.setpts('%1.3f*PTS' % float(1.0/float(s)))
return self
def clip(self, startframe, endframe=None):
"""Load a video clip betweeen start and end frames"""
assert (endframe is None or startframe <= endframe) and startframe >= 0, "Invalid start and end frames (%s, %s)" % (str(startframe), str(endframe))
if not self.isloaded():
timestamp_in_seconds = ((self._startframe if self._startframe is not None else 0)+startframe)/float(self.framerate())
self._update_ffmpeg_seek(timestamp_in_seconds)
if endframe is not None:
self._ffmpeg = self._ffmpeg.setpts('PTS-STARTPTS') # reset timestamp to 0 before trim filter
self._ffmpeg = self._ffmpeg.trim(start=0, end=(endframe-startframe)/self.framerate()) # must be in seconds to allow for framerate conversion
self._ffmpeg = self._ffmpeg.setpts('PTS-STARTPTS') # reset timestamp to 0 after trim filter
self._startframe = startframe if self._startframe is None else self._startframe + startframe # for __repr__ only
self._endframe = (self._startframe + (endframe-startframe)) if endframe is not None else endframe # for __repr__ only
else:
endframe = endframe if endframe is not None else len(self._array)
self._array = self._array[startframe:endframe]
(self._startframe, self._endframe) = (0, endframe-startframe)
return self
def cliprange(self):
"""Return the planned clip (startframe, endframe) range.
This is useful for introspection of the planned clip() before load(), such as for data augmentation purposes without triggering a load.
Returns:
(startframe, endframe) of the video() such that after load(), the pixel buffer will contain frame=0 equivalent to startframe in the source video, and frame=endframe-startframe-1 equivalent to endframe in the source video.
(0, None) If a video does not have a clip() (e.g. clip() was never called, the filter chain does not include a 'trim')
.. notes:: The endframe can be retrieved (inefficiently) using:
>>> int(round(self.duration_in_frames_of_videofile() * (self.framerate() / self.framerate_of_videofile())))
"""
return (self._startframe if self._startframe is not None else 0, self._endframe)
#def cliptime(self, startsec, endsec):
# """Load a video clip betweeen start seconds and end seconds, should be initialized by constructor, which will work but will not set __repr__ correctly"""
# assert startsec <= endsec and startsec >= 0, "Invalid start and end seconds (%s, %s)" % (str(startsec), str(endsec))
# assert not self.isloaded(), "Filters can only be applied prior to load() - Try calling flush() first"
# self._ffmpeg = self._ffmpeg.trim(start=startsec, end=endsec)\
# .setpts('PTS-STARTPTS') # reset timestamp to 0 after trim filter
# self._startsec = startsec if self._startsec is None else self._startsec + startsec # for __repr__ only
# self._endsec = endsec if self._endsec is None else self._startsec + (endsec-startsec) # for __repr__ only
# return self
def rot90cw(self):
"""Rotate the video 90 degrees clockwise, can only be applied prior to load()"""
assert not self.isloaded(), "Filters can only be applied prior to load() - Try calling flush() first"
self.shape(shape=(self.width(), self.height())) # transposed
self._ffmpeg = self._ffmpeg.filter('transpose', 1)
return self
def rot90ccw(self):
"""Rotate the video 90 degrees counter-clockwise, can only be applied prior to load()"""
assert not self.isloaded(), "Filters can only be applied prior to load() - Try calling flush() first"
self.shape(shape=(self.width(), self.height())) # transposed
self._ffmpeg = self._ffmpeg.filter('transpose', 2)
return self
def fliplr(self):
"""Mirror the video left/right by flipping horizontally"""
if not self.isloaded():
self._ffmpeg = self._ffmpeg.filter('hflip')
else:
self.array(np.stack([np.fliplr(f) for f in self._array]), copy=False)
return self
def flipud(self):
"""Rotate the video 90 degrees counter-clockwise, can only be applied prior to load()"""
assert not self.isloaded(), "Filters can only be applied prior to load() - Try calling flush() first"
self._ffmpeg = self._ffmpeg.filter('vflip')
return self
def rescale(self, s):
"""Rescale the video by factor s, such that the new dimensions are (s*H, s*W), can only be applied prior to load()"""
if s == 1:
return self
assert not self.isloaded(), "Filters can only be applied prior to load() - Try calling flush() first"
self.shape(shape=(int(np.round(self.height()*float(np.ceil(s*1e6)/1e6))), int(np.round(self.width()*float(np.ceil(s*1e6)/1e6))))) # update the known shape
self._ffmpeg = self._ffmpeg.filter('scale', 'iw*%1.6f' % float(np.ceil(s*1e6)/1e6), 'ih*%1.6f' % float(np.ceil(s*1e6)/1e6)) # ceil last significant digit to avoid off by one
return self
def resize(self, rows=None, cols=None, width=None, height=None):
"""Resize the video to be (rows=height, cols=width)"""
assert not (rows is not None and height is not None)
assert not (cols is not None and width is not None)
rows = rows if rows is not None else height
cols = cols if cols is not None else width
if (rows is None and cols is None):
return self  # no-op; check this before computing newshape, which would otherwise raise a TypeError when both are None
newshape = (rows if rows is not None else int(np.round(self.height()*(cols/self.width()))),
cols if cols is not None else int(np.round(self.width()*(rows/self.height()))))
if not self.isloaded():
self._ffmpeg = self._ffmpeg.filter('scale', cols if cols is not None else -1, rows if rows is not None else -1)
else:
# Do not use self.__iter__() which triggers copy for mutable arrays
#self.array(np.stack([Image(array=self._array[k]).resize(rows=rows, cols=cols).array() for k in range(len(self))]), copy=False)
# Faster: RGB->RGBX to allow for PIL.Image.fromarray() without tobytes() copy, padding faster than np.concatenate()
#self.array(np.stack([PIL.Image.fromarray(x, mode='RGBX').resize( (cols, rows), resample=PIL.Image.BILINEAR) for x in np.pad(self._array, ((0,0),(0,0),(0,0),(0,1)))])[:,:,:,:-1], copy=False) # RGB->RGBX->RGB
# Fastest: padding introduces more overhead than just accepting tobytes(), image size dependent?
self.array(np.stack([PIL.Image.fromarray(x).resize( (newshape[1], newshape[0]), resample=PIL.Image.BILINEAR) for x in np.ascontiguousarray(self._array)]), copy=False)
self.shape(shape=newshape) # manually set newshape
return self
def mindim(self, dim=None):
"""Resize the video so that the minimum of (width,height)=dim, preserving aspect ratio"""
(H,W) = self.shape() # yuck, need to get image dimensions before filter
return min(self.shape()) if dim is None else (self if min(H,W)==dim else (self.resize(cols=dim) if W<H else self.resize(rows=dim)))
def maxdim(self, dim=None):
"""Resize the video so that the maximum of (width,height)=dim, preserving aspect ratio"""
assert not self.isloaded(), "Filters can only be applied prior to load() - Try calling flush() first"
(H,W) = self.shape() # yuck, need to get image dimensions before filter
return max(H,W) if dim is None else (self.resize(cols=dim) if W>H else self.resize(rows=dim))
def randomcrop(self, shape, withbox=False):
"""Crop the video to shape=(H,W) with random position such that the crop contains only valid pixels, and optionally return the box"""
assert shape[0] <= self.height() and shape[1] <= self.width() # triggers preview()
(ymin, xmin) = (np.random.randint(self.height()-shape[0]), np.random.randint(self.width()-shape[1]))  # y offset drawn over rows, x offset drawn over columns
bb = vipy.geometry.BoundingBox(xmin=int(xmin), ymin=int(ymin), width=int(shape[1]), height=int(shape[0])) # may be outside frame
self.crop(bb, zeropad=True)
return self if not withbox else (self, bb)
def centercrop(self, shape, withbox=False):
"""Crop the video to shape=(H,W) preserving the integer centroid position, and optionally return the box"""
assert shape[0] <= self.height() and shape[1] <= self.width() # triggers preview()
bb = vipy.geometry.BoundingBox(xcentroid=float(self.width()/2.0), ycentroid=float(self.height()/2.0), width=float(shape[1]), height=float(shape[0])).int() # may be outside frame
self.crop(bb, zeropad=True)
return self if not withbox else (self, bb)
def centersquare(self):
"""Crop video of size (NxN) in the center, such that N=min(width,height), keeping the video centroid constant"""
return self.centercrop( (min(self.height(), self.width()), min(self.height(), self.width())))
def cropeven(self):
"""Crop the video to the largest even (width,height) less than or equal to current (width,height). This is useful for some codecs or filters which require even shape."""
return self.crop(vipy.geometry.BoundingBox(xmin=0, ymin=0, width=int(vipy.math.even(self.width())), height=int(vipy.math.even(self.height()))))
def maxsquare(self):
"""Pad the video to be square, preserving the upper left corner of the video"""
# This ffmpeg filter can throw the error: "Padded dimensions cannot be smaller than input dimensions." since the preview is off by one. Add one here to make sure.
# FIXME: not sure where in some filter chains this off-by-one error is being introduced, but probably does not matter since it does not affect any annotations
# and since the max square always preserves the scale and the upper left corner of the source video.
# FIXME: this may trigger an inefficient resizing operation during load()
if not self.issquare():
d = max(self.shape())
self._ffmpeg = self._ffmpeg.filter('pad', d+1, d+1, 0, 0)
self.shape(shape=(d+1, d+1))
return self.crop(vipy.geometry.BoundingBox(xmin=0, ymin=0, width=int(d), height=int(d)))
else:
return self
def minsquare(self):
"""Return a square crop of the video, preserving the upper left corner of the video"""
d = min(self.shape())
self.shape(shape=(d, d))
return self.crop(vipy.geometry.BoundingBox(xmin=0, ymin=0, width=int(d), height=int(d)))
def maxmatte(self):
"""Return a square video with dimensions (self.maxdim(), self.maxdim()) with zeropadded lack bars or mattes above or below the video forming a letterboxed video."""
return self.zeropad(max(1, int((max(self.shape()) - self.width())/2)), max(int((max(self.shape()) - self.height())/2), 1)).maxsquare()
def zeropad(self, padwidth, padheight):
"""Zero pad the video with padwidth columns before and after, and padheight rows before and after
.. notes:: Older FFMPEG implementations can throw the error "Input area #:#:#:# not within the padded area #:#:#:# or zero-sized", which is often caused by odd-sized padding.
Recommend calling self.cropeven().zeropad(...) to avoid this
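For example, padding 16 pixels on every side after forcing an even shape (a sketch):
>>> v = v.cropeven().zeropad(16, 16)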
"""
assert isinstance(padwidth, int) and isinstance(padheight, int)
if not self.isloaded():
self.shape(shape=(self.height()+2*padheight, self.width()+2*padwidth)) # manually set shape to avoid preview
self._ffmpeg = self._ffmpeg.filter('pad', 'iw+%d' % (2*padwidth), 'ih+%d' % (2*padheight), '%d'%padwidth, '%d'%padheight)
elif padwidth > 0 or padheight > 0:
self.array( np.pad(self.array(), ((0,0), (padheight,padheight), (padwidth,padwidth), (0,0)), mode='constant'), copy=False) # this is very expensive, since np.pad() must copy (once in np.pad >=1.17)
return self
def pad(self, padwidth=0, padheight=0):
"""Alias for zeropad"""
return self.zeropad(padwidth=padwidth, padheight=padheight)
def zeropadlike(self, width, height):
"""Zero pad the video balancing the border so that the resulting video size is (width, height)."""
assert width >= self.width() and height >= self.height(), "Invalid input - final (width=%d, height=%d) must be greater than current image size (width=%d, height=%d)" % (width, height, self.width(), self.height())
assert int(np.floor((width - self.width())/2)) == int(np.ceil((width - self.width())/2)), "Zero pad must be symmetric, this is often due to odd zeropadding which ffmpeg doesn't like. Try changing the width +/- 1 pixel"
assert int(np.floor((height - self.height())/2)) == int(np.ceil((height - self.height())/2)), "Zero pad must be symmetric, this is often due to odd zeropadding which ffmpeg doesn't like. Try changing the height +/- 1 pixel"
return self.zeropad(int(np.floor((width - self.width())/2)),
int(np.floor((height - self.height())/2)))
def crop(self, bbi, zeropad=True):
"""Spatially crop the video using the supplied vipy.geometry.BoundingBox, can only be applied prior to load().
"""
assert isinstance(bbi, vipy.geometry.BoundingBox), "Invalid input"
bbc = bbi.clone().imclipshape(self.width(), self.height()).int() # clipped box to image rectangle
bb = bbi.int() if zeropad else bbc # use clipped box if not zeropad
if bb.isdegenerate():
return None
elif not self.isloaded():
if zeropad and bb != bbc:
# Crop outside the image rectangle will segfault ffmpeg, pad video first (if zeropad=False, then rangecheck will not occur!)
self.zeropad(int(np.ceil(bb.width()-bbc.width())), int(np.ceil(bb.height()-bbc.height()))) # cannot be called in derived classes
bb = bb.offset(int(np.ceil(bb.width()-bbc.width())), int(np.ceil(bb.height()-bbc.height()))) # Shift boundingbox by padding (integer coordinates)
self._ffmpeg = self._ffmpeg.filter('crop', '%d' % bb.width(), '%d' % bb.height(), '%d' % bb.xmin(), '%d' % bb.ymin(), keep_aspect=0) # keep_aspect=False (disable exact=True, this is not present in older ffmpeg)
else:
self.array( bbc.crop(self.array()), copy=False ) # crop first, in-place, valid pixels only
if zeropad and bb != bbc:
((dyb, dya), (dxb, dxa)) = ((max(0, int(abs(np.ceil(bb.ymin() - bbc.ymin())))), max(0, int(abs(np.ceil(bb.ymax() - bbc.ymax()))))),
(max(0, int(abs(np.ceil(bb.xmin() - bbc.xmin())))), max(0, int(abs(np.ceil(bb.xmax() - bbc.xmax()))))))
self._array = np.pad(self.load().array(), ((0,0), (dyb, dya), (dxb, dxa), (0, 0)), mode='constant')
self.shape(shape=(bb.height(), bb.width())) # manually set shape
return self
def pkl(self, pklfile=None):
"""save the object to a pickle file and return the object, useful for intermediate saving in long fluent chains"""
pklfile = pklfile if pklfile is not None else toextension(self.filename(), '.pkl')
remkdir(filepath(pklfile))
vipy.util.save(self, pklfile)
return self
def pklif(self, b, pklfile=None):
"""Save the object to the provided pickle file only if b=True. Uuseful for conditional intermediate saving in long fluent chains"""
assert isinstance(b, bool)
return self.pkl(pklfile) if b else self
def webp(self, outfile, pause=3, strict=True, smallest=False, smaller=False):
"""Save a video to an animated WEBP file, with pause=N seconds on the last frame between loops.
Args:
strict: If true, assert that the filename must have an .webp extension
pause: Integer seconds to pause between loops of the animation
smallest: If true, create the smallest possible file but takes much longer to run
smaller: If true, create a smaller file, which takes a little longer to run
Returns:
The filename of the webp file for this video
.. warning:: This may be slow for very long or large videos
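For example (the output path is hypothetical):
>>> f = v.webp('/path/to/animation.webp', pause=2, smaller=True)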
"""
assert strict is False or iswebp(outfile)
outfile = os.path.normpath(os.path.abspath(os.path.expanduser(outfile)))
self.load().frame(0).pil().save(outfile, loop=0, save_all=True, method=6 if smallest else 3 if smaller else 0,
append_images=[self.frame(k).pil() for k in range(1, len(self))],
duration=[int(1000.0/self._framerate) for k in range(0, len(self)-1)] + [pause*1000])
return outfile
def gif(self, outfile, pause=3, smallest=False, smaller=False):
"""Save a video to an animated GIF file, with pause=N seconds between loops.
Args:
pause: Integer seconds to pause between loops of the animation
smallest: If true, create the smallest possible file but takes much longer to run
smaller: If true, create a smaller file, which takes a little longer to run
Returns:
The filename of the animated GIF of this video
.. warning:: This will be very large for big videos, consider using `vipy.video.Video.webp` instead.
"""
assert isgif(outfile)
return self.webp(outfile, pause, strict=False, smallest=smallest, smaller=smaller)  # pass through the requested quality settings
def saveas(self, outfile=None, framerate=None, vcodec='libx264', verbose=False, ignoreErrors=False, flush=False, pause=5):
"""Save video to new output video file. This function does not draw boxes, it saves pixels to a new video file.
Args:
outfile: the absolute path to the output video file. This extension can be .mp4 (for video) or [".webp",".gif"] (for animated image)
ignoreErrors: if True, then exit gracefully without throwing an exception. Useful for chaining download().saveas() on parallel dataset downloads
flush: If true, then flush the buffer for this object right after saving the new video. This is useful for transcoding in parallel
framerate: input framerate of the frames in the buffer, or the output framerate of the transcoded video. If not provided, use framerate of source video
pause: an integer in seconds to pause between loops of animated images if the outfile is webp or animated gif
Returns:
a new video object with this video filename, and a clean video filter chain
.. note::
- If self.array() is loaded, then export the contents of self._array to the video file
- If self.array() is not loaded, and there exists a valid video file, apply the filter chain directly to the input video
- If outfile==None or outfile==self.filename(), then overwrite the current filename
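For example, transcoding the current filter chain to a new file (the output path is hypothetical):
>>> v = v.saveas('/path/to/transcoded.mp4', framerate=30)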
"""
outfile = tocache(tempMP4()) if outfile is None else os.path.normpath(os.path.abspath(os.path.expanduser(outfile)))
premkdir(outfile) # create output directory for this file if not exists
framerate = framerate if framerate is not None else self._framerate
if verbose:
print('[vipy.video.saveas]: Saving video "%s" ...' % outfile)
try:
if iswebp(outfile):
return self.webp(outfile, pause)
elif isgif(outfile):
return self.gif(outfile, pause)
elif isjsonfile(outfile):
with open(outfile, 'w') as f:  # open for writing; read-only mode would fail on f.write()
f.write(self.json(encode=True))
return outfile
elif self.isloaded():
# Save numpy() from load() to video, forcing to be even shape
(n, height, width, channels) = self._array.shape
process = ffmpeg.input('pipe:', format='rawvideo', pix_fmt='rgb24', s='{}x{}'.format(width, height), r=framerate) \
.filter('pad', 'ceil(iw/2)*2', 'ceil(ih/2)*2') \
.output(filename=outfile, pix_fmt='yuv420p', vcodec=vcodec) \
.overwrite_output() \
.global_args('-cpuflags', '0', '-loglevel', 'quiet' if not vipy.globals.isdebug() else 'debug') \
.run_async(pipe_stdin=True)
for frame in self._array:
process.stdin.write(frame.astype(np.uint8).tobytes())
process.stdin.close()
process.wait()
elif (self.isdownloaded() and self._isdirty()) or isRTSPurl(self.filename()) or isRTMPurl(self.filename()):
# Transcode the video file directly, do not load() then export
# Requires saving to a tmpfile if the output filename is the same as the input filename
tmpfile = '%s.tmp%s' % (filefull(outfile), fileext(outfile)) if outfile == self.filename() else outfile
self._ffmpeg.filter('pad', 'ceil(iw/2)*2', 'ceil(ih/2)*2') \
.output(filename=tmpfile, pix_fmt='yuv420p', vcodec=vcodec, r=framerate) \
.overwrite_output() \
.global_args('-cpuflags', '0', '-loglevel', 'quiet' if not vipy.globals.isdebug() else 'debug') \
.run()
if outfile == self.filename():
if os.path.exists(self.filename()):
os.remove(self.filename())
shutil.move(tmpfile, self.filename())
elif self.hasfilename() and not self._isdirty():
shutil.copyfile(self.filename(), outfile)
elif self.hasurl() and not self.hasfilename():
raise ValueError('Input video url "%s" not downloaded, call download() first' % self.url())
elif not self.hasfilename():
raise ValueError('Input video file not found "%s"' % self.filename())
else:
raise ValueError('saveas() failed')
except Exception as e:
if ignoreErrors:
# useful for saving a large number of videos in parallel where some failed download
print('[vipy.video.saveas]: Failed with error "%s" - Returning empty video' % str(repr(e)))
else:
raise
# Return a new video, cloned from this video with the new video file, optionally flush the video we loaded before returning
return self.clone(flushforward=True, flushfilter=True, flushbackward=flush).filename(outfile).nourl()
def savetmp(self):
"""Call `vipy.video.Video.saveas` using a new temporary video file, and return the video object with this new filename"""
return self.saveas(outfile=tempMP4())
def savetemp(self):
"""Alias for `vipy.video.Video.savetmp`"""
return self.savetmp()
def ffplay(self):
"""Play the video file using ffplay"""
assert self.hasfilename() or (self.hasurl() and self.download().hasfilename()) # triggers download if needed
cmd = 'ffplay "%s"' % self.filename()
print('[vipy.video.play]: Executing "%s"' % cmd)
os.system(cmd)
return self
def play(self, verbose=False, notebook=False):
"""Play the saved video filename in self.filename()
If there is no filename, try to download it. If the filter chain is dirty or the pixels are loaded, dump to temp video file first then play it. This uses 'ffplay' on the PATH if available, otherwise uses a fallback player by showing a sequence of matplotlib frames.
If the output of the ffmpeg filter chain has modified this video, then this will be saved to a temporary video file. To play the original video (independent of the filter chain of this video), use `vipy.video.Video.ffplay`.
Args:
verbose: If true, show more verbose output
notebook: If true, play in a jupyter notebook
Returns:
The unmodified video object
"""
if not self.isdownloaded() and self.hasurl():
self.download()
if notebook:
# save to temporary video, this video is not cleaned up and may accumulate
try_import("IPython.display", "ipython"); import IPython.display
if not self.hasfilename() or self.isloaded() or self._isdirty():
v = self.saveas(tempMP4())
warnings.warn('Saving video to temporary file "%s" for notebook viewer ... ' % v.filename())
return IPython.display.Video(v.filename(), embed=True)
return IPython.display.Video(self.filename(), embed=True)
elif has_ffplay:
if self.isloaded() or self._isdirty():
f = tempMP4()
if verbose:
warnings.warn('%s - Saving video to temporary file "%s" for ffplay ... ' % ('Video loaded into memory' if self.isloaded() else 'Dirty FFMPEG filter chain', f))
v = self.saveas(f)
cmd = 'ffplay "%s"' % v.filename()
if verbose:
print('[vipy.video.play]: Executing "%s"' % cmd)
os.system(cmd)
if verbose:
print('[vipy.video.play]: Removing temporary file "%s"' % v.filename())
os.remove(v.filename()) # cleanup
elif self.hasfilename() or (self.hasurl() and self.download().hasfilename()): # triggers download
self.ffplay()
else:
raise ValueError('Invalid video file "%s" - ffplay requires a video filename' % self.filename())
return self
else:
"""Fallback player. This can visualize videos without ffplay, but it cannot guarantee frame rates. Large videos with complex scenes will slow this down and will render at lower frame rates."""
fps = self.framerate()
figure = 1  # matplotlib figure handle used by the fallback player below
assert fps > 0, "Invalid display framerate"
with Stopwatch() as sw:
for (k,im) in enumerate(self.load() if self.isloaded() else self.stream()):
time.sleep(max(0, (1.0/self.framerate())*int(np.ceil((self.framerate()/fps))) - sw.since()))
im.show(figure=figure)
if vipy.globals._user_hit_escape():
break
vipy.show.close(figure)
return self
def show(self):
"""Alias for play"""
return self.play()
def quicklook(self, n=9, mindim=256, startframe=0, animate=False, dt=30):
"""Generate a montage of n uniformly spaced frames.
Montage increases rowwise for n uniformly spaced frames, starting from frame zero and ending on the last frame.
Args:
n: Number of images in the quicklook
mindim: The minimum dimension of each of the elements in the montage
animate: If true, return a video constructed by animating the quicklook into a video by showing dt consecutive frames
dt: The number of frames for the animation
startframe: The initial frame index to start the n uniformly sampled frames for the quicklook
.. note:: The first frame in the upper left is guaranteed to be the start frame of the labeled activity, but the last frame in the bottom right may not be precisely the end frame and may be off by at most len(video)/n.
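For example (a sketch):
>>> im = v.quicklook(n=9, mindim=256)   # montage image of nine frames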
"""
if not self.isloaded():
self.load()
if animate:
return Video(frames=[self.quicklook(n=n, startframe=k, animate=False, dt=dt) for k in range(0, min(dt, len(self)))], framerate=self.framerate())
framelist = [min(int(np.round(f))+startframe, len(self)-1) for f in np.linspace(0, len(self)-1, n)]
imframes = [self.frame(k).maxmatte() # letterbox or pillarbox
for (j,k) in enumerate(framelist)]
imframes = [im.savefig(figure=1).rgb() for im in imframes] # temp storage in memory
return vipy.visualize.montage(imframes, imgwidth=mindim, imgheight=mindim)
def torch(self, startframe=0, endframe=None, length=None, stride=1, take=None, boundary='repeat', order='nchw', verbose=False, withslice=False, scale=1.0, withlabel=False, nonelabel=False):
"""Convert the loaded video of shape NxHxWxC frames to an MxCxHxW torch tensor/
Args:
startframe: [int >= 0] The start frame of the loaded video to use for constructing the torch tensor
endframe: [int >= 0] The end frame of the loaded video to use for constructing the torch tensor
length: [int >= 0] The length of the torch tensor if endframe is not provided.
stride: [int >= 1] The temporal stride in frames. This is the number of frames to skip.
take: [int >= 0] The number of uniformly spaced frames to include in the tensor.
boundary: ['repeat', 'strict', 'cyclic'] The boundary handling for when the requested tensor slice goes beyond the end of the video; 'strict' raises an assertion error instead of padding
order: ['nchw', 'nhwc', 'chwn', 'cnhw'] The axis ordering of the returned torch tensor N=number of frames (batchsize), C=channels, H=height, W=width
verbose: [bool] Print out the slice used for constructing the tensor
withslice: [bool] Return a tuple (tensor, slice) that includes the slice used to construct the tensor. Useful for data provenance.
scale: [float] An optional scale factor to apply to the tensor. Useful for converting [0,255] -> [0,1]
withlabel: [bool] Return a tuple (tensor, labels) that includes the N framewise activity labels.
nonelabel: [bool] If true, return a tuple (t, None) when withlabel=False
Returns:
A torch float tensor, analogous to torchvision.transforms.ToTensor()
(tensor, slice) if withslice=True (withslice takes precedence)
(tensor, labellist) if withlabel=True
.. notes::
- This triggers a load() of the video
- The precedence of arguments is (startframe, endframe) or (startframe, startframe+length), then stride and take.
- Follows numpy slicing rules. Optionally return the slice used if withslice=True
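For example, a 16 frame clip tensor for a 3D convnet (a sketch; the argument values are illustrative):
>>> t = v.torch(startframe=0, length=16, boundary='repeat', order='nchw')   # tensor of shape 16xCxHxW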
"""
try_import('torch'); import torch
frames = self.load().numpy() if self.load().numpy().ndim == 4 else np.expand_dims(self.load().numpy(), 3) # NxHxWx(C=1, C=3)
assert boundary in ['repeat', 'strict', 'cyclic'], "Invalid boundary mode - must be in ['repeat', 'strict', 'cyclic']"
# Slice index (i=start (zero-indexed), j=end (non-inclusive), k=step)
(i,j,k) = (startframe, endframe, stride)
if startframe == 'random':
assert length is not None, "Random start frame requires fixed length"
i = max(0, np.random.randint(len(frames)-length+1))
if endframe is not None:
assert length is None, "Cannot specify both endframe and length"
assert endframe > startframe, "End frame must be greater than start frame"
(j,k) = (endframe, 1)
if length is not None:
assert endframe is None, "Cannot specify both endframe and length"
assert length >= 0, "Length must be positive"
(j,k) = (i+length, 1)
if length is None and endframe is None:
j = len(frames) # use them all
if stride != 1:
assert take is None, "Cannot specify both take and stride"
assert stride >= 1, "Stride must be >= 1"
k = stride
if take is not None:
# Uniformly sampled frames to result in len(frames)=take
assert stride == 1, "Cannot specify both take and stride"
assert take <= len(frames), "Take must be less than the number of frames"
k = int(np.ceil(len(frames)/float(take)))
# Boundary handling
assert i >= 0, "Start frame must be >= 0"
assert i < j, "Start frame must be less than end frame"
assert k <= len(frames), "Stride must be <= len(frames)"
n = len(frames) # true video length for labels
if boundary == 'repeat' and j > len(frames):
for d in range(j-len(frames)):
frames = np.concatenate( (frames, np.expand_dims(frames[-1], 0) ))
elif boundary == 'cyclic' and j > len(frames):
for d in range(j-len(frames)):
frames = np.concatenate( (frames, np.expand_dims(frames[j % len(frames)], 0) ))
assert j <= len(frames), "invalid slice=%s for frame shape=%s" % (str((i,j,k)), str(frames.shape))
if verbose:
print('[vipy.video.torch]: slice (start,end,step)=%s for frame shape (N,C,H,W)=%s' % (str((i,j,k)), str(frames.shape)))
# Slice and transpose to torch tensor axis ordering
t = torch.from_numpy(frames[i:j:k] if (k!=1 or i!=0 or j!=len(frames)) else frames) # do not copy - This shares the numpy buffer of the video, be careful!
if t.dim() == 2:
t = t.unsqueeze(0).unsqueeze(-1) # HxW -> (N=1)xHxWx(C=1)
if order == 'nchw':
t = t.permute(0,3,1,2) # NxCxHxW, view
elif order == 'nhwc':
pass # NxHxWxC (native numpy order)
elif order == 'cnhw' or order == 'cdhw':
t = t.permute(3,0,1,2) # CxNxHxW == CxDxHxW (for torch conv3d), view
elif order == 'chwn':
t = t.permute(3,1,2,0) # CxHxWxN, view
else:
raise ValueError("Invalid order = must be in ['nchw', 'nhwc', 'chwn', 'cnhw']")
# Scaling (optional)
if scale is not None and self.colorspace() != 'float':
t = (1.0/255.0)*t # [0,255] -> [0,1]
elif scale is not None and scale != 1.0:
t = scale*t
# Return tensor or (tensor, slice) or (tensor, labels)
if withslice:
return (t, (i,j,k))
elif withlabel:
labels = [sorted(tuple(self.activitylabels( (f%n) if boundary == 'cyclic' else min(f, n-1) ))) for f in range(i,j,k)]
return (t, labels)
elif nonelabel:
return (t, None)
else:
return t
def clone(self, flushforward=False, flushbackward=False, flush=False, flushfilter=False, rekey=False, flushfile=False, shallow=False, sharedarray=False, sanitize=True):
"""Create deep copy of video object, flushing the original buffer if requested and returning the cloned object.
Flushing is useful for distributed memory management to free the buffer from this object, and pass along a cloned
object which can be used for encoding and will be garbage collected.
Args:
flushforward: copy the object, and set the cloned object `vipy.video.Video.array` to None. This flushes the video buffer for the clone, not the object
flushbackward: copy the object, and set the object array() to None. This flushes the video buffer for the object, not the clone.
flush: set the object array() to None and clone the object. This flushes the video buffer for both the clone and the object.
flushfilter: Set the ffmpeg filter chain to the default in the new object, useful for saving new videos
flushfile: Remove the filename and the URL from the video object. Useful for creating new video objects from loaded pixels.
rekey: Generate new unique track ID and activity ID keys for this scene
shallow: shallow copy everything (copy by reference), except for ffmpeg object. attributes dictionary is shallow copied
sharedarray: deep copy of everything, except for pixel buffer which is shared. Changing the pixel buffer on self is reflected in the clone.
sanitize: remove private attributes from the self.attributes dictionary. A private attribute is any key with two leading underscores '__', which should not be propagated to the clone
Returns:
A deepcopy of the video object such that changes to self are not reflected in the copy
.. note:: Cloning videos is an expensive operation and can slow down real time code. Use sparingly.
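For example, passing a flushed copy downstream while keeping the loaded buffer on self (a sketch):
>>> v_flushed = v.clone(flushforward=True)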
"""
if sanitize:
a = self.attributes # copy reference to attributes to restore
self.attributes = {} # remove attributes on self for fast clone() since private attributes will be filtered anyway
if flush or (flushforward and flushbackward):
self._array = None # flushes buffer on object and clone
#self._previewhash = None
self._shape = None
v = copy.deepcopy(self) # object and clone are flushed
elif flushbackward:
v = copy.deepcopy(self) # propagates _array to clone
self._array = None # object flushed, clone not flushed
#self._previewhash = None
self._shape = None
elif flushforward:
array = self._array;
self._array = None
#self._previewhash = None
self._shape = None
v = copy.deepcopy(self) # does not propagate _array to clone
self._array = array # object not flushed
v._array = None # clone flushed
elif shallow:
v = copy.copy(self) # shallow copy
v._ffmpeg = copy.deepcopy(self._ffmpeg) # except for ffmpeg object
v.attributes = {k:v for (k,v) in self.attributes.items()} # shallow copy of keys
v._array = np.asarray(self._array) if self._array is not None else None # shared pixels
elif sharedarray:
array = self._array
self._array = None
v = copy.deepcopy(self) # deep copy of everything but pixels
v._array = np.asarray(array) if array is not None else None # shared pixels
self._array = array # restore
else:
v = copy.deepcopy(self)
if flushfilter:
v._ffmpeg = ffmpeg.input(v.filename()) # no other filters
#v._previewhash = None
v._shape = None
(v._startframe, v._endframe) = (None, None)
(v._startsec, v._endsec) = (None, None)
if rekey:
v.rekey()
if flushfile:
v.nofilename().nourl()
if sanitize:
self.attributes = a # restore attributes
v.attributes = {k:v for (k,v) in self.attributes.items()} # shallow copy
v.sanitize() # remove private attributes
return v
def flush(self):
"""Alias for clone(flush=True), returns self not clone"""
self._array = None # flushes buffer on object and clone
#self._previewhash = None
self._shape = None
return self
def returns(self, r=None):
"""Return the provided value, useful for terminating long fluent chains without returning self"""
return r
def flush_and_return(self, retval):
"""Flush the video and return the parameter supplied, useful for long fluent chains"""
self.flush()
return retval
def map(self, func):
"""Apply lambda function to the loaded numpy array img, changes pixels not shape
Lambda function must have the following signature:
* newimg = func(img)
* img: HxWxC numpy array for a single frame of video
* newimg: HxWxC modified numpy array for this frame. Change only the pixels, not the shape
The lambda function will be applied to every frame in the video in frame index order.
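For example, inverting the pixels of every frame (a sketch):
>>> v = v.map(lambda img: 255 - img)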
"""
assert isinstance(func, types.LambdaType), "Input must be lambda function with np.array() input and np.array() output"
oldimgs = self.load().array()
self.array(np.apply_along_axis(func, 0, self._array)) # FIXME: in-place operation?
if (any([oldimg.dtype != newimg.dtype for (oldimg, newimg) in zip(oldimgs, self.array())]) or
any([oldimg.shape != newimg.shape for (oldimg, newimg) in zip(oldimgs, self.array())])):
self.colorspace('float') # unknown colorspace after shape or type transformation, set generic
return self
def gain(self, g):
"""Pixelwise multiplicative gain, such that each pixel p_{ij} = g * p_{ij}"""
return self.normalize(0, 1, scale=g)
def bias(self, b):
"""Pixelwise additive bias, such that each pixel p_{ij} = b + p_{ij}"""
return self.normalize(mean=0, std=1, scale=1.0, bias=b)
def float(self):
self.load()
self._array = self._array.astype(np.float32) if self._array is not None else self._array
return self
def channel(self, c):
self.load()
assert c >= 0 and c < self.channels()
self._array = self._array[:,:,:,c] if self._array is not None else self._array
return self
def normalize(self, mean, std, scale=1, bias=0):
"""Pixelwise whitening, out = ((scale*in) - mean) / std); triggers load(). All computations float32"""
assert scale >= 0, "Invalid input"
assert all([s > 0 for s in tolist(std)]), "Invalid input"
self._array = vipy.math.normalize(self._array, np.array(mean, dtype=np.float32), np.array(std, dtype=np.float32), np.float32(scale))
if bias != 0:
self._array = self._array + np.array(bias, dtype=np.float32)
return self.colorspace('float')
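# Usage sketch for normalize() (hedged): per-channel whitening of a loaded video. The mean
# and std values below are illustrative assumptions, not statistics from any dataset.
# >>> v = v.normalize(mean=[104.0, 117.0, 123.0], std=[1.0, 1.0, 1.0], scale=1.0)
# >>> v.colorspace()   # 'float' after whitening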
def setattribute(self, k, v=None):
if self.attributes is None:
self.attributes = {}
self.attributes[k] = v
return self
def _has_private_attribute(self):
"""Does the attributes dictionary contain any private attributes (e.g. those keys prepended with '__')"""
return isinstance(self.attributes, dict) and any([k.startswith('__') for k in self.attributes.keys()])
def hasattribute(self, k):
"""Does the attributes dictionary (self.attributes) contain the provided key"""
return isinstance(self.attributes, dict) and k in self.attributes
def delattribute(self, k):
if k in self.attributes:
self.attributes.pop(k)
return self
def getattribute(self, k):
return self.attributes[k]
class VideoCategory(Video):
"""vipy.video.VideoCategory class
A VideoCategory is a video with associated category, such as an activity class. This class includes all of the constructors of vipy.video.Video
along with the ability to extract a clip based on frames or seconds.
"""
def __init__(self, filename=None, url=None, framerate=30.0, attributes=None, category=None, array=None, colorspace=None, startframe=None, endframe=None, startsec=None, endsec=None):
super().__init__(url=url, filename=filename, framerate=framerate, attributes=attributes, array=array, colorspace=colorspace,
startframe=startframe, endframe=endframe, startsec=startsec, endsec=endsec)
self._category = category
@classmethod
def from_json(cls, s):
d = json.loads(s) if not isinstance(s, dict) else s
v = super().from_json(s)
v._category = d['_category']
return v
def __repr__(self):
strlist = []
if self.isloaded():
strlist.append("height=%d, width=%d, frames=%d" % (self._array[0].shape[0], self._array[0].shape[1], len(self._array)))
if self.filename() is not None:
strlist.append('filename="%s"' % self.filename())
if self.hasurl():
strlist.append('url="%s"' % self.url())
if self.category() is not None:
strlist.append('category="%s"' % self.category())
if not self.isloaded() and self._startframe is not None and self._endframe is not None:
strlist.append('clip=(%d,%d)' % (self._startframe, self._endframe))
if not self.isloaded() and self._startframe is not None and self._endframe is None:
strlist.append('clip=(%d,)' % (self._startframe))
return str('<vipy.video.VideoCategory: %s>' % (', '.join(strlist)))
def json(self, encode=True):
d = json.loads(super().json())
d['_category'] = self._category
return json.dumps(d) if encode else d
def category(self, c=None):
if c is None:
return self._category
else:
self._category = c
return self
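# Usage sketch for VideoCategory (hedged): attach a category label to a clipped video.
# The filename and category below are placeholder assumptions.
# >>> v = VideoCategory(filename='/path/to/video.mp4', category='tennis', startframe=0, endframe=300)
# >>> v.category()   # 'tennis'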
class Scene(VideoCategory):
""" vipy.video.Scene class
The vipy.video.Scene class provides a fluent, lazy interface for representing, transforming and visualizing annotated videos.
The following constructors are supported:
>>> vid = vipy.video.Scene(filename='/path/to/video.ext')
Valid video extensions are those that are supported by ffmpeg ['.avi', '.mp4', '.mov', '.wmv', '.mpg', '.mkv', '.webm'].
>>> vid = vipy.video.Scene(url='https://www.youtube.com/watch?v=MrIN959JuV8')
>>> vid = vipy.video.Scene(url='http://path/to/video.ext', filename='/path/to/video.ext')
Youtube URLs are downloaded to a temporary filename, retrievable as vid.download().filename(). If the environment
variable 'VIPY_CACHE' is defined, then videos are saved to this directory rather than the system temporary directory.
If a filename is provided to the constructor, then that filename will be used instead of a temp or cached filename.
URLs can be defined as an absolute URL to a video file, or to a site supported by 'youtube-dl'
[https://ytdl-org.github.io/youtube-dl/supportedsites.html]
>>> vid = vipy.video.Scene(array=frames, colorspace='rgb')
The input 'frames' is an NxHxWx3 numpy array corresponding to an N-length list of HxWx3 uint8 numpy arrays, each a single frame of pre-loaded video
Note that the video transformations (clip, resize, rescale, rotate) are only available prior to load(), and the array() is assumed immutable after load().
>>> vid = vipy.video.Scene(array=greyframes, colorspace='lum')
The input 'greyframes' is an NxHxWx1 numpy array corresponding to an N-length list of HxWx1 uint8 numpy arrays, each a single frame of pre-loaded video
This corresponds to the luminance of an RGB colorspace
>>> vid = vipy.video.Scene(array=greyframes, colorspace='lum', tracks=tracks, activities=activities)
* tracks = [vipy.object.Track(), ...]
* activities = [vipy.object.Activity(), ...]
The inputs are lists of tracks and/or activities. An object is a spatial bounding box with a category label. A track is a spatiotemporal bounding
box with a category label, such that the box contains the same instance of an object. An activity is one or more tracks with a start and end frame for an
activity performed by the object instances. Track and activity timing must be relative to the start frame of the Scene() constructor.
"""
def __init__(self, filename=None, url=None, framerate=30.0, array=None, colorspace=None, category=None, tracks=None, activities=None,
attributes=None, startframe=None, endframe=None, startsec=None, endsec=None):
self._tracks = {}
self._activities = {}
super().__init__(url=url, filename=filename, framerate=framerate, attributes=attributes, array=array, colorspace=colorspace,
category=category, startframe=startframe, endframe=endframe, startsec=startsec, endsec=endsec)
# Tracks must be defined relative to the clip specified by this constructor
if tracks is not None:
tracks = tracks if isinstance(tracks, list) or isinstance(tracks, tuple) else [tracks] # canonicalize
assert all([isinstance(t, vipy.object.Track) for t in tracks]), "Invalid track input; tracks=[vipy.object.Track(), ...]"
self._tracks = {t.id():t for t in tracks}
# Activities must be defined relative to the clip specified by this constructor
if activities is not None:
activities = activities if isinstance(activities, list) or isinstance(activities, tuple) else [activities] # canonicalize
assert all([isinstance(a, vipy.activity.Activity) for a in activities]), "Invalid activity input; activities=[vipy.activity.Activity(), ...]"
self._activities = {a.id():a for a in activities}
self._currentframe = None # deprecated
@classmethod
def cast(cls, v, flush=False):
"""Cast a conformal vipy object to this class. This is useful for downcast and upcast conversion of video objects."""
assert isinstance(v, vipy.video.Video), "Invalid input - must be derived from vipy.video.Video"
if v.__class__ != vipy.video.Scene:
v.__class__ = vipy.video.Scene
v._tracks = {} if flush or not hasattr(v, '_tracks') else v._tracks
v._activities = {} if flush or not hasattr(v, '_activities') else v._activities
v._category = None if flush or not hasattr(v, '_category') else v._category
return v
@classmethod
def from_json(cls, s):
"""Restore an object serialized with self.json()
Usage:
>>> vs = vipy.video.Scene.from_json(v.json())
"""
d = json.loads(s) if not isinstance(s, dict) else s
v = super().from_json(s)
# Packed attribute storage:
# - When loading a large number of vipy objects, the python garbage collector slows down significantly due to reference cycle counting
# - Mutable objects and custom containers are tracked by the garbage collector, and the more of them that are loaded the longer GC takes
# - To avoid this, load attributes as tuples of packed strings. This is an immutable type that is not reference counted. Check this with gc.is_tracked()
# - Then, unpack the attributes on demand when accessing tracks() or activities(). The nested containers are then reference counted (even though they really should not be, since there are no cycles by construction)
# - This is useful when calling vipy.util.load(...) on archives that contain hundreds of thousands of objects
# - Do not access the private attributes self._tracks and self._activities directly, as they remain packed until needed
# - Should install ultrajson (pip install ujson) for super fast parsing
v._tracks = tuple(d['_tracks'].values()) # efficient garbage collection: store as a packed string to avoid reference cycle tracking, unpack on demand
v._activities = tuple(d['_activities'].values()) # efficient garbage collection: store as a packed string to avoid reference cycle tracking, unpack on demand
return v
def pack(self):
"""Packing a scene returns the scene with the annotations JSON serialized.
- This is useful for fast garbage collection when there are many objects in memory
- This is useful for distributed processing prior to serializing from a scheduler to a client
- This is useful for lazy deserialization of complex attributes when loading many videos into memory
- Unpacking is transparent to the end user and is performed on the fly when annotations are accessed. There is no unpack() method.
- See the notes in from_json() for why this helps with nested containers and reference cycle tracking with the python garbage collector
"""
d = json.loads(self.json())
self._tracks = tuple(d['_tracks'].values()) # efficient garbage collection: store as a packed string to avoid reference cycle tracking, unpack on demand
self._activities = tuple(d['_activities'].values()) # efficient garbage collection: store as a packed string to avoid reference cycle tracking, unpack on demand
return self
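# Usage sketch for pack() (hedged): serialize annotations to packed strings before bulk
# storage so the garbage collector does not track the nested containers. 'scenes' and the
# output path are assumptions; vipy.util.save is assumed to mirror the vipy.util.load
# referenced in from_json() above.
# >>> scenes = [v.pack() for v in scenes]
# >>> vipy.util.save(scenes, '/path/to/scenes.json')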
def __repr__(self):
strlist = []
if self.isloaded():
strlist.append("height=%d, width=%d, frames=%d, color=%s" % (self.height(), self.width(), len(self._array), self.colorspace()))
if self.filename() is not None:
strlist.append('filename="%s"' % (self.filename()))
if self.hasurl():
strlist.append('url="%s"' % self.url())
if self._framerate is not None:
strlist.append('fps=%1.1f' % float(self._framerate))
if not self.isloaded() and self._startframe is not None and self._endframe is not None:
strlist.append('clip=(%d,%d)' % (self._startframe, self._endframe))
if not self.isloaded() and self._startframe is not None and self._endframe is None:
strlist.append('clip=(%d,)' % (self._startframe))
if self.category() is not None:
strlist.append('category="%s"' % self.category())
if self.hastracks():
strlist.append('tracks=%d' % len(self._tracks))
if self.hasactivities():
strlist.append('activities=%d' % len(self._activities))
return str('<vipy.video.scene: %s>' % (', '.join(strlist)))
def instanceid(self, newid=None):
"""Return an annotation instance identifier for this video.
An instance ID is a unique identifier for a ground truth annotation within a video, either a track or an activity. More than one instance ID may share the same video ID if they are from the same source videofile.
This is useful when calling `vipy.video.Scene.activityclip` or `vipy.video.Scene.activitysplit` to clip a video into segments such that each clip has a unique identifier, but all share the same underlying `vipy.video.Video.videoid`.
This is useful when calling `vipy.video.Scene.trackclip` or `vipy.video.Scene.tracksplit` to clip a video into segments such that each clip has a unique identifier, but all share the same underlying `vipy.video.Video.videoid`.
Returns:
INSTANCEID: if the 'instance_id' key is in self.attributes
VIDEOID_INSTANCEID: if the '_instance_id' key is in self.attributes, as set by activityclip() or trackclip(). This is set using INSTANCEID=ACTIVITYID_ACTIVITYINDEX or INSTANCEID=TRACKID_TRACKINDEX, where the index is the temporal order of the annotation in the source video prior to clip().
VIDEOID_ACTIVITYINDEX: if the 'activityindex' key is in self.attributes, as set by activityclip() (fallback for legacy datasets).
VIDEOID: otherwise
"""
if newid is not None:
self.setattribute('instance_id', newid)
return self
else:
if 'instance_id' in self.attributes:
return self.attributes['instance_id'] # set at video creation time (e.g. pycollector)
elif '_instance_id' in self.attributes:
return self.attributes['_instance_id'] # set at activityclip() time for provenance from clips back to videos
elif 'activityindex' in self.attributes:
return '%s_%s' % (self.videoid(), str(self.attributes['activityindex'])) # set at activityclip() time for provenance from clips back to videos (deprecated)
else:
return self.videoid()
def frame(self, k, img=None):
"""Return `vipy.image.Scene` object at frame k
- The attributes of each of the `vipy.image.Scene.objects` in the scene contain helpful metadata for the provenance of the detection, including:
- 'trackid' of the track associated with this detection
- 'activityid' associated with this detection
- 'jointlabel' of this detection, used for visualization
- 'noun verb' of this detection, used for visualization
Args:
k: [int >= 0] The frame index requested. This is relative to the current frame rate of the video.
img: [numpy] An optional image to be used for this frame. This is useful to construct frames efficiently for videos if the pixel buffer is available from a stream rather than a preview.
Return:
A `vipy.image.Scene` object for frame k containing all objects in this frame
.. notes::
-Modifying this frame will not affect the source video
"""
assert isinstance(k, int) and k>=0, "Frame index must be non-negative integer"
assert img is not None or (self.isloaded() and k<len(self)) or not self.isloaded(), "Invalid frame index %d - Indexing video by frame must be integer within (0, %d)" % (k, len(self)-1)
img = img if img is not None else (self._array[k] if self.isloaded() else self.preview(k).array())
dets = [t[k].clone(deep=True).setattribute('trackindex', j) for (j, t) in enumerate(self.tracks().values()) if len(t)>0 and (t.during(k) or t.boundary()=='extend')] # track interpolation (cloned) with boundary handling
for d in dets:
d.attributes['activityid'] = [] # reset
jointlabel = [(d.shortlabel(),'')] # [(Noun, Verbing1), (Noun, Verbing2), ...]
activityconf = [None] # for display
for (aid, a) in self.activities().items(): # insertion order: First activity is primary, next is secondary (not in confidence order)
if a.hastrack(d.attributes['trackid']) and a.during(k):
# Jointlabel is always displayed as "Noun Verbing" during activity (e.g. Person Carrying, Vehicle Turning) using noun=track shortlabel, verb=activity shortlabel
# If noun is associated with more than one activity, then this is shown as "Noun Verbing1\nNoun Verbing2", with a newline separator
if not any([a.shortlabel() == v for (n,v) in jointlabel]):
jointlabel.append( (d.shortlabel(), a.shortlabel()) ) # only show each activity once (even if repeated)
activityconf.append(a.confidence())
d.attributes['activityid'].append(a.id()) # for activity correspondence (if desired)
# For display purposes
d.attributes['__jointlabel'] = '\n'.join([('%s %s' % (n,v)).strip() for (n,v) in jointlabel[0 if len(jointlabel)==1 else 1:]])
d.attributes['__noun verb'] = jointlabel[0 if len(jointlabel)==1 else 1:]
d.attributes['__activityconf'] = activityconf[0 if len(jointlabel)==1 else 1:]
dets.sort(key=lambda d: (d.confidence() if d.confidence() is not None else 0, d.shortlabel())) # layering in video is ordered by decreasing track confidence and alphabetical shortlabel
return vipy.image.Scene(array=img, colorspace=self.colorspace(), objects=dets, category=self.category())
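# Usage sketch for frame() (hedged): inspect the interpolated detections and their activity
# provenance at a given frame index. 'v' is an assumed annotated vipy.video.Scene.
# >>> im = v.frame(0)
# >>> for d in im.objects():
# >>>     print(d.category(), d.attributes['activityid'])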
def during(self, frameindex):
try:
self.__getitem__(frameindex) # triggers load
return True
except:
return False
def labeled_frames(self):
"""Iterate over frames, yielding tuples (activity+object labelset in scene, vipy.image.Scene())"""
self.load()
for k in range(0, len(self)):
#self._currentframe = k # used only for incremental add()
yield (self.labels(k), self.__getitem__(k))
#self._currentframe = None
def framecomposite(self, n=2, dt=10, mindim=256):
"""Generate a single composite image with minimum dimension mindim as the uniformly blended composite of n frames each separated by dt frames"""
if not self.isloaded():
self.mindim(mindim).load()
imframes = [self.frame(k).maxmatte() for k in range(0, dt*n, dt)]
img = np.uint8(np.sum([1/float(n)*im.array() for im in imframes], axis=0))
return imframes[0].clone().array(img)
def isdegenerate(self):
"""Degenerate scene has empty or malformed tracks"""
return len(self.tracklist()) == 0 or any([t.isempty() or t.isdegenerate() for t in self.tracklist()])
def quicklook(self, n=9, dilate=1.5, mindim=256, fontsize=10, context=False, startframe=0, animate=False, dt=30):
"""Generate a montage of n uniformly spaced annotated frames centered on the union of the labeled boxes in the current frame to show the activity ocurring in this scene at a glance
Montage increases rowwise for n uniformly spaced frames, starting from frame zero and ending on the last frame. This quicklook is most useful when len(self.activities()==1)
for generating a quicklook from an activityclip().
Args:
n [int]: Number of images in the quicklook
dilate [float]: The dilation factor for the bounding box prior to crop for display
mindim [int]: The minimum dimension of each of the elements in the montage
fontsize [int]: The size of the font for the bounding box label
context [bool]: If true, replace the first and last frame in the montage with the full frame annotation, to help show the scale of the scene
animate [bool]: If true, return a video constructed by animating the quicklook into a video by showing dt consecutive frames
dt [int]: The number of frames for animation
startframe [int]: The initial frame index to start the n uniformly sampled frames for the quicklook
"""
if not self.isloaded():
self.load()
if animate:
return Video(frames=[self.quicklook(n=n, dilate=dilate, mindim=mindim, fontsize=fontsize, context=context, startframe=k, animate=False, dt=dt) for k in range(0, min(dt, len(self)))], framerate=self.framerate())
f_mutator = vipy.image.mutator_show_jointlabel()
framelist = [min(int(np.round(f))+startframe, len(self)-1) for f in np.linspace(0, len(self)-1, n)]
isdegenerate = [self.frame(k).boundingbox() is None or self.frame(k).boundingbox().dilate(dilate).intersection(self.framebox(), strict=False).isdegenerate() for (j,k) in enumerate(framelist)]
imframes = [self.frame(k).maxmatte() # letterbox or pillarbox
if (isdegenerate[j] or (context is True and (j == 0 or j == (n-1)))) else
self.frame(k).padcrop(self.frame(k).boundingbox().dilate(dilate).imclipshape(self.width(), self.height()).maxsquare().int()).mindim(mindim, interp='nearest')
for (j,k) in enumerate(framelist)]
imframes = [f_mutator(im) for im in imframes] # show jointlabel from frame interpolation
imframes = [im.savefig(fontsize=fontsize, figure=1).rgb() for im in imframes] # temp storage in memory
return vipy.visualize.montage(imframes, imgwidth=mindim, imgheight=mindim)
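# Usage sketch for quicklook() (hedged): montage each activity clip for quick review,
# assuming the returned montage image supports saveas(); the output path is a placeholder.
# >>> for c in v.activityclip():
# >>>     c.quicklook(n=9, context=True).saveas('/path/to/%s.jpg' % c.instanceid())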
def tracks(self, tracks=None, id=None):
"""Return mutable dictionary of tracks"""
if isinstance(self._tracks, tuple):
self._tracks = {t.id():t for t in [vipy.object.Track.from_json(json.loads(s)) for s in self._tracks]} # on-demand unpack (efficient garbage collection for large list of objects)
if tracks is None and id is None:
return self._tracks # mutable dict
elif id is not None:
return self._tracks[id]
elif isinstance(tracks, dict):
assert all([isinstance(t, vipy.object.Track) and k == t.id() for (k,t) in tracks.items()]), "Invalid input - Must be dictionary of vipy.object.Track"
self._tracks = tracks.copy() # shallow copy
return self
else:
assert all([isinstance(t, vipy.object.Track) for t in tolist(tracks)]), "Invalid input - Must be vipy.object.Track or list of vipy.object.Track"
self._tracks = {t.id():t for t in tolist(tracks)} # insertion order preserved (python >=3.6)
return self
def track(self, id):
return self.tracks(id=id)
def trackindex(self, id):
assert id in self.tracks()
return [t.id() for t in self.tracklist()].index(id)
def trackidx(self, idx):
return self.tracklist()[idx]
def activity(self, id):
return self.activities(id=id)
def next_activity(self, id):
"""Return the next activity just after the given activityid"""
assert id in self.activities()
A = self.activitylist()
k = [k for (k,a) in enumerate(A) if a.id() == id][0]
return A[k+1] if k<len(A)-1 else None
def prev_activity(self, id):
"""Return the previous activity just before the given activityid"""
assert id in self.activities()
A = self.activitylist()
k = [k for (k,a) in enumerate(A) if a.id() == id][0]
return A[k-1] if k>=1 else None
def tracklist(self):
return list(self.tracks().values()) # triggers shallow copy
def objects(self, casesensitive=True):
"""The objects in a scene are the unique categories of tracks"""
return sorted(list(set([t.category() if casesensitive else t.category().lower() for t in self.tracklist()])))
def actorid(self, id=None, fluent=False):
"""Return or set the actor ID for the video.
- The actor ID is the track ID of the primary actor in the scene. This is useful for assigning a role for activities that are performed by the actor.
- The actor ID is the ID of the first track in the tracklist
Args:
id: [str] if not None, then use this track ID as the actor
fluent: [bool] If true, always return self. This is useful for those cases where the actorid being set is None.
Returns:
[id=None, fluent=False] the actor ID
[id is not None] The video with the actor ID set, only if the ID is found in the tracklist
"""
if id is None:
return next(iter(self.tracks().keys())) if not fluent else self # Python >=3.6
elif id in self._tracks:
# Reorder tracks so that id is first
idlist = [id] + [ti for ti in self.tracks().keys() if ti != id]
self._tracks = {k:self.track(k) for k in idlist}
else:
warnings.warn('trackid=%s not found in "%s"' % (str(id), str(self)))
return self
def setactorid(self, id):
"""Alias for `vipy.video.Scene.actorid`"""
return self.actorid(id, fluent=True)
def actor(self):
"""Return the primary actor (first `vipy.object.Track`) in the video"""
return next(iter(self.tracks().values())) if len(self._tracks)>0 else None # Python >=3.6
def primary_activity(self):
"""Return the primary activity of the video.
- The primary activity is the first activity in the activitylist.
- This is useful for activityclip() videos that are centered on a single activity
Returns:
`vipy.activity.Activity` that is first in the `vipy.video.Scene.activitylist`
"""
return next(iter(self.activities().values())) if len(self._activities)>0 else None # Python >=3.6
def activities(self, activities=None, id=None):
"""Return mutable dictionary of activities. All temporal alignment is relative to the current clip()."""
if isinstance(self._activities, tuple):
self._activities = {a.id():a for a in [vipy.activity.Activity.from_json(json.loads(s)) for s in self._activities]} # on-demand
if activities is None and id is None:
return self._activities # mutable dict
elif id is not None:
return self._activities[id]
elif isinstance(activities, dict):
assert all([isinstance(a, vipy.activity.Activity) and k == a.id() for (k,a) in activities.items()]), "Invalid input - Must be dictionary of vipy.activity.Activity"
self._activities = activities.copy() # shallow copy
return self
else:
assert all([isinstance(a, vipy.activity.Activity) for a in tolist(activities)]), "Invalid input - Must be vipy.activity.Activity or list of vipy.activity.Activity"
self._activities = {a.id():a for a in tolist(activities)} # insertion order preserved (python >=3.6)
return self
def activityindex(self, k):
"""Return the `vipy.activity.Activity` at the requested index order in the video"""
alist = self.activitylist()
assert k >= 0 and k < len(alist), "Invalid index"
return alist[k]
def activitylist(self):
return list(self.activities().values()) # insertion ordered (python >=3.6), triggers shallow copy
def activityfilter(self, f):
"""Apply boolean lambda function f to each activity and keep activity if function is true, remove activity if function is false
Filter out all activities longer than 128 frames
>>> vid = vid.activityfilter(lambda a: len(a)<128)
Filter out activities with category in set
>>> vid = vid.activityfilter(lambda a: a.category() in set(['category1', 'category2']))
Args:
f: [lambda] a lambda function that takes an activity and returns a boolean
Returns:
This video with the activities f(a)==False removed.
"""
assert callable(f)
self._activities = {k:a for (k,a) in self.activities().items() if f(a) == True}
return self
def trackfilter(self, f, activitytrack=True):
"""Apply lambda function f to each object and keep if filter is True.
Args:
activitytrack: [bool] If true, remove track assignment from activities also, may result in activities with no tracks
f: [lambda] The lambda function to apply to each track t, and if f(t) returns True, then keep the track
Returns:
self, with tracks removed in-place
.. note:: Applying track filter with activitytrack=True may result in activities with no associated tracks. You should follow up with self.activityfilter(lambda a: len(a.trackids()) > 0).
"""
assert callable(f)
self._tracks = {k:t for (k,t) in self.tracks().items() if f(t) == True}
if activitytrack:
self.activitymap(lambda a: a.trackfilter(lambda ti: ti in self._tracks)) # remove track association in activities
#if any([len(a.tracks()) == 0 for a in self.activitylist()]):
# warnings.warn('trackfilter(..., activitytrack=True) removed tracks which returned at least one degenerate activity with no tracks')
return self
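# Usage sketch for trackfilter() (hedged): drop short tracks, then remove activities left
# with no associated tracks, as recommended in the note above. The 15 frame threshold is
# an arbitrary assumption.
# >>> v = v.trackfilter(lambda t: len(t) > 15).activityfilter(lambda a: len(a.trackids()) > 0)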
def trackmap(self, f, strict=True):
"""Apply lambda function f to each activity
-strict=True: enforce that lambda function must return non-degenerate Track() objects
"""
assert callable(f)
self._tracks = {k:f(t) for (k,t) in self.tracks().items()}
assert all([isinstance(t, vipy.object.Track) and (strict is False or not t.isdegenerate()) for (tk,t) in self.tracks().items()]), "Lambda function must return non-degenerate vipy.object.Track()"
return self
def activitymap(self, f):
"""Apply lambda function f to each activity"""
assert callable(f)
self._activities = {k:f(a) for (k,a) in self.activities().items()}
assert all([isinstance(a, vipy.activity.Activity) for a in self.activitylist()]), "Lambda function must return vipy.activity.Activity()"
return self
def rekey(self):
"""Change the track and activity IDs to randomly assigned UUIDs. Useful for cloning unique scenes"""
d_old_to_new = {k:hex(int(uuid.uuid4().hex[0:8], 16))[2:] for (k,a) in self.activities().items()}
self._activities = {d_old_to_new[k]:a.id(d_old_to_new[k]) for (k,a) in self.activities().items()}
d_old_to_new = {k:hex(int(uuid.uuid4().hex[0:8], 16))[2:] for (k,t) in self.tracks().items()}
self._tracks = {d_old_to_new[k]:t.id(d_old_to_new[k]) for (k,t) in self.tracks().items()}
for (k,v) in d_old_to_new.items():
self.activitymap(lambda a: a.replaceid(k,v) )
return self
def annotation(self):
"""Return an iterator over annotations in each frame.
>>> for y in self.annotation():
>>> for (bb,a) in y:
>>> print((bb,a))
Yields:
for each frame yield the tuple: ( (`vipy.object.Detection`, (tuple of `vipy.activity.Activity` performed by the actor in this bounding box)), ... )
.. note:: The preferred method for accessing annotations is a frame iterator, which includes pixels. However, this method provides access to just the annotations without pixels.
"""
endframe = max([a.endframe() for a in self.activitylist()]+[t.endframe() for (tk,t) in self.tracks().items()]) if (len(self._tracks) > 0 or len(self._activities) > 0) else 0
for k in range(0,endframe):
yield tuple( [tuple( [t[k] if t.during(k) else None, tuple( [a for a in self.activitylist() if a.during(k) and a.hastrackoverlap(t)] ) ]) for t in self.tracklist()])
def label(self):
"""Return an iterator over labels in each frame"""
endframe = max([a.endframe() for a in self.activitylist()]+[t.endframe() for (tk,t) in self.tracks().items()]) if (len(self._tracks) > 0 or len(self._activities) > 0) else 0
for k in range(0,endframe):
yield self.labels(k)
def labels(self, k=None):
"""Return a set of all object and activity labels in this scene, or at frame int(k)"""
return self.activitylabels(k).union(self.objectlabels(k))
def activitylabel(self, startframe=None, endframe=None):
"""Return an iterator over activity labels in each frame, starting from startframe and ending when there are no more activities"""
endframe = endframe if endframe is not None else (max([a.endframe() for a in self.activitylist()]) if len(self.activities())>0 else 0)
startframe = startframe if startframe is not None else (min([a.startframe() for a in self.activitylist()]) if len(self.activities())>0 else 0)
assert startframe <= endframe
for k in range(startframe, endframe):
yield self.activitylabels(k)
def activitylabels(self, startframe=None, endframe=None):
"""Return a set of all activity categories in this scene, or at startframe, or in range [startframe, endframe]"""
if startframe is None:
return set([a.category() for a in self.activities().values()])
elif startframe is not None and endframe is None:
return set([a.category() for a in self.activities().values() if a.during(startframe)])
elif startframe is not None and endframe is not None:
return [set([a.category() for a in self.activities().values() if a.during(k)]) for k in range(startframe, endframe)]
else:
raise ValueError('Invalid input - must specify both startframe and endframe, or only startframe')
def objectlabels(self, k=None, lower=False):
"""Return a python set of all activity categories in this scene, or at frame k.
Args:
k: [int] The object labels present at frame k. If k=None, then all object labels in the video
lower: [bool] If true, return the object labels in all lower case for case invariant string comparisons
"""
return set([t.category() if not lower else t.category().lower() for t in self.tracks().values() if k is None or t.during(k)])
def categories(self):
"""Alias for labels()"""
return self.labels()
def activity_categories(self):
"""Alias for activitylabels()"""
return self.activitylabels()
def hasactivities(self):
"""Does this video have any activities?"""
return len(self._activities) > 0
def hasactivity(self, activityid):
"""Does this video have this activity id?"""
return activityid in self.activities()
def hastracks(self):
"""Does this video have any tracks?"""
return len(self._tracks) > 0
def hastrack(self, trackid):
"""Does the video have this trackid?
.. note:: Track IDs are available as vipy.object.Track().id()
"""
return trackid in self.tracks()
def add(self, obj, category=None, attributes=None, rangecheck=True, frame=None, fluent=False):
"""Add the object obj to the scene, and return an index to this object for future updates
This function is used to incrementally build up a scene frame by frame. Obj can be one of the following types:
- obj = vipy.object.Detection(), this must be called from within a frame iterator (e.g. for im in video) to get the current frame index
- obj = vipy.object.Track()
- obj = vipy.activity.Activity()
- obj = [xmin, ymin, width, height], with associated category kwarg, this must be called from within a frame iterator to get the current frame index
It is recommended that the objects are added as follows. For a v=vipy.video.Scene():
>>> for im in v:
>>> # Do some processing on frame im to detect objects
>>> (object_labels, xywh) = object_detection(im)
>>>
>>> # Add them to the scene, note that each object instance is independent in each frame, use tracks for object correspondence
>>> for (lbl,bb) in zip(object_labels, xywh):
>>> v.add(bb, lbl)
>>>
>>> # Do some correspondences to track objects
>>> t2 = v.add( vipy.object.Track(...) )
>>>
>>> # Update a previous track to add a keyframe
>>> v.track(t2).add( ... )
The frame iterator will keep track of the current frame in the video and add the objects in the appropriate place. Alternatively,
>>> v.add(vipy.object.Track(..), frame=k)
Args:
obj: A conformal python object to add to the scene (`vipy.object.Detection`, `vipy.object.Track`, `vipy.activity.Activity`, [xmin, ymin, width, height]
category: Used if obj is an xywh tuple
attributes: Used only if obj is an xywh tuple
frame: [int] The frame to add the object
rangecheck: [bool] If true, check if the object is within the image rectangle and throw an exception if not. This requires introspecting the video shape using `vipy.video.Video.shape`.
fluent: [bool] If true, return self instead of the object index
"""
if isinstance(obj, vipy.object.Detection):
assert frame is not None, "add() for vipy.object.Detection() must be added during frame iteration (e.g. for im in video: )"
k = frame
if obj.hasattribute('trackid') and obj.attributes['trackid'] in self.tracks():
# The attribute "trackid" is set for a detection when interpolating a track at a frame. This is useful for reconstructing a track from previously enumerated detections
trackid = obj.attributes['trackid']
self.trackmap(lambda t: t.update(k, obj) if obj.attributes['trackid'] == t.id() else t)
return None if not fluent else self
else:
t = vipy.object.Track(category=obj.category(), keyframes=[k], boxes=[obj], boundary='strict', attributes=obj.attributes, trackid=obj.attributes['trackid'] if obj.hasattribute('trackid') else None, framerate=self.framerate())
if rangecheck and not obj.hasoverlap(width=self.width(), height=self.height()):
raise ValueError("Track '%s' does not intersect with frame shape (%d, %d)" % (str(t), self.height(), self.width()))
self.tracks()[t.id()] = t # by-reference
return t.id() if not fluent else self
elif isinstance(obj, vipy.object.Track):
if rangecheck and not obj.boundingbox().isinside(vipy.geometry.imagebox(self.shape())):
obj = obj.imclip(self.width(), self.height()) # try to clip it, will throw exception if all are bad
warnings.warn('[vipy.video.add]: Clipping trackid=%s track="%s" to image rectangle' % (str(obj.id()), str(obj)))
if obj.framerate() != self.framerate():
obj.framerate(self.framerate()) # convert framerate of track to framerate of video
self.tracks()[obj.id()] = obj # by-reference
return obj.id() if not fluent else self
elif isinstance(obj, vipy.activity.Activity):
if rangecheck and obj.startframe() >= obj.endframe():
raise ValueError("Activity '%s' has invalid (startframe, endframe)=(%d, %d)" % (str(obj), obj.startframe(), obj.endframe()))
self.activities()[obj.id()] = obj # by-reference, activity may have no tracks
return obj.id() if not fluent else self
elif (istuple(obj) or islist(obj)) and len(obj) == 4 and isnumber(obj[0]):
assert frame is not None, "add() for obj=xywh must be added at a specific frame"
t = vipy.object.Track(category=category, keyframes=[frame], boxes=[vipy.geometry.BoundingBox(xywh=obj)], boundary='strict', attributes=attributes, framerate=self.framerate())
if rangecheck and not t.boundingbox().isinside(vipy.geometry.imagebox(self.shape())):
t = t.imclip(self.width(), self.height()) # try to clip it, will throw exception if all are bad
warnings.warn('Clipping track "%s" to image rectangle' % (str(t)))
self.tracks()[t.id()] = t # by-reference
return t.id() if not fluent else self
else:
raise ValueError('Undefined object type "%s" to be added to scene - Supported types are obj in ["vipy.object.Detection", "vipy.object.Track", "vipy.activity.Activity", "[xmin, ymin, width, height]"]' % str(type(obj)))
def delete(self, id):
"""Delete a given track or activity by id, if present"""
return self.trackfilter(lambda t: t.id() != id).activityfilter(lambda a: a.id() != id)
def addframe(self, im, frame):
"""Add im=vipy.image.Scene() into vipy.video.Scene() at given frame. The input image must have been generated using im=self[k] for this to be meaningful, so that trackid can be associated"""
assert isinstance(im, vipy.image.Scene), "Invalid input - Must be vipy.image.Scene()"
assert im.shape() == self.shape(), "Frame input (shape=%s) must be same shape as video (shape=%s)" % (str(im.shape()), str(self.shape()))
# Copy framewise vipy.image.Scene() into vipy.video.Scene().
self.numpy()[frame] = im.array() # will trigger copy
for bb in im.objects():
self.trackmap(lambda t: t.update(frame, bb) if bb.attributes['trackid'] == t.id() else t)
return self
def clear(self):
"""Remove all activities and tracks from this object"""
self._activities = {}
self._tracks = {}
return self
def cleartracks(self):
self._tracks = {}
return self
def clearactivities(self):
self._activities = {}
return self
def replace(self, other, frame=None):
"""Replace tracks and activities with other if activity/track is during frame"""
assert isinstance(other, vipy.video.Scene)
self.activities([a for a in other.activitylist() if frame is None or a.during(frame)])
self.tracks([t for t in other.tracklist() if frame is None or t.during(frame)])
return self
def json(self, encode=True):
"""Return JSON encoded string of this object. This may fail if attributes contain non-json encodeable object"""
try:
json.loads(json.dumps(self.attributes)) # rount trip for the attributes ductionary - this can be any arbitrary object and contents may not be json encodable
except:
raise ValueError('Video contains non-JSON encodable object in self.attributes dictionary - Try to clear with self.attributes = {} first')
d = json.loads(super().json())
d['_tracks'] = {k:t.json(encode=True) for (k,t) in self.tracks().items()}
d['_activities'] = {k:a.json(encode=True) for (k,a) in self.activities().items()}
try:
return json.dumps(d) if encode else d
except:
# Legacy support for non JSON serializable objects (<= vipy.1.9.2)
v = self.clone()
for (ti, t) in v.tracks().items():
for o in t._keyboxes:
vipy.geometry.BoundingBox.cast(o, flush=True)
o.float().significant_digits(2)
for (ai, a) in v.activities().items():
a._startframe = int(a._startframe)
a._endframe = int(a._endframe)
return v.json(encode=encode)
def csv(self, outfile=None):
"""Export scene to CSV file format with header. If there are no tracks, this will be empty. """
assert self.load().isloaded()
csv = [(self.filename(), # video filename
k, # frame number (zero indexed)
d.category(), d.shortlabel(), # track category and shortlabel (displayed in caption)
';'.join([self.activities(id=aid).category() for aid in tolist(d.attributes['activityid'])] if 'activityid' in d.attributes else ''), # semicolon separated activity category associated with track
d.xmin(), d.ymin(), d.width(), d.height(), # bounding box
d.attributes['trackid'], # globally unique track ID
';'.join([aid for aid in tolist(d.attributes['activityid'])] if 'activityid' in d.attributes else '')) # semicolon separated activity ID associated with track
for (k,im) in enumerate(self) for d in im.objects()]
csv = [('# video_filename', 'frame_number', 'object_category', 'object_shortlabel', 'activity categories(;)', 'xmin', 'ymin', 'width', 'height', 'track_id', 'activity_ids(;)')] + csv
return writecsv(csv, outfile) if outfile is not None else csv
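# Usage sketch for csv() (hedged): export per-frame annotations to disk; this triggers
# load() on the video. The output path is a placeholder assumption.
# >>> v.csv('/path/to/annotations.csv')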
def framerate(self, fps=None):
"""Change the input framerate for the video and update frame indexes for all annotations.
>>> fps = self.framerate()
>>> self.framerate(fps=15.0)
"""
if fps is None:
return self._framerate
elif fps == self._framerate:
return self
else:
assert not self.isloaded(), "Filters can only be applied prior to load() - Try calling flush() first"
self._startframe = int(round(self._startframe * (fps/self._framerate))) if self._startframe is not None else self._startframe # __repr__ only
self._endframe = int(round(self._endframe * (fps/self._framerate))) if self._endframe is not None else self._endframe # __repr__ only
self._tracks = {k:t.framerate(fps) for (k,t) in self.tracks().items()}
self._activities = {k:a.framerate(fps) for (k,a) in self.activities().items()}
if 'fps=' in self._ffmpeg_commandline():
self._update_ffmpeg('fps', fps) # replace fps filter, do not add to it
else:
self._ffmpeg = self._ffmpeg.filter('fps', fps=fps, round='up') # create fps filter first time
self._framerate = fps
return self
def activitysplit(self, idx=None):
"""Split the scene into k separate scenes, one for each activity. Do not include overlapping activities.
Args:
idx: [int],[tuple],[list]. Return only those activities in the provided activity index list, where the activity index is the integer index of the activity in the video.
.. note:: This is useful for union()
"""
vid = self.clone(flushforward=True)
if any([(a.endframe()-a.startframe()) <= 0 for a in vid.activities().values()]):
warnings.warn('Filtering invalid activity with degenerate lengths: %s' % str([a for a in vid.activities().values() if (a.endframe()-a.startframe()) <= 0]))
activities = sorted([a.clone() for a in vid.activities().values() if (a.endframe()-a.startframe()) > 0], key=lambda a: a.startframe()) # only activities with at least one frame, sorted in temporal order
tracks = [ [t.clone() for (tid, t) in vid.tracks().items() if a.hastrack(t)] for a in activities] # tracks associated with each activity (may be empty)
vid._activities = {} # for faster clone
vid._tracks = {} # for faster clone
return [vid.clone()
.setattribute('_instance_id', ('%s_%d' % (vid.videoid(), k)) if not vid.hasattribute('_instance_id') else vid.getattribute('_instance_id'))
.activities(pa)
.tracks(t)
.setactorid(pa.actorid())
for (k,(pa,t)) in enumerate(zip(activities, tracks)) if idx is None or k in tolist(idx)]
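# Usage sketch for activitysplit() (hedged): one scene per activity, all sharing the same
# underlying videoid but with unique instance identifiers.
# >>> scenes = v.activitysplit()
# >>> [s.instanceid() for s in scenes]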
def tracksplit(self):
"""Split the scene into k separate scenes, one for each track. Each scene starts at frame 0 and is a shallow copy of self containing exactly one track.
- This is useful for visualization by breaking a scene into a list of scenes that contain only one track.
- The attribute '_trackindex' is set in the attributes dictionary to provide provenance for the track relative to the source video
.. notes:: Use clone() to create a deep copy if needed.
"""
return [self.clone(shallow=True).setattribute('_trackindex', k).tracks(t).activityfilter(lambda a: a.hastrack(tk)) for (k,(tk,t)) in enumerate(self.tracks().items())]
def trackclip(self):
"""Split the scene into k separate scenes, one for each track. Each scene starts and ends when the track starts and ends"""
return [t.setattribute('_instance_id', '%s_%d' % (t.actorid(), k)).clip(t.track(t.actorid()).startframe(), t.track(t.actorid()).endframe()) for (k,t) in enumerate(self.tracksplit())]
def activityclip(self, padframes=0, multilabel=True, idx=None):
"""Return a list of `vipy.video.Scene` objects each clipped to be temporally centered on a single activity, with an optional padframes before and after.
Args:
padframes: [int] for symmetric padding same before and after
padframes: [tuple] (int, int) for asymmetric padding before and after
padframes: [list[tuples]] [(int, int), ...] for activity specific asymmetric padding
multilabel: [bool] include overlapping multilabel secondary activities in each activityclip
idx: [int], [tuple], [list]. The indexes of the activities to return, where the index is the integer index order of the activity in the video.
Returns:
A list of `vipy.video.Scene` each cloned from the source video and clipped on one activity in the scene
.. notes::
- The Scene() category is updated to be the activity category of the clip, and only the objects participating in the activity are included.
- Clips are returned ordered in the temporal order they appear in the video.
- The returned vipy.video.Scene() objects for each activityclip are clones of the video, with the video buffer flushed.
- Each activityclip() is associated with each activity in the scene, and includes all other secondary activities that the objects in the primary activity also perform (if multilabel=True). See activityclip().labels().
- Calling activityclip() on activityclip(multilabel=True) will duplicate activities, due to the overlapping secondary activities being included in each clip with an overlap. Be careful!
"""
assert isinstance(padframes, int) or istuple(padframes) or islist(padframes)
vid = self.clone(flushforward=True)
if any([(a.endframe()-a.startframe()) <= 0 for a in vid.activities().values()]):
warnings.warn('Filtering invalid activity clips with degenerate lengths: %s' % str([a for a in vid.activities().values() if (a.endframe()-a.startframe()) <= 0]))
primary_activities = sorted([a.clone() for a in vid.activities().values() if (a.endframe()-a.startframe()) > 0], key=lambda a: a.startframe()) # only activities with at least one frame, sorted in temporal order
padframelist = [padframes if istuple(padframes) else (padframes, padframes) for k in range(len(primary_activities))] if not islist(padframes) else padframes
tracks = [ [t.clone() for (tid, t) in vid.tracks().items() if a.hastrackoverlap(t)] for a in primary_activities] # tracks associated with and temporally overlapping each primary activity (may be empty)
secondary_activities = [[sa.clone() for sa in primary_activities if (sa.id() != pa.id() and pa.clone().temporalpad((prepad, postpad)).hasoverlap(sa) and (len(T)==0 or any([sa.hastrack(t) for t in T])))] for (pa, T, (prepad,postpad)) in zip(primary_activities, tracks, padframelist)] # overlapping secondary activities that includes any track in the primary activity
secondary_activities = [sa if multilabel else [] for sa in secondary_activities]
vid._activities = {} # for faster clone
vid._tracks = {} # for faster clone
return [vid.clone()
.activities([pa]+sa) # primary activity first
.tracks(t)
.clip(startframe=max(pa.startframe()-prepad, 0), endframe=(pa.endframe()+postpad))
.category(pa.category())
.setactorid(pa.actorid()) # actor is actor of primary activity
.setattribute('_instance_id', ('%s_%d' % (vid.videoid(), k)) if not vid.hasattribute('_instance_id') else vid.getattribute('_instance_id'))
for (k,(pa,sa,t,(prepad,postpad))) in enumerate(zip(primary_activities, secondary_activities, tracks, padframelist))
if (idx is None or k in tolist(idx))]
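# Usage sketch for activityclip() (hedged): clip each activity with two seconds of leading
# padding at an assumed 30 fps source framerate, excluding overlapping secondary activities.
# >>> clips = v.activityclip(padframes=(60, 0), multilabel=False)
# >>> [c.category() for c in clips]   # each clip's category is its primary activity category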
def noactivityclip(self, label=None, strict=True, padframes=0):
"""Return a list of vipy.video.Scene() each clipped on a track segment that has no associated activities.
Args:
strict: [bool] True means that background can only occur in frames where no tracks are performing any activities. This is useful so that background is not constructed from secondary objects. False means that background can only occur in frames where a given track is not performing any activities.
label: [str] The activity label to give the background activities. Defaults to the track category (lowercase)
padframes: [int] The amount of temporal padding to apply to the clips before and after in frames. See `vipy.video.Scene.activityclip` for options.
Returns:
A list of `vipy.video.Scene` each cloned from the source video and clipped in the temporal region between activities. The union of activityclip() and noactivityclip() should equal the entire video.
.. notes::
- Each clip will contain exactly one activity "Background" which is the interval for this track where no activities are occurring
- Each clip will be at least one frame long
"""
v = self.clone()
for t in v.tracklist():
bgframe = [k for k in range(t.startframe(), t.endframe()) if not any([a.hastrack(t) and a.during(k) for a in self.activitylist()])]
while len(bgframe) > 0:
(i,j) = (0, np.argwhere(np.diff(bgframe) > 1).flatten()[0] + 1 if len(np.argwhere(np.diff(bgframe) > 1))>0 else len(bgframe)-1)
if i < j:
v.add(vipy.activity.Activity(label=t.category() if label is None else label,
shortlabel='' if label is None else label,
startframe=bgframe[i], endframe=bgframe[j],
actorid=t.id(), framerate=v.framerate(), attributes={'noactivityclip':True}))
bgframe = bgframe[j+1:]
return v.activityfilter(lambda a: 'noactivityclip' in a.attributes).activityclip(padframes=padframes, multilabel=False)
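# Usage sketch for noactivityclip() (hedged): harvest background clips where tracks perform
# no labeled activity, e.g. as negatives for activity classification. The label is an assumption.
# >>> background = v.noactivityclip(label='background', padframes=0)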
def trackbox(self, dilate=1.0):
"""The trackbox is the union of all track bounding boxes in the video, or None if there are no tracks
Args:
dilate: [float] A dilation factor to apply to the trackbox before returning. See `vipy.geometry.BoundingBox.dilate`
Returns:
A `vipy.geometry.BoundingBox` which is the union of all boxes in the track (or None if no boxes exist)
"""
boxes = [t.clone().boundingbox() for t in self.tracklist()]
boxes = [bb.dilate(dilate) for bb in boxes if bb is not None]
return boxes[0].union(boxes[1:]) if len(boxes) > 0 else None
def framebox(self):
"""Return the bounding box for the image rectangle.
Returns:
A `vipy.geometry.BoundingBox` which defines the image rectangle
.. notes: This requires calling `vipy.video.Video.preview` to get the frame shape from the current filter chain, which touches the video file"""
return vipy.geometry.BoundingBox(xmin=0, ymin=0, width=self.width(), height=self.height())
def trackcrop(self, dilate=1.0, maxsquare=False, zeropad=True):
"""Return the trackcrop() of the scene which is the crop of the video using the `vipy.video.Scene.trackbox`.
Args:
zeropad: [bool] If True, the zero pad the crop if it is outside the image rectangle, otherwise return only valid pixels inside the image rectangle
maxsquare: [bool] If True, make the bounding box the maximum square before cropping
dilate: [float] The dilation factor to apply to the trackbox prior to cropping
Returns:
A `vipy.video.Scene` object from cropping the video using the trackbox. If there are no tracks, return None.
"""
bb = self.trackbox(dilate) # may be None if trackbox is degenerate
return self.crop(bb.maxsquareif(maxsquare), zeropad=zeropad) if bb is not None else None
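# Usage sketch for trackcrop() (hedged): crop the video to the union of all track boxes,
# dilated for context, squared, and zero padded where the box extends outside the frame.
# >>> vc = v.trackcrop(dilate=1.2, maxsquare=True, zeropad=True)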
def activitybox(self, activityid=None, dilate=1.0):
"""The activitybox is the union of all activity bounding boxes in the video, which is the union of all tracks contributing to all activities. This is most useful after activityclip().
The activitybox is the smallest bounding box that contains all of the boxes from all of the tracks in all activities in this video.
"""
activities = [a for (k,a) in self.activities().items() if (activityid is None or k in set(activityid))]
boxes = [t.clone().boundingbox().dilate(dilate) for t in self.tracklist() if any([a.hastrack(t) for a in activities])]
return boxes[0].union(boxes[1:]) if len(boxes) > 0 else vipy.geometry.BoundingBox(xmin=0, ymin=0, width=int(self.width()), height=int(self.height()))
def activitycuboid(self, activityid=None, dilate=1.0, maxdim=256, bb=None):
"""The activitycuboid() is the fixed square spatial crop corresponding to the activitybox (or supplied bounding box), which contains all of the valid activities in the scene. This is most useful after activityclip().
The activitycuboid() is a spatial crop of the video corresponding to the supplied boundingbox or the square activitybox().
This crop must be resized such that the maximum dimension is provided since the crop can be tiny and will not be encodable by ffmpeg
"""
bb = self.activitybox(activityid).maxsquare() if bb is None else bb
assert bb is None or isinstance(bb, vipy.geometry.BoundingBox)
assert bb.issquare(), "Add support for non-square boxes"
return self.clone().crop(bb.dilate(dilate).int(), zeropad=True).resize(maxdim, maxdim) # crop triggers preview()
def activitysquare(self, activityid=None, dilate=1.0, maxdim=256):
"""The activity square is the maxsquare activitybox that contains only valid (non-padded) pixels interior to the image"""
bb = self.activitybox(activityid).maxsquare().dilate(dilate).int().iminterior(self.width(), self.height()).minsquare()
return self.activitycuboid(activityid, dilate=1.0, maxdim=maxdim, bb=bb)
def activitytube(self, activityid=None, dilate=1.0, maxdim=256):
"""The activitytube() is a sequence of crops where the spatial box changes on every frame to track the activity.
The box in each frame is the square activitybox() for this video which is the union of boxes contributing to this activity in each frame.
This function does not perform any temporal clipping. Use activityclip() first to split into individual activities.
Crops will be optionally dilated, with zeropadding if the box is outside the image rectangle. All crops will be resized so that the maximum dimension is maxdim (and square by default)
"""
vid = self.clone().load() # triggers load
self.activityfilter(lambda a: activityid is None or a.id() in set(activityid)) # only requested IDs (or all of them)
frames = [im.padcrop(im.boundingbox().maxsquare().dilate(dilate).int()).resize(maxdim, maxdim) for im in vid if im.boundingbox() is not None] # track interpolation, for frames with boxes only
if len(frames) != len(vid):
warnings.warn('[vipy.video.activitytube]: Removed %d frames with no spatial bounding boxes' % (len(vid) - len(frames)))
vid.attributes['__activitytube'] = {'truncated':len(vid) - len(frames)} # provenance to reject
if len(frames) == 0:
warnings.warn('[vipy.video.activitytube]: Resulting video is empty! Setting activitytube to zero')
frames = [ vid[0].resize(maxdim, maxdim).zeros() ] # empty frame
vid.attributes['__activitytube'] = {'empty':True} # provenance to reject
vid._tracks = {ti:vipy.object.Track(keyframes=[f for (f,im) in enumerate(frames) for d in im.objects() if d.attributes['trackid'] == ti],
boxes=[d for (f,im) in enumerate(frames) for d in im.objects() if d.attributes['trackid'] == ti],
category=t.category(), trackid=ti, framerate=self.framerate())
for (k,(ti,t)) in enumerate(self.tracks().items())} # replace tracks with boxes relative to tube
return vid.array(np.stack([im.numpy() for im in frames]))
def actortube(self, trackid=None, dilate=1.0, maxdim=256, strict=True):
"""The actortube() is a sequence of crops where the spatial box changes on every frame to track the primary actor performing an activity.
The box in each frame is the square box centered on the primary actor performing the activity, dilated by a given factor (the original box around the actor is unchanged, this just increases the context, with zero padding)
This function does not perform any temporal clipping. Use activityclip() first to split into individual activities.
All crops will be resized so that the maximum dimension is maxdim (and square by default)
"""
assert trackid is not None or len(self.tracks()) == 1, "Track ID must be provided if there exists more than one track in the scene"
trackid = trackid if trackid is not None else list(self.tracks().keys())[0]
assert self.hastrack(trackid), "Track ID %s not found - Actortube requires a track ID in the scene (tracks=%s)" % (str(trackid), str(self.tracks()))
vid = self.clone().load() # triggers load
t = vid.tracks(id=trackid) # actor track
frames = [im.padcrop(t[k].maxsquare().dilate(dilate).int()).resize(maxdim, maxdim) for (k,im) in enumerate(vid) if t.during(k)] if len(t)>0 else [] # actor interpolation, padding may introduce frames with no tracks
if len(frames) == 0:
if not strict:
warnings.warn('[vipy.video.actortube]: Empty track for trackid="%s" - Setting actortube to zero' % trackid)
frames = [ vid[0].resize(maxdim, maxdim).zeros() ] # empty frame
vid.attributes['__actortube'] = {'empty':True} # provenance to reject
else:
raise ValueError('[vipy.video.actortube]: Empty track for track=%s, trackid=%s' % (str(t), trackid))
vid._tracks = {ti:vipy.object.Track(keyframes=[f for (f,im) in enumerate(frames) for d in im.objects() if d.attributes['trackid'] == ti], # keyframes zero indexed, relative to [frames]
boxes=[d for (f,im) in enumerate(frames) for d in im.objects() if d.attributes['trackid'] == ti], # one box per frame
category=t.category(), trackid=ti, framerate=self.framerate()) # preserve trackid
for (k,(ti,t)) in enumerate(self.tracks().items())} # replace tracks with interpolated boxes relative to tube defined by actor
return vid.array(np.stack([im.numpy() for im in frames]))
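# Usage sketch for actortube() (hedged): a per-frame square crop following the primary actor
# of each activity clip, resized so the maximum dimension is 224 (an arbitrary assumption).
# >>> tubes = [c.actortube(trackid=c.actorid(), dilate=1.5, maxdim=224) for c in v.activityclip()]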
def speed(self, s):
"""Change the speed by a multiplier s. If s=1, this will be the same speed, s=0.5 for half-speed (slower playback), s=2 for double-speed (faster playback)"""
super().speed(s)
return self.trackmap(lambda t: t.framerate(speed=s)).activitymap(lambda a: a.framerate(speed=s))
def clip(self, startframe, endframe=None):
"""Clip the video to between (startframe, endframe). This clip is relative to clip() shown by __repr__().
Args:
startframe: [int] the start frame relative to the video framerate() for the clip
endframe: [int] the end frame relative to the video framerate for the clip, may be none
Returns:
This video object, clipped so that a load() will result in frame=0 equivalent to startframe. All tracks and activities updated relative to the new startframe.
.. note:
- This returns a clone of the video for idempotence
- This does not load the video. This updates the ffmpeg filter chain to temporally trim the video. See self.commandline() for the updated filter chain to run.
"""
assert (endframe is None or startframe <= endframe) and startframe >= 0, "Invalid start and end frames (%s, %s)" % (str(startframe), str(endframe))
v = self.clone()
if not v.isloaded():
# -- Copied from super().clip() to allow for clip on clone (for idempotence)
# -- This code copy is used to avoid super(Scene, self.clone()) which screws up class inheritance for iPython reload
assert not v.isloaded(), "Filters can only be applied prior to load() - Try calling flush() first"
timestamp_in_seconds = ((v._startframe if v._startframe is not None else 0)+startframe)/float(v.framerate())
v._update_ffmpeg_seek(timestamp_in_seconds)
if endframe is not None:
v._ffmpeg = v._ffmpeg.setpts('PTS-STARTPTS') # reset timestamp to 0 before trim filter
v._ffmpeg = v._ffmpeg.trim(start=0, end=(endframe-startframe)/self.framerate()) # must be in seconds to allow for framerate conversion
v._ffmpeg = v._ffmpeg.setpts('PTS-STARTPTS') # reset timestamp to 0 after trim filter
v._startframe = startframe if v._startframe is None else v._startframe + startframe # for __repr__ only
v._endframe = (v._startframe + (endframe-startframe)) if endframe is not None else v._endframe # for __repr__ only
# -- end copy
else:
endframe = endframe if endframe is not None else len(self._array)
v._array = self._array[startframe:endframe]
(v._startframe, v._endframe) = (0, endframe-startframe)
v._tracks = {k:t.offset(dt=-startframe).truncate(startframe=0, endframe=(endframe-startframe) if endframe is not None else None) for (k,t) in v.tracks().items()} # may be degenerate
v._activities = {k:a.offset(dt=-startframe).truncate(startframe=0, endframe=(endframe-startframe) if endframe is not None else None) for (k,a) in v.activities().items()} # may be degenerate
return v.trackfilter(lambda t: len(t)>0).activityfilter(lambda a: len(a)>0) # remove degenerate tracks and activities
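# Hedged usage sketch (illustrative values, not from the source): clip a loaded synthetic scene
# to frames [10, 20); the result is a clone whose frame 0 corresponds to original frame 10,
# with tracks and activities offset and truncated accordingly.
#   >>> v = RandomScene()
#   >>> c = v.clip(10, 20)
#   >>> (len(c), c.startframe())   # (10, 0); the original v is unchanged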
def crop(self, bb, zeropad=True):
"""Crop the video using the supplied box, update tracks relative to crop, video is zeropadded if box is outside frame rectangle"""
assert isinstance(bb, vipy.geometry.BoundingBox), "Invalid input"
bb = bb.int()
bbc = bb.clone().imclipshape(self.width(), self.height()).int()
#if zeropad and bb != bbc:
# self.zeropad(bb.width()-bbc.width(), bb.height()-bbc.height())
# bb = bb.offset(bb.width()-bbc.width(), bb.height()-bbc.height())
super().crop(bb, zeropad=zeropad) # range check handled here to correctly apply zeropad
bb = bb if zeropad else bbc
self._tracks = {k:t.offset(dx=-bb.xmin(), dy=-bb.ymin()) for (k,t) in self.tracks().items()}
return self
def zeropad(self, padwidth, padheight):
"""Zero pad the video with padwidth columns before and after, and padheight rows before and after
Update tracks accordingly.
"""
assert isinstance(padwidth, int) and isinstance(padheight, int)
super().zeropad(padwidth, padheight)
self._tracks = {k:t.offset(dx=padwidth, dy=padheight) for (k,t) in self.tracks().items()}
return self
def fliplr(self):
(H,W) = self.shape() # yuck, need to get image dimensions before filter
self._tracks = {k:t.fliplr(H,W) for (k,t) in self.tracks().items()}
super().fliplr()
return self
def flipud(self):
assert not self.isloaded(), "Filters can only be applied prior to load() - Try calling flush() first"
(H,W) = self.shape() # yuck, need to get image dimensions before filter
self._tracks = {k:t.flipud(H,W) for (k,t) in self.tracks().items()}
super().flipud()
return self
def rot90ccw(self):
assert not self.isloaded(), "Filters can only be applied prior to load() - Try calling flush() first"
(H,W) = self.shape() # yuck, need to get image dimensions before filter
self._tracks = {k:t.rot90ccw(H,W) for (k,t) in self.tracks().items()}
super().rot90ccw()
return self
def rot90cw(self):
assert not self.isloaded(), "Filters can only be applied prior to load() - Try calling flush() first"
(H,W) = self.shape() # yuck, need to get image dimensions before filter
self._tracks = {k:t.rot90cw(H,W) for (k,t) in self.tracks().items()}
super().rot90cw()
return self
def resize(self, rows=None, cols=None, width=None, height=None):
"""Resize the video to (rows, cols), preserving the aspect ratio if only rows or cols is provided"""
assert not (rows is not None and height is not None) # cannot be both
assert not (cols is not None and width is not None) # cannot be both
rows = rows if rows is not None else height
cols = cols if cols is not None else width
assert rows is not None or cols is not None, "Invalid input"
(H,W) = self.shape() # yuck, need to get image dimensions before filter, manually set this prior to calling resize if known
sy = rows / float(H) if rows is not None else cols / float(W)
sx = cols / float(W) if cols is not None else rows / float(H)
self._tracks = {k:t.scalex(sx) for (k,t) in self.tracks().items()}
self._tracks = {k:t.scaley(sy) for (k,t) in self.tracks().items()}
super().resize(rows=rows, cols=cols)
return self
def mindim(self, dim=None):
"""Resize the video so that the minimum of (width,height)=dim, preserving aspect ratio"""
(H,W) = self.shape() # yuck, need to get image dimensions before filter
return min(self.shape()) if dim is None else (self if min(H,W) == dim else (self.resize(cols=dim) if W<H else self.resize(rows=dim)))
def maxdim(self, dim=None):
"""Resize the video so that the maximum of (width,height)=dim, preserving aspect ratio"""
assert not self.isloaded(), "Filters can only be applied prior to load() - Try calling flush() first"
(H,W) = self.shape() # yuck, need to get image dimensions before filter
return max(H,W) if dim is None else (self.resize(cols=dim) if W>H else self.resize(rows=dim))
def rescale(self, s):
"""Spatially rescale the scene by a constant scale factor.
Args:
s: [float] Scale factor > 0 to isotropically scale the image.
"""
assert s == 1 or not self.isloaded(), "Filters can only be applied prior to load() - Try calling flush() first"
self._tracks = {k:t.rescale(s) for (k,t) in self.tracks().items()}
super().rescale(s)
return self
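# Hedged usage sketch (illustrative sizes, assuming the base class supports resizing a loaded
# array): spatial transforms keep the tracks aligned with the pixels. Note that rescale()
# requires an unloaded video (see the assert above), so it is typically applied before load();
# resize()/mindim() scale the boxes by the same factors as the frames.
#   >>> v = RandomScene()
#   >>> r = v.clone().resize(rows=256)   # cols chosen to preserve aspect ratio
#   >>> m = v.clone().mindim(128)        # min(height, width) becomes 128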
def startframe(self):
return self._startframe
def extrapolate(self, f, dt=None):
"""Extrapolate the video to frame f and add the extrapolated tracks to the video"""
return self.trackmap(lambda t: t.add(f, t.linear_extrapolation(f, dt=dt if dt is not None else self.framerate()), strict=False))
def dedupe(self, spatial_iou_threshold=0.8, dt=5):
"""Find and delete duplicate tracks by track segmentiou() overlap.
Algorithm
- For each pair of tracks with the same category, find the largest temporal segment that contains both tracks.
- For this segment, compute the IOU for each box interpolated at a stride of dt frames
- Compute the mean IOU for this segment. This is the segment IOU.
- If the segment IOU is greater than the threshold, merge the shorter of the two tracks with the current track.
"""
deleted = set([])
for tj in sorted(self.tracklist(), key=lambda t: len(t), reverse=True): # longest to shortest
for (s, ti) in sorted([(0,t) if (len(tj) < len(t) or t.id() in deleted or t.id() == tj.id() or t.category() != tj.category()) else (tj.fragmentiou(t, dt=dt), t) for t in self.tracklist()], key=lambda x: x[0], reverse=True):
if s > spatial_iou_threshold: # best mean framewise overlap during overlapping segment of two tracks (ti, tj)
print('[vipy.video.dedupe]: merging duplicate track "%s" (id=%s) which overlaps with "%s" (id=%s)' % (ti, ti.id(), tj, tj.id()))
self.tracks()[tj.id()] = tj.union(ti) # merge
self.activitymap(lambda a: a.replace(ti, tj)) # replace merged track reference in activity
deleted.add(ti.id())
self.trackfilter(lambda t: t.id() not in deleted) # remove duplicate tracks
return self
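# Hedged usage sketch (illustrative thresholds): remove near-duplicate tracks left by a
# fragmented tracker; tracks of the same category whose mean framewise IoU over their shared
# segment exceeds spatial_iou_threshold are merged into the longer track.
#   >>> v = RandomScene()
#   >>> v = v.dedupe(spatial_iou_threshold=0.8, dt=5)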
def combine(self, other, tracks=True, activities=True):
"""Combine the activities and tracks from both scenes into self"""
assert isinstance(other, Scene), "Invalid input - must be vipy.video.Scene() object and not type=%s" % str(type(other))
assert self.framerate() == other.framerate()
o = other.clone(rekey=True) # make sure keys are unique
if activities:
self.activities().update(o.activities())
if tracks:
self.tracks().update(o.tracks())
return self
def union(self, other, temporal_iou_threshold=0.5, spatial_iou_threshold=0.6, strict=True, overlap='average', percentilecover=0.8, percentilesamples=100, activity=True, track=True):
"""Compute the union two scenes as the set of unique activities and tracks.
A pair of activities or tracks is non-unique if they overlap spatially and temporally by a given IoU threshold. Overlapping tracks are merged.
Tracks are merged when the mean IoU over the overlapping segment of two tracks of the same category exceeds the provided spatial_iou_threshold.
Activities are merged when the temporal IoU of two activities of the same class exceeds the provided temporal_iou_threshold.
Input:
-other: Scene or list of scenes for union. Other may be a clip of self at a different framerate, spatial isotropic scale, or clip offset
-spatial_iou_threshold: The intersection over union threshold for the mean of the two segments of an overlapping track. Disable by setting to 1.0
-temporal_iou_threshold: The intersection over union threshold for a temporal bounding box for a pair of activities to be declared duplicates. Disable by setting to 1.0
-strict: Require both scenes to share the same underlying video filename
-overlap=['average', 'replace', 'keep']
-average: merge two tracks by averaging the boxes if overlapping
-replace: merge two tracks by replacing overlapping boxes with other (discard self)
-keep: merge two tracks by keeping overlapping boxes with other (discard other)
-percentilecover [0,1]: When determining the assignment of two tracks, compute the percentilecover of two tracks by ranking the cover in the overlapping segment and computing the mean of the top-k assignments, where k=len(segment)*percentilecover.
-percentilesamples [>1]: the number of samples along the overlapping segment for computing percentile cover
-activity [bool]: union() of activities only
-track [bool]: union() of tracks only
Output:
-Updates this scene to include the non-overlapping activities from other. By default, it takes the strict union of all activities and tracks.
Notes:
-This is useful for merging scenes computed using a lower resolution/framerate/clipped object or activity detector without running the detector on the high-res scene
-This function will preserve the invariance for v == v.clear().union(v.rescale(0.5).framerate(5).activityclip()), to within the quantization error of framerate() downsampling.
-percentilecover is a robust method of track assignment when the boxes for two tracks (e.g. ground truth and detections) may deform due to occlusion.
"""
assert overlap in ['average', 'replace', 'keep'], "Invalid input - 'overlap' must be in [average, replace, keep]"
assert spatial_iou_threshold >= 0 and spatial_iou_threshold <= 1, "invalid spatial_iou_threshold, must be between [0,1]"
assert temporal_iou_threshold >= 0 and temporal_iou_threshold <= 1, "invalid temporal_iou_threshold, must be between [0,1]"
assert percentilesamples >= 1, "invalid samples, must be >= 1"
if not activity and not track:
return self # nothing to do
sc = self.clone() # do not change self yet, make a copy then merge at the end
for o in tolist(other):
assert isinstance(o, Scene), "Invalid input - must be vipy.video.Scene() object and not type=%s" % str(type(o))
if strict:
assert sc.filename() == o.filename(), "Invalid input - Scenes must have the same underlying video. Disable this with strict=False."
oc = o.clone() # do not change other, make a copy
# Key collision?
if len(set(sc.tracks().keys()).intersection(set(oc.tracks().keys()))) > 0:
print('[vipy.video.union]: track key collision - Rekeying other... Use other.rekey() to suppress this warning.')
oc.rekey()
if len(set(sc.activities().keys()).intersection(set(oc.activities().keys()))) > 0:
print('[vipy.video.union]: activity key collision - Rekeying other... Use other.rekey() to suppress this warning.')
oc.rekey()
# Similarity transform? Other may differ from self by a temporal scale (framerate), temporal translation (clip) or spatial isotropic scale (rescale)
assert np.isclose(sc.aspect_ratio(), oc.aspect_ratio(), atol=1E-2), "Invalid input - Scenes must have the same aspect ratio"
if sc.width() != oc.width():
oc = oc.rescale(sc.width() / oc.width()) # match spatial scale
if not np.isclose(sc.framerate(), oc.framerate(), atol=1E-3):
oc = oc.framerate(sc.framerate()) # match temporal scale (video in oc will not match, only annotations)
if sc.startframe() != oc.startframe():
dt = (oc.startframe() if oc.startframe() is not None else 0) - (sc.startframe() if sc.startframe() is not None else 0)
oc = oc.trackmap(lambda t: t.offset(dt=dt)).activitymap(lambda a: a.offset(dt=dt)) # match temporal translation of tracks and activities
oc = oc.trackfilter(lambda t: ((not t.isdegenerate()) and len(t)>0), activitytrack=False)
# Merge other tracks into selfclone: one-to-many mapping from self to other
merged = {} # dictionary mapping trackid in other to the trackid in self, each track in other can be merged at most once
for ti in sorted(sc.tracklist(), key=lambda t: len(t), reverse=True): # longest to shortest
for tj in sorted(oc.tracklist(), key=lambda t: len(t), reverse=True):
if ti.category() == tj.category() and (tj.id() not in merged) and tj.segment_percentilecover(sc.track(ti.id()), percentile=percentilecover, samples=percentilesamples) > spatial_iou_threshold: # mean framewise overlap during overlapping segment of two tracks
sc.tracks()[ti.id()] = sc.track(ti.id()).union(tj, overlap=overlap) # merge duplicate/fragmented tracks from other into self, union() returns clone
merged[tj.id()] = ti.id()
print('[vipy.video.union]: merging track "%s"(id=%s) + "%s"(id=%s) for scene "%s"' % (str(ti), str(ti.id()), str(tj), str(tj.id()), str(sc)))
oc.trackfilter(lambda t: t.id() not in merged, activitytrack=False) # remove duplicate other track for final union
# Merge other activities into selfclone: one-to-one mapping
for (i,j) in merged.items(): # i=id of other, j=id of self
oc.activitymap(lambda a: a.replaceid(i, j) if a.hastrack(i) else a) # update track IDs referenced in activities for merged tracks
for (i,ai) in sc.activities().items():
for (j,aj) in oc.activities().items():
if ai.category() == aj.category() and set(ai.trackids()) == set(aj.trackids()) and ai.temporal_iou(aj) > temporal_iou_threshold:
oc.activityfilter(lambda a: a.id() != j) # remove duplicate activity from final union
oc.activityfilter(lambda a: len(a.tracks())>0) # remove empty activities not merged
# Union
sc.tracks().update(oc.tracks())
sc.activities().update(oc.activities())
# Final union of unique tracks/activities
if track:
self.tracks(sc.tracklist()) # union of tracks only
if activity:
self.activities(sc.activitylist()) # union of activities only: may reference tracks not in self if track=False
return self
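# Hedged usage sketch (illustrative thresholds): take the union of a scene with a rekeyed clone
# of itself; duplicate tracks and activities are detected by spatial/temporal IoU and merged,
# so the result is equivalent to the original annotations.
#   >>> a = RandomScene()
#   >>> b = a.clone(rekey=True)
#   >>> a = a.union(b, spatial_iou_threshold=0.6, temporal_iou_threshold=0.5, strict=False)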
def annotate(self, outfile=None, fontsize=10, captionoffset=(0,0), textfacecolor='white', textfacealpha=1.0, shortlabel=True, boxalpha=0.25, d_category2color={'Person':'green', 'Vehicle':'blue', 'Object':'red'}, categories=None, nocaption=False, nocaption_withstring=[], mutator=None, timestamp=None, timestampcolor='black', timestampfacecolor='white', verbose=True):
"""Generate a video visualization of all annotated objects and activities in the video.
The annotation video will be at the resolution and framerate of the underlying video, and pixels in this video will now contain the overlay.
This function does not play the video, it only generates the annotated video frames. Use show(), which is equivalent to annotate().saveas().play().
Args:
outfile: [str] An optional file to stream the annotation to without storing the annotated video in memory
fontsize: [int] The fontsize of bounding box captions, used by matplotlib
captionoffset: (tuple) The (x,y) offset relative to the bounding box to place the caption for each box.
textfacecolor: [str] The color of the text in the bounding box caption. Must be in `vipy.gui.using_matplotlib.colorlist`.
textfacealpha: [float] The transparency of the text in the bounding box caption. Must be in [0,1], where 0=transparent and 1=opaque.
shortlabel: [bool] If true, display the shortlabel for each object in the scene, otherwise show the full category
boxalpha: [float] The transparency of the box face behind the text. Must be in [0,1], where 0=transparent and 1=opaque.
d_category2color: [dict] A dictionary mapping categories of objects in the scene to their box colors. Named colors must be in `vipy.gui.using_matplotlib.colorlist`.
categories: [list] Only show these categories, or show them all if None
nocaption_withstring: [list]: Do not show captions for those detection categories (or shortlabels) containing any of the strings in the provided list
nocaption: [bool] If true, do not show any captions, just boxes
mutator: [lambda] A lambda function that will mutate an image to allow for complex visualizations. This should be a mutator like `vipy.image.mutator_show_trackid`.
timestamp: [bool] If true, show a semitransparent timestamp (when the annotation occurs, not when the video was collected) with frame number in the upper left corner of the video
timestampcolor: [str] The color of the timestamp text. Named colors must be in `vipy.gui.using_matplotlib.colorlist`.
timestampfacecolor: [str] The color of the timestamp background. Named colors must be in `vipy.gui.using_matplotlib.colorlist`.
verbose: [bool] Show more helpful messages if true
Returns:
A `vipy.video.Video` with annotations in the pixels. If outfile is provided, then the returned video will be flushed.
.. note:: In general, this function should not be run on very long videos without the outfile kwarg, as it requires loading the video framewise into memory.
"""
if verbose:
print('[vipy.video.annotate]: Annotating video ...')
f_mutator = mutator if mutator is not None else vipy.image.mutator_show_jointlabel()
f_timestamp = (lambda k: '%s %d' % (vipy.util.clockstamp(), k)) if timestamp is True else timestamp
if outfile is None:
assert self.load().isloaded(), "Load() failed"
imgs = [f_mutator(self[k].clone(), k).savefig(fontsize=fontsize,
captionoffset=captionoffset,
textfacecolor=textfacecolor,
textfacealpha=textfacealpha,
shortlabel=shortlabel,
boxalpha=boxalpha,
d_category2color=d_category2color,
categories=categories,
nocaption=nocaption,
timestampcolor=timestampcolor,
timestampfacecolor=timestampfacecolor,
timestamp=f_timestamp(k) if timestamp is not None else None,
figure=1 if k<(len(self)-1) else None, # cleanup on last frame
nocaption_withstring=nocaption_withstring).numpy() for k in range(0, len(self))]
# Replace pixels with annotated pixels and downcast object to vipy.video.Video (since there are no more objects to show)
return vipy.video.Video(array=np.stack([np.array(PIL.Image.fromarray(img).convert('RGB')) for img in imgs], axis=0), framerate=self.framerate(), attributes=self.attributes) # slow for large videos
else:
# Stream to output video without loading all frames into memory
n = self.duration_in_frames_of_videofile() if not self.isloaded() else len(self)
vo = vipy.video.Video(filename=outfile, framerate=self.framerate())
with vo.stream(overwrite=True) as so:
for (k,im) in enumerate(self.stream()):
so.write(f_mutator(im.clone(), k).savefig(fontsize=fontsize,
captionoffset=captionoffset,
textfacecolor=textfacecolor,
textfacealpha=textfacealpha,
shortlabel=shortlabel,
boxalpha=boxalpha,
d_category2color=d_category2color,
categories=categories,
nocaption=nocaption,
timestampcolor=timestampcolor,
timestampfacecolor=timestampfacecolor,
timestamp=f_timestamp(k) if timestamp is not None else None,
figure=1 if k<(n-1) else None, # cleanup on last frame
nocaption_withstring=nocaption_withstring).rgb())
return vo
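# Hedged usage sketch: render box and caption overlays into pixels. Without outfile the
# annotated frames are returned in memory as a vipy.video.Video; with outfile the frames are
# streamed to disk frame by frame (the filename below is a placeholder).
#   >>> v = RandomSceneActivity()
#   >>> a = v.annotate(timestamp=True)             # in-memory annotated video
#   >>> v.annotate(outfile='/tmp/annotated.mp4')   # streamed, avoids loading all frames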
def _show(self, outfile=None, verbose=True, fontsize=10, captionoffset=(0,0), textfacecolor='white', textfacealpha=1.0, shortlabel=True, boxalpha=0.25, d_category2color={'Person':'green', 'Vehicle':'blue', 'Object':'red'}, categories=None, nocaption=False, nocaption_withstring=[], notebook=False, timestamp=None, timestampcolor='black', timestampfacecolor='white'):
"""Generate an annotation video saved to outfile (or tempfile if outfile=None) and show it using ffplay when it is done exporting. Do not modify the original video buffer. Returns a clone of the video with the shown annotation."""
return self.clone().annotate(verbose=verbose,
fontsize=fontsize,
captionoffset=captionoffset,
textfacecolor=textfacecolor,
textfacealpha=textfacealpha,
shortlabel=shortlabel,
boxalpha=boxalpha,
d_category2color=d_category2color,
categories=categories,
nocaption=nocaption,
timestampcolor=timestampcolor,
timestampfacecolor=timestampfacecolor,
timestamp=timestamp,
nocaption_withstring=nocaption_withstring).saveas(outfile).play(notebook=notebook)
def show(self, outfile=None, verbose=True, fontsize=10, captionoffset=(0,0), textfacecolor='white', textfacealpha=1.0, shortlabel=True, boxalpha=0.25, d_category2color={'Person':'green', 'Vehicle':'blue', 'Object':'red'}, categories=None, nocaption=False, nocaption_withstring=[], figure=1, fps=None, timestamp=None, timestampcolor='black', timestampfacecolor='white', mutator=None):
"""Faster show using interative image show for annotated videos. This can visualize videos before video rendering is complete, but it cannot guarantee frame rates. Large videos with complex scenes will slow this down and will render at lower frame rates."""
fps = min(fps, self.framerate()) if fps is not None else self.framerate()
assert fps > 0, "Invalid display framerate"
f_timestamp = (lambda k: '%s %d' % (vipy.util.clockstamp(), k)) if timestamp is True else timestamp
f_mutator = mutator if mutator is not None else vipy.image.mutator_show_jointlabel()
if not self.isdownloaded() and self.hasurl():
self.download()
with Stopwatch() as sw:
for (k,im) in enumerate(self.load() if self.isloaded() else self.stream()):
time.sleep(max(0, (1.0/self.framerate())*int(np.ceil((self.framerate()/fps)))))
f_mutator(im,k).show(categories=categories,
figure=figure,
nocaption=nocaption,
nocaption_withstring=nocaption_withstring,
fontsize=fontsize,
boxalpha=boxalpha,
d_category2color=d_category2color,
captionoffset=captionoffset,
textfacecolor=textfacecolor,
textfacealpha=textfacealpha,
timestampcolor=timestampcolor,
timestampfacecolor=timestampfacecolor,
timestamp=f_timestamp(k) if timestamp is not None else None,
shortlabel=shortlabel)
if vipy.globals._user_hit_escape():
break
vipy.show.close(figure)
return self
def thumbnail(self, outfile=None, frame=0, fontsize=10, nocaption=False, boxalpha=0.25, dpi=200, textfacecolor='white', textfacealpha=1.0):
"""Return annotated frame=k of video, save annotation visualization to provided outfile if provided, otherwise return vipy.image.Scene"""
im = self.frame(frame, img=self.preview(framenum=frame).array())
return im.savefig(outfile=outfile, fontsize=fontsize, nocaption=nocaption, boxalpha=boxalpha, dpi=dpi, textfacecolor=textfacecolor, textfacealpha=textfacealpha) if outfile is not None else im
def stabilize(self, flowdim=256, gpu=None):
"""Background stablization using flow based stabilization masking foreground region. This will output a video with all frames aligned to the first frame, such that the background is static."""
from vipy.flow import Flow # requires opencv
return Flow(flowdim=flowdim, gpu=gpu).stabilize(self.clone(), residual=True)
def pixelmask(self, pixelsize=8):
"""Replace all pixels in foreground boxes with pixelation"""
for im in self.mutable(): # convert to writeable numpy array, triggers writeable copy
im.pixelmask(pixelsize) # shared numpy array
return self
def binarymask(self):
"""Replace all pixels in foreground boxes with white, zero in background"""
for im in self.mutable(): # convert to writeable numpy array, triggers writeable copy
im.binarymask() # shared numpy array
return self
def asfloatmask(self, fg=1.0, bg=0.0):
"""Replace all pixels in foreground boxes with fg, and bg in background, return a copy"""
assert self.isloaded()
self.numpy() # convert to writeable numpy array, triggers writeable copy
array = np.full( (len(self.load()), self.height(), self.width(), 1), dtype=np.float32, fill_value=bg)
for (k,im) in enumerate(self):
for bb in im.objects():
if bb.hasintersection(im.imagebox()):
array[k, int(round(bb._ymin)):int(round(bb._ymax)), int(round(bb._xmin)):int(round(bb._xmax))] = fg # does not need imclip
return vipy.video.Video(array=array, framerate=self.framerate(), colorspace='float')
def meanmask(self):
"""Replace all pixels in foreground boxes with mean color"""
for im in self.mutable(): # convert to writeable numpy array, triggers writeable copy
im.meanmask() # shared numpy array
return self
def fgmask(self):
"""Replace all pixels in foreground boxes with zero"""
for im in self.mutable(): # convert to writeable numpy array, triggers writeable copy
im.fgmask() # shared numpy array
return self
def zeromask(self):
"""Alias for fgmask"""
return self.fgmask()
def blurmask(self, radius=7):
"""Replace all pixels in foreground boxes with gaussian blurred foreground"""
for im in self.mutable(): # convert to writeable numpy array, triggers writeable copy
im.blurmask(radius) # shared numpy array
return self
def downcast(self):
"""Cast the object to a `vipy.video.Video` class"""
self.__class__ = vipy.video.Video
return self
def merge_tracks(self, dilate_height=2.0, dilate_width=2.0, framedist=5):
"""Merge tracks if a track endpoint dilated by a fraction overlaps exactly one track startpoint, and the endpoint and startpoint are close enough together temporally.
.. note::
- This is useful for continuing tracking when the detection framerate was too low and the assignment falls outside the measurement gate.
- This will not work for complex scenes, as it assumes that there is exactly one possible continuation for a track.
"""
merged = set([])
for ti in sorted(self.tracklist(), key=lambda t: t.startframe()):
for tj in sorted(self.tracklist(), key=lambda t: t.startframe()):
if (tj.id() not in merged) and (ti.id() != tj.id()) and (tj.startframe() >= ti.endframe()) and ((tj.startframe()-ti.endframe()) <= framedist) and (ti.category() == tj.category()):
di = ti[ti.endframe()].dilate_height(dilate_height).dilate_width(dilate_width)
dj = tj[tj.startframe()]
if di.iou(dj) > 0 and not any([di.iou(tk[tj.startframe()]) > 0 for tk in self.tracklist() if (tk.id() not in [ti.id(), tj.id()]) and tk.during(tj.startframe())]):
self.tracks()[ti.id()] = ti.union(tj) # Merge tracks that are within gating distance
self.delete(tj.id()) # remove merged track
merged.add(tj.id())
break
return self
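# Hedged usage sketch (illustrative parameters): stitch together track fragments left by a
# low-framerate detector. An endpoint box dilated 2x that overlaps exactly one start point
# within 5 frames is merged with that continuation.
#   >>> v = RandomScene().merge_tracks(dilate_height=2.0, dilate_width=2.0, framedist=5)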
def assign(self, frame, dets, minconf=0.2, maxhistory=5, activityiou=0.5, trackcover=0.2, trackconfsamples=4, gate=0, activitymerge=True, activitynms=False):
"""Assign a list of vipy.object.Detections at frame k to scene by greedy track association. In-place update.
Args:
activityiou: [float] the minimum temporal IOU for activity assignment
minconf: [float] the minimum confidence for a detection to be considered as a new track
maxhistory: [int] the maximum propagation length of a track with no measurements, the frame history used for velocity estimates
trackconfsamples: [int] the number of uniformly spaced samples along a track to compute a track confidence
gate: [int] the gating distance in pixels used for assignment of fast moving detections. Useful for low detection framerates if a detection does not overlap with the track.
trackcover: [float] the minimum cover necessary for assignment of a detection to a track
activitymerge: [bool] if true, then merge overlapping activity detections of the same track and category, otherwise each activity detection is added as a new detection
activitynms: [bool] if true, then perform non-maximum suppression of activity detections of the same actor and category that overlap more than activityiou
Returns:
This video object with each det assigned to its corresponding track or activity.
"""
assert dets is None or all([isinstance(d, vipy.object.Detection) or isinstance(d, vipy.activity.Activity) for d in tolist(dets)]), "invalid input"
assert frame >= 0 and minconf >= 0 and minconf <= 1.0 and maxhistory > 0, "invalid input"
if dets is None or len(tolist(dets)) == 0:
return self
dets = tolist(dets)
if any([d.confidence() is None for d in dets]):
warnings.warn('Removing %d detections with no confidence' % len([d for d in dets if d.confidence() is None]))
dets = [d for d in dets if d.confidence() is not None]
objdets = [d for d in dets if isinstance(d, vipy.object.Detection)]
activitydets = [d for d in dets if isinstance(d, vipy.activity.Activity)]
# Object detection to track assignment
if len(objdets) > 0:
# Track propagation: Constant velocity motion model for active tracks
t_ref = [(t, t.linear_extrapolation(frame, dt=maxhistory, shape=False)) for (k,t) in self.tracks().items() if ((frame - t.endframe()) <= maxhistory)]
trackarea = [ti.area() for (t,ti) in t_ref]
detarea = [d.area() for d in objdets]
# Track assignment:
# - Each track is assigned at most one detection
# - Each detection is assigned to at most one track.
# - Assignment is the highest confidence maximum overlapping detection within tracking gate
trackconf = {t.id():t.confidence(samples=trackconfsamples) for (t, ti) in t_ref}
assignments = [(t, d.confidence(), d.iou(ti, area=detarea[j], otherarea=trackarea[i]), d.shapeiou(ti, area=detarea[j], otherarea=trackarea[i]), d.maxcover(ti, area=detarea[j], otherarea=trackarea[i]), d)
for (i, (t, ti)) in enumerate(t_ref)
for (j,d) in enumerate(objdets)
if (t.category() == d.category() and
(((ti._xmax if ti._xmax < d._xmax else d._xmax) - (ti._xmin if ti._xmin > d._xmin else d._xmin)) > 0 and
((ti._ymax if ti._ymax < d._ymax else d._ymax) - (ti._ymin if ti._ymin > d._ymin else d._ymin)) > 0))]
assigned = set([])
posconf = min([d.confidence() for d in objdets]) if len(objdets)>0 else 0
assignments.sort(key=lambda x: (x[1]+posconf)*(x[2]+x[3]+x[4])+trackconf[x[0].id()], reverse=True) # in-place
for (t, conf, iou, shapeiou, cover, d) in assignments:
if cover > (trackcover if len(t)>1 else 0): # the highest confidence detection within the iou gate (or any overlap if not yet enough history for velocity estimate)
if (t.id() not in assigned and d.id() not in assigned): # not assigned yet, assign it!
self.track(t.id()).update(frame, d.clone()) # track assignment! (clone required)
assigned.add(t.id()) # cannot assign again to this track
assigned.add(d.id()) # mark detection as assigned
# Track spawn from unassigned and unexplained detections
for (j,d) in enumerate(objdets):
if (d.id() not in assigned):
if (d.confidence() >= minconf and not any([t.linear_extrapolation(frame, dt=maxhistory, shape=False).maxcover(d, otherarea=detarea[j]) >= 0.7 for (i,(t,ti)) in enumerate(t_ref) if t.category() == d.category()])):
gated = [(t, t.linear_extrapolation(frame, dt=maxhistory, shape=False)) for (t,ti) in t_ref if (t.id() not in assigned and t.category() == d.category())] if gate>0 else []
gated = sorted([(t, ti) for (t, ti) in gated if ti.hasintersection(d, gate=gate)], key=lambda x: d.sqdist(x[1]))
if len(gated) > 0:
self.track(gated[0][0].id()).update(frame, d.clone()) # track assignment! (clone required)
assigned.add(gated[0][0].id())
assigned.add(d.id())
else:
assigned.add(self.add(vipy.object.Track(keyframes=[frame], boxes=[d.clone()], category=d.category(), framerate=self.framerate()), rangecheck=False)) # clone required
assigned.add(d.id())
# Activity assignment
if len(activitydets) > 0:
assert all([d.actorid() in self.tracks() for d in activitydets]), "Invalid activity"
assigned = set([])
if activitymerge:
minframe = min([a._startframe for a in activitydets])
activities = [a for a in self.activities().values() if a._endframe >= minframe]
for d in activitydets:
for a in activities:
if (a._label == d._label) and (a._actorid == d._actorid) and a.hasoverlap(d, activityiou):
a.union(d) # activity assignment
assigned.add(d._id)
break # assigned, early exit
if activitynms:
minframe = min([a._startframe for a in activitydets])
activities = sorted([a for a in self.activities().values() if a._endframe >= minframe], key=lambda a: a.confidence(), reverse=True)
for d in sorted(activitydets, key=lambda x: x.confidence(), reverse=True):
for a in activities:
if (a._label == d._label) and (a._actorid == d._actorid) and a.hasoverlap(d, activityiou):
assigned.add(a._id if d.confidence()>a.confidence() else d._id) # suppressed
for id in assigned:
if id in self._activities:
del self._activities[id] # suppression, faster than self.activityfilter(lambda a: a.id() in assigned)
# Activity construction from unassigned detections
for d in activitydets:
if d._id not in assigned:
self.add(d.clone())
return self
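# Hedged usage sketch of tracking by detection with assign(): feed per-frame detections from a
# hypothetical detector into the scene; assign() associates each detection with an existing
# track or spawns a new one, and activity detections are merged by temporal overlap.
# 'myvideo.mp4' and f_detector are placeholders, not part of this module.
#   >>> v = Scene(filename='myvideo.mp4')
#   >>> for (k, im) in enumerate(v.stream()):
#   ...     dets = f_detector(im)   # -> list of vipy.object.Detection with category, box and confidence
#   ...     v.assign(k, dets, minconf=0.2, maxhistory=5, trackcover=0.2)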
def RandomVideo(rows=None, cols=None, frames=None):
"""Return a random loaded vipy.video.video.
Useful for unit testing, minimum size (32x32x32) for ffmpeg
"""
rows = np.random.randint(256, 1024) if rows is None else rows
cols = np.random.randint(256, 1024) if cols is None else cols
frames = np.random.randint(32, 256) if frames is None else frames
assert rows>32 and cols>32 and frames>=32
return Video(array=np.uint8(255 * np.random.rand(frames, rows, cols, 3)), colorspace='rgb')
def RandomScene(rows=None, cols=None, frames=None):
"""Return a random loaded vipy.video.Scene.
Useful for unit testing.
"""
v = RandomVideo(rows, cols, frames)
(rows, cols) = v.shape()
tracks = [vipy.object.Track(label='track%d' % k, shortlabel='t%d' % k,
keyframes=[0, np.random.randint(50,100), 150],
framerate=30,
boxes=[vipy.object.Detection(xmin=np.random.randint(0,cols - 16), ymin=np.random.randint(0,rows - 16),
width=np.random.randint(16,cols//2), height=np.random.randint(16,rows//2)),
vipy.object.Detection(xmin=np.random.randint(0,cols - 16), ymin=np.random.randint(0,rows - 16),
width=np.random.randint(16,cols//2), height=np.random.randint(16,rows//2)),
vipy.object.Detection(xmin=np.random.randint(0,cols - 16), ymin=np.random.randint(0,rows - 16),
width=np.random.randint(16,cols//2), height=np.random.randint(16,rows//2))]) for k in range(0,32)]
activities = [vipy.activity.Activity(label='activity%d' % k, shortlabel='a%d' % k, tracks=[tracks[j].id() for j in [np.random.randint(32)]], startframe=np.random.randint(50,99), endframe=np.random.randint(100,150), framerate=30) for k in range(0,32)]
return Scene(array=v.array(), colorspace='rgb', category='scene', tracks=tracks, activities=activities, framerate=30)
def RandomSceneActivity(rows=None, cols=None, frames=256):
"""Return a random loaded vipy.video.Scene.
Useful for unit testing.
"""
v = RandomVideo(rows, cols, frames)
(rows, cols) = v.shape()
tracks = [vipy.object.Track(label=['Person','Vehicle','Object'][k], shortlabel='track%d' % k, boundary='strict',
keyframes=[0, np.random.randint(50,100), np.random.randint(50,150)],
framerate=30,
boxes=[vipy.object.Detection(xmin=np.random.randint(0,cols - 16), ymin=np.random.randint(0,rows - 16),
width=np.random.randint(16,cols//2), height=np.random.randint(16,rows//2)),
vipy.object.Detection(xmin=np.random.randint(0,cols - 16), ymin=np.random.randint(0,rows - 16),
width=np.random.randint(16,cols//2), height=np.random.randint(16,rows//2)),
vipy.object.Detection(xmin=np.random.randint(0,cols - 16), ymin=np.random.randint(0,rows - 16),
width=np.random.randint(16,cols//2), height=np.random.randint(16,rows//2))]) for k in range(0,3)]
activities = [vipy.activity.Activity(label='Person Carrying', shortlabel='Carry', tracks=[tracks[0].id(), tracks[1].id()], startframe=np.random.randint(20,50), endframe=np.random.randint(70,100), framerate=30)]
ims = Scene(array=v.array(), colorspace='rgb', category='scene', tracks=tracks, activities=activities, framerate=30)
return ims
def EmptyScene():
"""Return an empty scene"""
return vipy.video.Scene(array=np.zeros((1,1,1,3), dtype=np.uint8))
|
test_telnetlib.py
|
import socket
import selectors
import telnetlib
import time
import contextlib
import unittest
from unittest import TestCase
from test import support
threading = support.import_module('threading')
HOST = support.HOST
def server(evt, serv):
serv.listen()
evt.set()
try:
conn, addr = serv.accept()
conn.close()
except socket.timeout:
pass
finally:
serv.close()
class GeneralTests(TestCase):
def setUp(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(60) # Safety net. Look issue 11812
self.port = support.bind_port(self.sock)
self.thread = threading.Thread(target=server, args=(self.evt,self.sock))
self.thread.setDaemon(True)
self.thread.start()
self.evt.wait()
def tearDown(self):
self.thread.join()
del self.thread # Clear out any dangling Thread objects.
def testBasic(self):
# connects
telnet = telnetlib.Telnet(HOST, self.port)
telnet.sock.close()
def testTimeoutDefault(self):
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
telnet = telnetlib.Telnet(HOST, self.port)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(telnet.sock.gettimeout(), 30)
telnet.sock.close()
def testTimeoutNone(self):
# None, having other default
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
telnet = telnetlib.Telnet(HOST, self.port, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertTrue(telnet.sock.gettimeout() is None)
telnet.sock.close()
def testTimeoutValue(self):
telnet = telnetlib.Telnet(HOST, self.port, timeout=30)
self.assertEqual(telnet.sock.gettimeout(), 30)
telnet.sock.close()
def testTimeoutOpen(self):
telnet = telnetlib.Telnet()
telnet.open(HOST, self.port, timeout=30)
self.assertEqual(telnet.sock.gettimeout(), 30)
telnet.sock.close()
def testGetters(self):
# Test telnet getter methods
telnet = telnetlib.Telnet(HOST, self.port, timeout=30)
t_sock = telnet.sock
self.assertEqual(telnet.get_socket(), t_sock)
self.assertEqual(telnet.fileno(), t_sock.fileno())
telnet.sock.close()
class SocketStub(object):
''' a socket proxy that re-defines sendall() '''
def __init__(self, reads=()):
self.reads = list(reads) # Intentionally make a copy.
self.writes = []
self.block = False
def sendall(self, data):
self.writes.append(data)
def recv(self, size):
out = b''
while self.reads and len(out) < size:
out += self.reads.pop(0)
if len(out) > size:
self.reads.insert(0, out[size:])
out = out[:size]
return out
class TelnetAlike(telnetlib.Telnet):
def fileno(self):
raise NotImplementedError()
def close(self): pass
def sock_avail(self):
return (not self.sock.block)
def msg(self, msg, *args):
with support.captured_stdout() as out:
telnetlib.Telnet.msg(self, msg, *args)
self._messages += out.getvalue()
return
class MockSelector(selectors.BaseSelector):
def __init__(self):
self.keys = {}
@property
def resolution(self):
return 1e-3
def register(self, fileobj, events, data=None):
key = selectors.SelectorKey(fileobj, 0, events, data)
self.keys[fileobj] = key
return key
def unregister(self, fileobj):
return self.keys.pop(fileobj)
def select(self, timeout=None):
block = False
for fileobj in self.keys:
if isinstance(fileobj, TelnetAlike):
block = fileobj.sock.block
break
if block:
return []
else:
return [(key, key.events) for key in self.keys.values()]
def get_map(self):
return self.keys
@contextlib.contextmanager
def test_socket(reads):
def new_conn(*ignored):
return SocketStub(reads)
try:
old_conn = socket.create_connection
socket.create_connection = new_conn
yield None
finally:
socket.create_connection = old_conn
return
def test_telnet(reads=(), cls=TelnetAlike):
''' return a telnetlib.Telnet object that uses a SocketStub with
reads queued up to be read '''
for x in reads:
assert type(x) is bytes, x
with test_socket(reads):
telnet = cls('dummy', 0)
telnet._messages = '' # debuglevel output
return telnet
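# Hedged usage sketch: the helpers above monkeypatch socket.create_connection with SocketStub,
# so a Telnet object reads from an in-memory byte queue instead of a real socket. With the
# MockSelector installed (as in ExpectAndReadTestCase.setUp below), read_until() drains the
# queued reads:
#   >>> telnetlib._TelnetSelector = MockSelector
#   >>> t = test_telnet([b'hello ', b'world'])
#   >>> t.read_until(b'world')    # b'hello world' (includes the matched pattern)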
class ExpectAndReadTestCase(TestCase):
def setUp(self):
self.old_selector = telnetlib._TelnetSelector
telnetlib._TelnetSelector = MockSelector
def tearDown(self):
telnetlib._TelnetSelector = self.old_selector
class ReadTests(ExpectAndReadTestCase):
def test_read_until(self):
"""
read_until(expected, timeout=None)
test the blocking version of read_until
"""
want = [b'xxxmatchyyy']
telnet = test_telnet(want)
data = telnet.read_until(b'match')
self.assertEqual(data, b'xxxmatch', msg=(telnet.cookedq, telnet.rawq, telnet.sock.reads))
reads = [b'x' * 50, b'match', b'y' * 50]
expect = b''.join(reads[:-1])
telnet = test_telnet(reads)
data = telnet.read_until(b'match')
self.assertEqual(data, expect)
def test_read_all(self):
"""
read_all()
Read all data until EOF; may block.
"""
reads = [b'x' * 500, b'y' * 500, b'z' * 500]
expect = b''.join(reads)
telnet = test_telnet(reads)
data = telnet.read_all()
self.assertEqual(data, expect)
return
def test_read_some(self):
"""
read_some()
Read at least one byte or EOF; may block.
"""
# test 'at least one byte'
telnet = test_telnet([b'x' * 500])
data = telnet.read_some()
self.assertTrue(len(data) >= 1)
# test EOF
telnet = test_telnet()
data = telnet.read_some()
self.assertEqual(b'', data)
def _read_eager(self, func_name):
"""
read_*_eager()
Read all data available already queued or on the socket,
without blocking.
"""
want = b'x' * 100
telnet = test_telnet([want])
func = getattr(telnet, func_name)
telnet.sock.block = True
self.assertEqual(b'', func())
telnet.sock.block = False
data = b''
while True:
try:
data += func()
except EOFError:
break
self.assertEqual(data, want)
def test_read_eager(self):
# read_eager and read_very_eager make the same guarantees
# (they behave differently but we only test the guarantees)
self._read_eager('read_eager')
self._read_eager('read_very_eager')
# NB -- we need to test the IAC block which is mentioned in the
# docstring but not in the module docs
def test_read_very_lazy(self):
want = b'x' * 100
telnet = test_telnet([want])
self.assertEqual(b'', telnet.read_very_lazy())
while telnet.sock.reads:
telnet.fill_rawq()
data = telnet.read_very_lazy()
self.assertEqual(want, data)
self.assertRaises(EOFError, telnet.read_very_lazy)
def test_read_lazy(self):
want = b'x' * 100
telnet = test_telnet([want])
self.assertEqual(b'', telnet.read_lazy())
data = b''
while True:
try:
read_data = telnet.read_lazy()
data += read_data
if not read_data:
telnet.fill_rawq()
except EOFError:
break
self.assertTrue(want.startswith(data))
self.assertEqual(data, want)
class nego_collector(object):
def __init__(self, sb_getter=None):
self.seen = b''
self.sb_getter = sb_getter
self.sb_seen = b''
def do_nego(self, sock, cmd, opt):
self.seen += cmd + opt
if cmd == tl.SE and self.sb_getter:
sb_data = self.sb_getter()
self.sb_seen += sb_data
tl = telnetlib
class WriteTests(TestCase):
'''The only thing that write does is replace each tl.IAC for
tl.IAC+tl.IAC'''
def test_write(self):
data_sample = [b'data sample without IAC',
b'data sample with' + tl.IAC + b' one IAC',
b'a few' + tl.IAC + tl.IAC + b' iacs' + tl.IAC,
tl.IAC,
b'']
for data in data_sample:
telnet = test_telnet()
telnet.write(data)
written = b''.join(telnet.sock.writes)
self.assertEqual(data.replace(tl.IAC,tl.IAC+tl.IAC), written)
class OptionTests(TestCase):
# RFC 854 commands
cmds = [tl.AO, tl.AYT, tl.BRK, tl.EC, tl.EL, tl.GA, tl.IP, tl.NOP]
def _test_command(self, data):
""" helper for testing IAC + cmd """
telnet = test_telnet(data)
data_len = len(b''.join(data))
nego = nego_collector()
telnet.set_option_negotiation_callback(nego.do_nego)
txt = telnet.read_all()
cmd = nego.seen
self.assertTrue(len(cmd) > 0) # we expect at least one command
self.assertIn(cmd[:1], self.cmds)
self.assertEqual(cmd[1:2], tl.NOOPT)
self.assertEqual(data_len, len(txt + cmd))
nego.sb_getter = None # break the nego => telnet cycle
def test_IAC_commands(self):
for cmd in self.cmds:
self._test_command([tl.IAC, cmd])
self._test_command([b'x' * 100, tl.IAC, cmd, b'y'*100])
self._test_command([b'x' * 10, tl.IAC, cmd, b'y'*10])
# all at once
self._test_command([tl.IAC + cmd for (cmd) in self.cmds])
def test_SB_commands(self):
# RFC 855, subnegotiations portion
send = [tl.IAC + tl.SB + tl.IAC + tl.SE,
tl.IAC + tl.SB + tl.IAC + tl.IAC + tl.IAC + tl.SE,
tl.IAC + tl.SB + tl.IAC + tl.IAC + b'aa' + tl.IAC + tl.SE,
tl.IAC + tl.SB + b'bb' + tl.IAC + tl.IAC + tl.IAC + tl.SE,
tl.IAC + tl.SB + b'cc' + tl.IAC + tl.IAC + b'dd' + tl.IAC + tl.SE,
]
telnet = test_telnet(send)
nego = nego_collector(telnet.read_sb_data)
telnet.set_option_negotiation_callback(nego.do_nego)
txt = telnet.read_all()
self.assertEqual(txt, b'')
want_sb_data = tl.IAC + tl.IAC + b'aabb' + tl.IAC + b'cc' + tl.IAC + b'dd'
self.assertEqual(nego.sb_seen, want_sb_data)
self.assertEqual(b'', telnet.read_sb_data())
nego.sb_getter = None # break the nego => telnet cycle
def test_debuglevel_reads(self):
# test all the various places that self.msg(...) is called
given_a_expect_b = [
# Telnet.fill_rawq
(b'a', ": recv b''\n"),
# Telnet.process_rawq
(tl.IAC + bytes([88]), ": IAC 88 not recognized\n"),
(tl.IAC + tl.DO + bytes([1]), ": IAC DO 1\n"),
(tl.IAC + tl.DONT + bytes([1]), ": IAC DONT 1\n"),
(tl.IAC + tl.WILL + bytes([1]), ": IAC WILL 1\n"),
(tl.IAC + tl.WONT + bytes([1]), ": IAC WONT 1\n"),
]
for a, b in given_a_expect_b:
telnet = test_telnet([a])
telnet.set_debuglevel(1)
txt = telnet.read_all()
self.assertIn(b, telnet._messages)
return
def test_debuglevel_write(self):
telnet = test_telnet()
telnet.set_debuglevel(1)
telnet.write(b'xxx')
expected = "send b'xxx'\n"
self.assertIn(expected, telnet._messages)
def test_debug_accepts_str_port(self):
# Issue 10695
with test_socket([]):
telnet = TelnetAlike('dummy', '0')
telnet._messages = ''
telnet.set_debuglevel(1)
telnet.msg('test')
self.assertRegex(telnet._messages, r'0.*test')
class ExpectTests(ExpectAndReadTestCase):
def test_expect(self):
"""
expect(expected, [timeout])
Read until the expected string has been seen, or a timeout is
hit (default is no timeout); may block.
"""
want = [b'x' * 10, b'match', b'y' * 10]
telnet = test_telnet(want)
(_,_,data) = telnet.expect([b'match'])
self.assertEqual(data, b''.join(want[:-1]))
if __name__ == '__main__':
unittest.main()
|
app.py
|
#!/usr/bin/python3
# https://github.com/tnware/product-checker
# by Tyler Woods
# coded for Bird Bot and friends
# https://tylermade.net
import requests
import time
import json
import random
from datetime import datetime
import urllib.parse as urlparse
from urllib.parse import parse_qs
#import webhook_settings
#import product_settings
from threading import Thread
from selenium import webdriver
from chromedriver_py import binary_path as driver_path
from lxml import html
stockdict = {}
sku_dict = {}
bestbuylist = []
targetlist = []
walmartlist = []
bhlist = []
bbdict = {}
bbimgdict = {}
amazonlist = []
gamestoplist = []
#Function for start-up menu
MAX_PRICE = 315
def menu():
webhook_dict = return_data("./data/webhooks.json")
urldict = return_data("./data/products.json")
#print("Select an Option: \n 1: Edit Webhooks \n 2: Edit Product URLs \n 3: Run the product tracker \n")
#val = input("Enter # (1-3)")
# if val == "1":
# webhook_settings.main()
# menu()
# elif val == "2":
# product_settings.main()
# menu()
#if val == "3":
# print("\n \n Starting Product Tracker! \n \n")
#else:
# menu()
def send_email(message):
# using SendGrid's Python Library
# https://github.com/sendgrid/sendgrid-python
import os
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
print ("Sending Email")
message = Mail(
from_email='[email protected]',
to_emails='[email protected]',
subject='In Stock',
html_content=message )
try:
sg = SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))  # 'key' was undefined; read the API key from the environment
response = sg.send(message)
except Exception as e:
print ("Error Sending Message ")
print(e)
def return_data(path):
with open(path,"r") as file:
data = json.load(file)
file.close()
return data
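# Illustrative sketch (not from the source) of the expected JSON layouts, inferred from how the
# return_data() results are used below: products.json maps a product URL to a webhook name,
# and webhooks.json maps that name to a webhook URL. Values here are placeholders.
#
#   ./data/products.json  -> {"https://www.target.com/p/example/-/A-00000000": "main"}
#   ./data/webhooks.json  -> {"main": "https://discord.com/api/webhooks/<id>/<token>"}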
#Prompt the user at startup
menu()
#Only declare the webhook and product lists after the menu has been passed so that changes made from menu selections are up to date
webhook_dict = return_data("./data/webhooks.json")
urldict = return_data("./data/products.json")
#Declare classes for the webpage scraping functionality
class Amazon:
def __init__(self, url, hook):
self.url = url
self.hook = hook
webhook_url = webhook_dict[hook]
now = datetime.now()
current_time = now.strftime("%Y-%m-%d %H:%M:%S")
options = webdriver.ChromeOptions()
options.add_experimental_option('excludeSwitches', ['enable-logging'])
options.add_argument('log-level=3')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--user-agent="Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.50 Safari/537.36"')
options.add_argument("headless")
driver = webdriver.Chrome(executable_path=driver_path, chrome_options=options)
driver.get(url)
html = driver.page_source
if "To discuss automated access to Amazon data please contact [email protected]." in html:
print("Amazons Bot Protection is preventing this call.")
else:
status_raw = driver.find_element_by_xpath("//div[@id='olpOfferList']")
status_text = status_raw.text
title_raw = driver.find_element_by_xpath("//h1[@class='a-size-large a-spacing-none']")
title_text = title_raw.text
title = title_text
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
if "Currently, there are no sellers that can deliver this item to your location." not in status_text:
print("[" + current_time + "] " + "In Stock: (Amazon.com) " + title + " - " + url)
slack_data = {'text' :"In Stock: " + url, 'content': "[" + current_time + "] " + title + " in stock at Amazon - " + url}
if stockdict.get(url) == 'False':
send_email("[" + current_time + "] " + "In Stock: (Amazon.com) " + title + " - " + url)
stockdict.update({url: 'True'})
else:
print("[" + current_time + "] " + "Sold Out: (Amazon.com) " + title)
stockdict.update({url: 'False'})
driver.quit()
class Gamestop:
def __init__(self, url, hook):
self.url = url
self.hook = hook
webhook_url = webhook_dict[hook]
now = datetime.now()
current_time = now.strftime("%Y-%m-%d %H:%M:%S")
options = webdriver.ChromeOptions()
options.add_experimental_option('excludeSwitches', ['enable-logging'])
options.add_argument('log-level=3')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--user-agent="Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.50 Safari/537.36"')
options.add_argument("headless")
driver = webdriver.Chrome(executable_path=driver_path, chrome_options=options)
driver.get(url)
html = driver.page_source
status_raw = driver.find_element_by_xpath("//div[@class='add-to-cart-buttons']")
status_text = status_raw.text
title_raw = driver.find_element_by_xpath("//h1[@class='product-name h2']")
title_text = title_raw.text
title = title_text
image_raw = driver.find_element_by_xpath("//img[@class='mainImg ae-img']")
img = image_raw.get_attribute('src')
if "ADD TO CART" in status_text:
print("[" + current_time + "] " + "In Stock: (Gamestop.com) " + title + " - " + url)
slack_data = {
'username': "GameStop Bot",
'content': "GameStop Stock Alert:",
'embeds': [{
'title': title,
'description': title + " in stock at GameStop",
'url': url,
"fields": [
{
"name": "Time:",
"value": current_time
},
{
"name": "Status:",
"value": "In Stock"
}
],
'thumbnail': {
'url': img
}
}]
}
if stockdict.get(url) == 'False':
send_email("[" + current_time + "] " + "In Stock: (Gamestop.com) " + title + " - " + url)
stockdict.update({url: 'True'})
else:
print("[" + current_time + "] " + "Sold Out: (Gamestop.com) " + title)
stockdict.update({url: 'False'})
driver.quit()
class Target:
def __init__(self, url, hook):
self.url = url
self.hook = hook
webhook_url = webhook_dict[hook]
now = datetime.now()
current_time = now.strftime("%Y-%m-%d %H:%M:%S")
# UserLocation: "87114|35.18954849243164|-106.66831970214844|NM|US"
# GuestLocation: "87114|35.18954849243164|-106.66831970214844|NM|US"
cookies = {"UserLocation": "87114|35.18954849243164|-106.66831970214844|NM|US", "GuestLocation": "87114|35.18954849243164|-106.66831970214844|NM|US"}
page = requests.get(url)
al = page.text
tree = html.fromstring(page.content)
imgs = tree.xpath("//img[1]")
img_raw = str(imgs[0].attrib)
img = img_raw[20:-2]
title = al[al.find('"twitter":{"title":') + 20 : al.find('","card')]
#print(title)
if "Temporarily out of stock" in page.text:
print("[" + current_time + "] " + "Sold Out: (Target.com) " + title)
stockdict.update({url: 'False'})
else:
f = open('temp.html', 'w')
f.write(str(page.content))
f.close()
f = open('target.txt', 'a')
f.write("[" + current_time + "] " + "In Stock: (Target.com) " + title + " - " + url + "\n")
f.close()
print("[" + current_time + "] " + "In Stock: (Target.com) " + title + " - " + url)
slack_data = {
'username': "Target Bot",
'avatar_url': "https://github.com/tnware/product-checker/raw/master/img/target.png",
'content': "Target Stock Alert:",
'embeds': [{
'title': title,
'description': title + " in stock at Target",
'url': url,
"fields": [
{
"name": "Time:",
"value": current_time
},
{
"name": "Status:",
"value": "In Stock"
}
],
'thumbnail': {
'url': img
}
}]
}
if stockdict.get(url) == 'False':
send_email("[" + current_time + "] " + "In Stock: (Target.com) " + title + " - " + url)
stockdict.update({url: 'True'})
#print(stockdict)
return True
class BestBuy:
def __init__(self, sku, hook):
self.sku = sku
self.hook = hook
webhook_url = webhook_dict[hook]
now = datetime.now()
current_time = now.strftime("%Y-%m-%d %H:%M:%S")
url = "https://www.bestbuy.com/api/tcfb/model.json?paths=%5B%5B%22shop%22%2C%22scds%22%2C%22v2%22%2C%22page%22%2C%22tenants%22%2C%22bbypres%22%2C%22pages%22%2C%22globalnavigationv5sv%22%2C%22header%22%5D%2C%5B%22shop%22%2C%22buttonstate%22%2C%22v5%22%2C%22item%22%2C%22skus%22%2C" + sku + "%2C%22conditions%22%2C%22NONE%22%2C%22destinationZipCode%22%2C%22%2520%22%2C%22storeId%22%2C%22%2520%22%2C%22context%22%2C%22cyp%22%2C%22addAll%22%2C%22false%22%5D%5D&method=get"
headers2 = {
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"accept-encoding": "gzip, deflate, br",
"accept-language": "en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7",
"cache-control": "max-age=0",
"upgrade-insecure-requests": "1",
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.69 Safari/537.36"
}
cookies = {"customerZipCode": "87114|Y", "locDestZip": "87114"}
page = requests.get(url, headers=headers2, cookies=cookies)
link = "https://www.bestbuy.com/site/" + sku + ".p?skuId=" + sku
al = page.text
search_string = '"skuId":"' + sku + '","buttonState":"'
stock_status = al[al.find(search_string) + 33 : al.find('","displayText"')]
product_name = sku_dict.get(sku)
if stock_status == "SOLD_OUT":
print("[" + current_time + "] " + "Sold Out: (BestBuy.com) " + product_name)
stockdict.update({sku: 'False'})
elif stock_status == "CHECK_STORES":
print(product_name + " sold out @ BestBuy (check stores status)")
stockdict.update({sku: 'False'})
# f = open('bestbuy.txt', 'a')
# f.write("[" + current_time + "] " + product_name + " sold out @ BestBuy (check stores status \n")
# f.close()
else:
if stock_status == "ADD_TO_CART":
print(page.cookies)  # page.cookies is already a RequestsCookieJar
print("[" + current_time + "] " + "In Stock: (BestBuy.com) " + product_name + " - " + link)
f = open('bestbuy.txt', 'a')
f.write("[" + current_time + "] " + "In Stock: (BestBuy.com) " + product_name + " - " + link + "\n")
f.close()
slack_data = {
'username': "BestBuy Bot",
'avatar_url': "https://github.com/tnware/product-checker/raw/master/img/bestbuy.png",
'content': "BestBuy Stock Alert:",
'embeds': [{
'title': product_name,
'description': product_name + " in stock at BestBuy",
'url': link,
"fields": [
{
"name": "Time:",
"value": current_time
},
{
"name": "Status:",
"value": "In Stock"
}
],
'thumbnail': {
'url': bbimgdict.get(sku)
}
}]
}
if stockdict.get(sku) == 'False':
send_email("[" + current_time + "] " + "In Stock: (BestBuy.com) " + product_name + " - " + link)
stockdict.update({sku: 'True'})
#print(stockdict)
class Walmart:
def __init__(self, url, hook):
self.url = url
self.hook = hook
webhook_url = webhook_dict[hook]
now = datetime.now()
current_time = now.strftime("%Y-%m-%d %H:%M:%S")
page = requests.get(url)
tree = html.fromstring(page.content)
title_raw = tree.xpath("//h1[@class='prod-ProductTitle font-normal']")
title = title_raw[0].text
price_raw = tree.xpath("//span[@class='price display-inline-block arrange-fit price price--stylized']//span[@class='price-characteristic']")
price = price_raw[0].text
img_raw = tree.xpath("//meta[@property='og:image']/@content")
img = img_raw[0]
if page.status_code == 200:
if "Add to cart" in page.text and int(price) < MAX_PRICE:
print("[" + current_time + "] " + "In Stock: (Walmart.com) " + title + " for $" + price + " - " + url)
f = open('walmart.txt', 'a')
f.write("[" + current_time + "] " + "In Stock: (Walmart.com) " + title + " for $" + price + " - " + url + "\n")
f.close()
slack_data = {
'username': "Walmart Bot",
'avatar_url': "https://github.com/tnware/product-checker/raw/master/img/walmart.png",
'content': "Walmart Stock Alert:",
'embeds': [{
'title': title,
'description': title + " in stock at Walmart for $" + price,
'url': url,
"fields": [
{
"name": "Time:",
"value": current_time
},
{
"name": "Price:",
"value": "$" + price
}
],
'thumbnail': {
'url': img
}
}]
}
if stockdict.get(url) == 'False':
send_email("[" + current_time + "] " + "In Stock: (Walmart.com) " + title + " for $" + price + " - " + url)
stockdict.update({url: 'True'})
else:
print("[" + current_time + "] " + "Sold Out: (Walmart.com) " + title)
stockdict.update({url: 'False'})
class BH:
def __init__(self, url, hook):
self.url = url
self.hook = hook
webhook_url = webhook_dict[hook]
page = requests.get(url)
now = datetime.now()
current_time = now.strftime("%Y-%m-%d %H:%M:%S")
if page.status_code == 200:
if "Add to Cart" in page.text:
print("[" + current_time + "] " + "In Stock: (bhphotovideo.com) " + url)
f = open('bh.txt', 'a')
f.write("[" + current_time + "] " + "In Stock: (bhphotovideo.com) " + url + "\n")
f.close()
slack_data = {'text' :"In Stock: " + url, 'content': "[" + current_time + "] " + url + " in stock at B&H"}
if stockdict.get(url) == 'False':
send_email("[" + current_time + "] " + "In Stock: (bhphotovideo.com) " + url)
stockdict.update({url: 'True'})
else:
print("[" + current_time + "] " + "Sold Out: (bhphotovideo.com) " + url)
stockdict.update({url: 'False'})
#Classify all the URLs by site
for url in urldict:
hook = urldict[url] #get the hook for the url so it can be passed in to the per-site lists being generated below
#Amazon URL Detection
if "amazon.com" in url:
if "offer-listing" in url:
amazonlist.append(url)
print("Amazon detected using Webhook destination " + hook)
else:
print("Invalid Amazon link detected. Please use the Offer Listing page.")
    #Gamestop URL Detection
elif "gamestop.com" in url:
gamestoplist.append(url)
print("Gamestop URL detected using Webhook destination " + hook)
#BestBuy URL Detection
elif "bestbuy.com" in url:
print("BestBuy URL detected using Webhook destination " + hook)
parsed = urlparse.urlparse(url)
sku = parse_qs(parsed.query)['skuId']
sku = sku[0]
bestbuylist.append(sku)
headers = {
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"accept-encoding": "gzip, deflate, br",
"accept-language": "en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7",
"cache-control": "max-age=0",
"upgrade-insecure-requests": "1",
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.69 Safari/537.36"
}
page = requests.get(url, headers=headers)
al = page.text
tree = html.fromstring(page.content)
img = tree.xpath('//img[@class="primary-image"]/@src')[0]
title = al[al.find('<title >') + 8 : al.find(' - Best Buy</title>')]
sku_dict.update({sku: title})
bbdict.update({sku: hook})
bbimgdict.update({sku: img})
#Target URL Detection
elif "target.com" in url:
targetlist.append(url)
print("Target URL detected using Webhook destination " + hook)
#Walmart URL Detection
elif "walmart.com" in url:
walmartlist.append(url)
print("Walmart URL detected using Webhook destination " + hook)
#B&H Photo URL Detection
elif "bhphotovideo.com" in url:
bhlist.append(url)
print("B&H URL detected using Webhook destination " + hook)
#set all URLs to be "out of stock" to begin
for url in urldict:
stockdict.update({url: 'False'})
#set all SKUs to be "out of stock" to begin
for sku in sku_dict:
stockdict.update({sku: 'False'})
#DECLARE SITE FUNCTIONS
def amzfunc(url):
while True:
hook = urldict[url]
try:
Amazon(url, hook)
except:
print("Some error ocurred parsing Amazon")
time.sleep(random.randint(10,60))
def gamestopfunc(url):
while True:
hook = urldict[url]
try:
Gamestop(url, hook)
except:
print("Some error ocurred parsing Gamestop")
time.sleep(random.randint(10,60))
def targetfunc(url):
while True:
hook = urldict[url]
try:
if Target(url, hook):
print ("Found one sleeping for a bit")
time.sleep(1800)
except Exception as e:
print("Some error ocurred parsing Target")
print (e)
time.sleep(random.randint(10,60))
def bhfunc(url):
while True:
hook = urldict[url]
try:
BH(url, hook)
except:
print("Some error ocurred parsing BH Photo")
time.sleep(random.randint(10,60))
def bestbuyfunc(sku):
while True:
hook = bbdict[sku]
try:
BestBuy(sku, hook)
except:
print("Some error ocurred parsing Best Buy")
time.sleep(random.randint(10,60))
def walmartfunc(url):
while True:
hook = urldict[url]
try:
Walmart(url, hook)
except:
print("Some error ocurred parsing WalMart")
time.sleep(random.randint(10,60))
# MAIN EXECUTION
for url in amazonlist:
t = Thread(target=amzfunc, args=(url,))
t.start()
time.sleep(0.5)
#for url in gamestoplist:
# t = Thread(target=gamestopfunc, args=(url,))
# t.start()
# time.sleep(0.5)
for url in targetlist:
t = Thread(target=targetfunc, args=(url,))
t.start()
time.sleep(0.5)
for url in bhlist:
t = Thread(target=bhfunc, args=(url,))
t.start()
time.sleep(0.5)
for sku in bestbuylist:
t = Thread(target=bestbuyfunc, args=(sku,))
t.start()
time.sleep(0.5)
for url in walmartlist:
t = Thread(target=walmartfunc, args=(url,))
t.start()
time.sleep(0.5)
|
video_async.py
|
import threading
class VideoCaptureAsync:
    """Wraps a capture object that exposes read() and keeps grabbing frames on a background thread."""
def __init__(self,camera):
self.camera = camera
self.grabbed, self.frame = self.camera.read()
self.started = False
self.read_lock = threading.Lock()
def startReading(self):
if self.started:
            print('Asynchronous video capture already started')
return None
self.started = True
self.thread = threading.Thread(target=self.update, args = ())
self.thread.start()
def update(self):
while self.started:
grabbed, frame = self.camera.read()
with self.read_lock:
self.frame = frame
self.grabbed = grabbed
def read(self):
with self.read_lock:
frame = self.frame.copy()
grabbed = self.grabbed
return grabbed, frame
def stop(self):
self.started = False
self.thread.join()
    def __exit__(self, exc_type, exc_value, traceback):
        self.camera.stop()
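# A minimal usage sketch, not part of the original module: it assumes OpenCV is installed
# ("pip install opencv-python") and that a working camera is available at index 0.
if __name__ == "__main__":
    import cv2  # assumption: this module itself does not require OpenCV
    capture = VideoCaptureAsync(cv2.VideoCapture(0))
    capture.startReading()           # frames are now grabbed on a background thread
    grabbed, frame = capture.read()  # returns the most recently grabbed frame without blocking
    if grabbed:
        print(frame.shape)
    capture.stop()                   # stops the background thread and joins it
    capture.camera.release()         # release the underlying OpenCV capture device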
|
simple_nn_parallel.py
|
import numpy as np
import tensorflow as tf
from tensorflow.keras.datasets import mnist
import multiprocessing as mp
import ray
ray.init()
@ray.remote
def nn_predictions(model, objects):
results = []
for obj in objects:
res = model.predict(obj.reshape(1, -1))
results.append(np.argmax(res))
print(results)
return results
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print("Training data shape: ", x_train.shape)
print("Test data shape", x_test.shape)
image_vector_size = 28 * 28
x_train = x_train.reshape(x_train.shape[0], image_vector_size)
x_test = x_test.reshape(x_test.shape[0], image_vector_size)
num_classes = 10
y_train = tf.keras.utils.to_categorical(y_train, num_classes)
y_test = tf.keras.utils.to_categorical(y_test, num_classes)
print("First 5 training lables as one-hot encoded vectors:\n", y_train[:5])
image_size = 784
model = tf.keras.models.Sequential()
# The input layer requires the special input_shape parameter which should match
# the shape of our training data.
model.add(tf.keras.layers.Dense(units=32, activation='sigmoid', input_shape=(image_size,)))
model.add(tf.keras.layers.Dense(units=num_classes, activation='softmax'))
# model.summary()
print("x_shape: ", x_train.shape)
model.compile(optimizer="sgd", loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(x_train, y_train, batch_size=8, epochs=1, verbose=False, validation_split=.1)
loss, accuracy = model.evaluate(x_test, y_test, verbose=False)
num_processes = 5
jobs = []
# nn_predictions(model, x_test[0:5])
for i in range(num_processes):
process = mp.Process(name=f'background_process {i}', target=nn_predictions, args=(model, x_test[i * 5:(i + 1) * 5]))
process.daemon = False
jobs.append(process)
process.start()
# futures = [nn_predictions.remote(model, x_test[i*5:(i+1)*5]) for i in range(num_processes)]
# print(ray.get(futures))
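# Note on the two parallelism paths above: because nn_predictions is decorated with
# @ray.remote, Ray expects it to be invoked as nn_predictions.remote(...) and collected
# with ray.get(...) (the commented-out lines above); calling it directly, as the
# multiprocessing targets do, raises a TypeError inside the worker processes. The Ray
# path is therefore the one that matches the decorator as written.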
|
A3C_rnn.py
|
"""
Asynchronous Advantage Actor Critic (A3C), Reinforcement Learning.
The BipedalWalker example.
View more on [莫烦Python] : https://morvanzhou.github.io/2_tensorflow_old/
Using:
tensorflow 1.8.0
gym 0.10.5
"""
import multiprocessing
import threading
import tensorflow as tf
import numpy as np
import gym
import os
import shutil
GAME = 'BipedalWalker-v2'
OUTPUT_GRAPH = False
LOG_DIR = './log'
N_WORKERS = multiprocessing.cpu_count()
MAX_GLOBAL_EP = 8000
GLOBAL_NET_SCOPE = 'Global_Net'
UPDATE_GLOBAL_ITER = 10
GAMMA = 0.9
ENTROPY_BETA = 0.001
LR_A = 0.00002    # learning rate for actor
LR_C = 0.0001     # learning rate for critic
GLOBAL_RUNNING_R = []
GLOBAL_EP = 0
env = gym.make(GAME)
N_S = env.observation_space.shape[0]
N_A = env.action_space.shape[0]
A_BOUND = [env.action_space.low, env.action_space.high]
del env
class ACNet(object):
def __init__(self, scope, globalAC=None):
if scope == GLOBAL_NET_SCOPE: # get global network
with tf.variable_scope(scope):
self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
self._build_net()
self.a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
self.c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
else: # local net, calculate losses
with tf.variable_scope(scope):
self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
self.a_his = tf.placeholder(tf.float32, [None, N_A], 'A')
self.v_target = tf.placeholder(tf.float32, [None, 1], 'Vtarget')
mu, sigma, self.v = self._build_net()
td = tf.subtract(self.v_target, self.v, name='TD_error')
with tf.name_scope('c_loss'):
self.c_loss = tf.reduce_mean(tf.square(td))
with tf.name_scope('wrap_a_out'):
self.test = sigma[0]
mu, sigma = mu * A_BOUND[1], sigma + 1e-5
normal_dist = tf.contrib.distributions.Normal(mu, sigma)
with tf.name_scope('a_loss'):
log_prob = normal_dist.log_prob(self.a_his)
exp_v = log_prob * td
entropy = normal_dist.entropy() # encourage exploration
self.exp_v = ENTROPY_BETA * entropy + exp_v
self.a_loss = tf.reduce_mean(-self.exp_v)
with tf.name_scope('choose_a'): # use local params to choose action
self.A = tf.clip_by_value(tf.squeeze(normal_dist.sample(1)), A_BOUND[0], A_BOUND[1])
with tf.name_scope('local_grad'):
self.a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
self.c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
self.a_grads = tf.gradients(self.a_loss, self.a_params)
self.c_grads = tf.gradients(self.c_loss, self.c_params)
with tf.name_scope('sync'):
with tf.name_scope('pull'):
self.pull_a_params_op = [l_p.assign(g_p) for l_p, g_p in
zip(self.a_params, globalAC.a_params)]
self.pull_c_params_op = [l_p.assign(g_p) for l_p, g_p in
zip(self.c_params, globalAC.c_params)]
with tf.name_scope('push'):
self.update_a_op = OPT_A.apply_gradients(zip(self.a_grads, globalAC.a_params))
self.update_c_op = OPT_C.apply_gradients(zip(self.c_grads, globalAC.c_params))
def _build_net(self):
w_init = tf.random_normal_initializer(0., .1)
with tf.variable_scope('critic'): # only critic controls the rnn update
cell_size = 126
s = tf.expand_dims(self.s, axis=1,
name='timely_input') # [time_step, feature] => [time_step, batch, feature]
rnn_cell = tf.contrib.rnn.BasicRNNCell(cell_size)
self.init_state = rnn_cell.zero_state(batch_size=1, dtype=tf.float32)
outputs, self.final_state = tf.nn.dynamic_rnn(
cell=rnn_cell, inputs=s, initial_state=self.init_state, time_major=True)
cell_out = tf.reshape(outputs, [-1, cell_size], name='flatten_rnn_outputs') # joined state representation
l_c = tf.layers.dense(cell_out, 512, tf.nn.relu6, kernel_initializer=w_init, name='lc')
v = tf.layers.dense(l_c, 1, kernel_initializer=w_init, name='v') # state value
with tf.variable_scope('actor'): # state representation is based on critic
cell_out = tf.stop_gradient(cell_out, name='c_cell_out') # from what critic think it is
l_a = tf.layers.dense(cell_out, 512, tf.nn.relu6, kernel_initializer=w_init, name='la')
mu = tf.layers.dense(l_a, N_A, tf.nn.tanh, kernel_initializer=w_init, name='mu')
sigma = tf.layers.dense(l_a, N_A, tf.nn.softplus, kernel_initializer=w_init, name='sigma') # restrict variance
return mu, sigma, v
def update_global(self, feed_dict): # run by a local
_, _, t = SESS.run([self.update_a_op, self.update_c_op, self.test], feed_dict) # local grads applies to global net
return t
def pull_global(self): # run by a local
SESS.run([self.pull_a_params_op, self.pull_c_params_op])
def choose_action(self, s, cell_state): # run by a local
s = s[np.newaxis, :]
a, cell_state = SESS.run([self.A, self.final_state], {self.s: s, self.init_state: cell_state})
return a, cell_state
class Worker(object):
def __init__(self, name, globalAC):
self.env = gym.make(GAME)
self.name = name
self.AC = ACNet(name, globalAC)
def work(self):
global GLOBAL_RUNNING_R, GLOBAL_EP
total_step = 1
buffer_s, buffer_a, buffer_r = [], [], []
while not COORD.should_stop() and GLOBAL_EP < MAX_GLOBAL_EP:
s = self.env.reset()
ep_r = 0
rnn_state = SESS.run(self.AC.init_state) # zero rnn state at beginning
keep_state = rnn_state.copy() # keep rnn state for updating global net
while True:
if self.name == 'W_0' and total_step % 30 == 0:
self.env.render()
a, rnn_state_ = self.AC.choose_action(s, rnn_state) # get the action and next rnn state
s_, r, done, info = self.env.step(a)
if r == -100: r = -2
ep_r += r
buffer_s.append(s)
buffer_a.append(a)
buffer_r.append(r)
if total_step % UPDATE_GLOBAL_ITER == 0 or done: # update global and assign to local net
if done:
v_s_ = 0 # terminal
else:
v_s_ = SESS.run(self.AC.v, {self.AC.s: s_[np.newaxis, :], self.AC.init_state: rnn_state_})[
0, 0]
buffer_v_target = []
for r in buffer_r[::-1]: # reverse buffer r
v_s_ = r + GAMMA * v_s_
buffer_v_target.append(v_s_)
buffer_v_target.reverse()
buffer_s, buffer_a, buffer_v_target = np.vstack(buffer_s), np.vstack(buffer_a), np.vstack(
buffer_v_target)
feed_dict = {
self.AC.s: buffer_s,
self.AC.a_his: buffer_a,
self.AC.v_target: buffer_v_target,
self.AC.init_state: keep_state,
}
test = self.AC.update_global(feed_dict)
buffer_s, buffer_a, buffer_r = [], [], []
self.AC.pull_global()
keep_state = rnn_state_.copy() # replace the keep_state as the new initial rnn state_
s = s_
rnn_state = rnn_state_ # renew rnn state
total_step += 1
if done:
achieve = '| Achieve' if self.env.unwrapped.hull.position[0] >= 88 else '| -------'
if len(GLOBAL_RUNNING_R) == 0: # record running episode reward
GLOBAL_RUNNING_R.append(ep_r)
else:
GLOBAL_RUNNING_R.append(0.95 * GLOBAL_RUNNING_R[-1] + 0.05 * ep_r)
print(
self.name,
"Ep:", GLOBAL_EP,
achieve,
"| Pos: %i" % self.env.unwrapped.hull.position[0],
"| RR: %.1f" % GLOBAL_RUNNING_R[-1],
'| EpR: %.1f' % ep_r,
'| var:', test,
)
GLOBAL_EP += 1
break
if __name__ == "__main__":
SESS = tf.Session()
with tf.device("/cpu:0"):
OPT_A = tf.train.RMSPropOptimizer(LR_A, name='RMSPropA', decay=0.95)
OPT_C = tf.train.RMSPropOptimizer(LR_C, name='RMSPropC', decay=0.95)
GLOBAL_AC = ACNet(GLOBAL_NET_SCOPE) # we only need its params
workers = []
# Create worker
for i in range(N_WORKERS):
i_name = 'W_%i' % i # worker name
workers.append(Worker(i_name, GLOBAL_AC))
COORD = tf.train.Coordinator()
SESS.run(tf.global_variables_initializer())
if OUTPUT_GRAPH:
if os.path.exists(LOG_DIR):
shutil.rmtree(LOG_DIR)
tf.summary.FileWriter(LOG_DIR, SESS.graph)
worker_threads = []
for worker in workers:
t = threading.Thread(target=worker.work)
t.start()
worker_threads.append(t)
COORD.join(worker_threads)
import matplotlib.pyplot as plt
plt.plot(GLOBAL_RUNNING_R)
plt.xlabel('episode')
plt.ylabel('global running reward')
plt.show()
|
testrunner.py
|
# Copyright 2010 Orbitz WorldWide
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified by NSN
# Copyright 2010-2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from Queue import Empty, Queue
import SocketServer
import atexit
import codecs
import os
import pickle
import shutil
import socket
import subprocess
import tempfile
import threading
import signal
import sys
from robot.output.loggerhelper import LEVELS
from robot.utils.encoding import SYSTEM_ENCODING
from robotide.context.platform import IS_WINDOWS
from robotide.contrib.testrunner import TestRunnerAgent
from robotide.controller.testexecutionresults import TestExecutionResults
import robotide.utils as utils
ATEXIT_LOCK = threading.RLock()
class TestRunner(object):
def __init__(self, chief):
self._output_dir = None
self._process = None
self._server = None
self._server_thread = None
self._pause_on_failure = False
self._results = TestExecutionResults()
self.port = None
self._chief = chief
self.profiles = {}
def enable(self, result_handler):
self._start_listener_server(result_handler)
self._create_temporary_directory()
def _create_temporary_directory(self):
self._output_dir = tempfile.mkdtemp(".d", "RIDE")
atexit.register(self._remove_temporary_directory)
# this plugin creates a temporary directory which _should_
# get reaped at exit. Sometimes things happen which might
# cause it to not get deleted. Maybe this would be a good
# place to check for temporary directories that match the
# signature and delete them if they are more than a few
# days old...
def _remove_temporary_directory(self):
with ATEXIT_LOCK:
if os.path.exists(self._output_dir):
shutil.rmtree(self._output_dir)
def add_profile(self, name, item):
self.profiles[name] = item
def get_profile(self, name):
return self.profiles[name]
def get_profile_names(self):
return sorted(self.profiles.keys())
def _start_listener_server(self, result_handler):
def handle(*args):
self._result_handler(*args)
result_handler(*args)
self._server = RideListenerServer(RideListenerHandler, handle)
self._server_thread = threading.Thread(target=self._server.serve_forever)
self._server_thread.setDaemon(True)
self._server_thread.start()
self.port = self._server.server_address[1]
def _result_handler(self, event, *args):
if event == 'pid':
self._pid_to_kill = int(args[0])
if event == 'port' and self._process:
self._process.set_port(args[0])
if event == 'start_test':
longname = args[1]['longname']
testname = args[0]
self._results.set_running(self._get_test_controller(longname, testname))
if event == 'end_test':
longname = args[1]['longname']
testname = args[0]
if args[1]['status'] == 'PASS':
self._results.set_passed(self._get_test_controller(longname, testname))
else:
self._results.set_failed(self._get_test_controller(longname, testname))
def _get_test_controller(self, longname, testname = None):
ret = self._chief.find_controller_by_longname(longname, testname)
return ret
def clear_server(self):
self._server = None
def shutdown_server(self):
if self._server:
self._server.shutdown()
def test_execution_started(self):
self._results.test_execution_started()
def kill_process(self):
if self._process:
self._process.kill(force=True)
def set_pause_on_failure(self, pause):
self._pause_on_failure = pause
self._send_pause_on_failure_information()
def _send_pause_on_failure_information(self):
if self._process:
self._process.pause_on_failure(self._pause_on_failure)
def send_stop_signal(self):
if self._process:
self._process.kill(killer_pid=self._pid_to_kill)
def send_pause_signal(self):
if self._process:
self._process.pause()
def send_continue_signal(self):
if self._process:
self._process.resume()
def send_step_next_signal(self):
if self._process:
self._process.step_next()
def send_step_over_signal(self):
if self._process:
self._process.step_over()
def run_command(self, command, cwd):
self._pid_to_kill = None
self._process = Process(cwd)
self._process.run_command(command)
def get_command(self, profile, pythonpath, monitor_width, test_names):
'''Return the command (as a list) used to run the test'''
command = profile.get_command_prefix()[:]
argfile = os.path.join(self._output_dir, "argfile.txt")
command.extend(["--argumentfile", argfile])
command.extend(["--listener", self._get_listener_to_cmd()])
command.append(self._get_suite_source_for_command())
self._write_argfile(argfile, self._create_standard_args(command, profile, pythonpath, monitor_width, test_names))
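        # For illustration only (runner, paths and port are hypothetical), the returned list
        # ends up looking like:
        #   ['pybot', '--argumentfile', '/tmp/RIDE1234.d/argfile.txt',
        #    '--listener', '/path/to/TestRunnerAgent.py:54321:False', '/path/to/suite']
        # with the remaining options written into the argument file rather than the command line.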
return command
def get_message_log_level(self, command):
min_log_level_number = LEVELS['INFO']
if '-L' in command:
switch = '-L'
elif '--loglevel' in command:
switch = '--loglevel'
else:
return min_log_level_number
i = command.index(switch)
        if i + 1 == len(command):
            return min_log_level_number
level = command[i+1].upper().split(':')[0]
return LEVELS.get(level, min_log_level_number)
def _get_listener_to_cmd(self):
path = os.path.abspath(TestRunnerAgent.__file__)
if path[-1] in ['c', 'o']:
path = path[:-1]
return '%s:%s:%s' % (path, self.port, self._pause_on_failure)
def _get_suite_source_for_command(self):
cur = os.path.abspath(os.path.curdir)
source = os.path.abspath(self._chief.suite.source)
if not utils.is_same_drive(cur, source):
return source
return os.path.abspath(self._chief.suite.source)
def _create_standard_args(self, command, profile, pythonpath, monitor_width, test_names):
standard_args = []
standard_args.extend(profile.get_custom_args())
self._add_tmp_outputdir_if_not_given_by_user(command, standard_args)
self._add_pythonpath_if_in_settings_and_not_given_by_user(command,
standard_args,
pythonpath)
standard_args.extend(["--monitorcolors", "off"])
standard_args.extend(["--monitorwidth", monitor_width])
for tc in test_names:
standard_args += ['--test', tc]
return standard_args
def _add_tmp_outputdir_if_not_given_by_user(self, command, standard_args):
if "--outputdir" not in command and "-d" not in command:
standard_args.extend(["--outputdir", self._output_dir])
def _add_pythonpath_if_in_settings_and_not_given_by_user(self, command, standard_args, pythonpath):
if '--pythonpath' in command:
return
if '-P' in command:
return
if not pythonpath:
return
standard_args.extend(['--pythonpath', ':'.join(pythonpath)])
def _write_argfile(self, argfile, args):
f = codecs.open(argfile, "w", "utf-8")
f.write("\n".join(args))
f.close()
def get_output_and_errors(self):
return self._process.get_output(), self._process.get_errors()
def is_running(self):
return self._process and self._process.is_alive()
def command_ended(self):
self._process = None
class Process(object):
def __init__(self, cwd):
self._process = None
self._error_stream = None
self._output_stream = None
self._cwd = cwd
self._port = None
self._sock = None
def run_command(self, command):
        # We need to supply a stdin for the subprocess, because otherwise in pythonw
        # the subprocess would try to use sys.stdin, which causes an error on Windows
subprocess_args = dict(bufsize=0,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
cwd=self._cwd.encode(SYSTEM_ENCODING))
if IS_WINDOWS:
startupinfo = subprocess.STARTUPINFO()
try:
import _subprocess
startupinfo.dwFlags |= _subprocess.STARTF_USESHOWWINDOW
except ImportError:
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
subprocess_args['startupinfo'] = startupinfo
else:
subprocess_args['preexec_fn'] = os.setsid
subprocess_args['shell'] = True
self._process = subprocess.Popen(command.encode(SYSTEM_ENCODING), **subprocess_args)
self._process.stdin.close()
self._output_stream = StreamReaderThread(self._process.stdout)
self._error_stream = StreamReaderThread(self._process.stderr)
self._output_stream.run()
self._error_stream.run()
self._kill_called = False
def set_port(self, port):
self._port = port
def get_output(self):
return self._output_stream.pop()
def get_errors(self):
return self._error_stream.pop()
def is_alive(self):
return self._process.poll() is None
def wait(self):
self._process.wait()
def kill(self, force=False, killer_pid=None):
if not self._process:
return
if force:
self._process.kill()
self.resume() # Send so that RF is not blocked
if IS_WINDOWS and not self._kill_called and self._port is not None:
self._signal_kill_with_listener_server()
self._kill_called = True
else:
self._kill(killer_pid or self._process.pid)
def _signal_kill_with_listener_server(self):
self._send_socket('kill')
def _send_socket(self, data):
if self._port is None:
return # Silent failure..
sock = None
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(('localhost', self._port))
sock.send(data)
finally:
sock.close()
def pause(self):
self._send_socket('pause')
def pause_on_failure(self, pause):
if pause:
self._send_socket('pause_on_failure')
else:
self._send_socket('do_not_pause_on_failure')
def resume(self):
self._send_socket('resume')
def step_next(self):
self._send_socket('step_next')
def step_over(self):
self._send_socket('step_over')
def _kill(self, pid):
if pid:
try:
if os.name == 'nt' and sys.version_info < (2,7):
import ctypes
ctypes.windll.kernel32.TerminateProcess(int(self._process._handle), -1)
else:
os.kill(pid, signal.SIGINT)
except OSError:
pass
class StreamReaderThread(object):
def __init__(self, stream):
self._queue = Queue()
self._thread = None
self._stream = stream
def run(self):
self._thread = threading.Thread(target=self._enqueue_output, args=(self._stream,))
self._thread.daemon = True
self._thread.start()
def _enqueue_output(self, out):
for line in iter(out.readline, b''):
self._queue.put(line)
def pop(self):
result = ""
for _ in xrange(self._queue.qsize()):
try:
result += self._queue.get_nowait()
except Empty:
pass
return result.decode('UTF-8')
# The following two classes implement a small line-buffered socket
# server. It is designed to run in a separate thread, read data
# from the given port and update the UI -- hopefully all in a
# thread-safe manner.
class RideListenerServer(SocketServer.TCPServer):
"""Implements a simple line-buffered socket server"""
allow_reuse_address = True
def __init__(self, RequestHandlerClass, callback):
SocketServer.TCPServer.__init__(self, ("",0), RequestHandlerClass)
self.callback = callback
class RideListenerHandler(SocketServer.StreamRequestHandler):
def handle(self):
unpickler = pickle.Unpickler(self.request.makefile('r'))
while True:
try:
(name, args) = unpickler.load()
self.server.callback(name, *args)
except (EOFError, IOError):
# I should log this...
break
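# A minimal sketch (not part of RIDE, and it assumes RIDE's dependencies are importable so the
# module-level imports above succeed) of how the listener pair above is wired together: the
# server runs in a daemon thread and a client streams pickled (name, args) events to it, which
# is what TestRunnerAgent does from the Robot Framework side during a test run.
if __name__ == '__main__':
    import time
    def print_event(name, *args):
        print('event: %s %r' % (name, args))
    demo_server = RideListenerServer(RideListenerHandler, print_event)
    demo_thread = threading.Thread(target=demo_server.serve_forever)
    demo_thread.setDaemon(True)
    demo_thread.start()
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect(('localhost', demo_server.server_address[1]))
    wfile = client.makefile('w')
    pickle.dump(('start_test', ['Demo Test', {'longname': 'Suite.Demo Test'}]), wfile)
    wfile.close()
    client.close()
    time.sleep(0.5)  # give the handler thread a moment to process the event
    demo_server.shutdown()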
|
main.py
|
from crosshair import Crosshair
import json
import threading
def reload_crosshair(c = None):
if c:
c.allow_draw = False
try:
with open("config.json", "r") as f:
config = json.loads(f.read())
c = Crosshair(config["color"], (config["thickness"], config["length"], config["offset"], config["outline"]), config["set_pixel_fps"])
except:
print("Config error. Using default config.")
c = Crosshair()
c.create_crosshair_matrix()
c_thread = threading.Thread(target=c.draw_crosshair_pixels)
c_thread.daemon = True
c_thread.start()
return c
def update_config(key, value):
config = {
"color": "(0,255,0,255)",
"length": 3,
"offset": 2,
"set_pixel_fps": 500,
"thickness": 1,
"outline": 1
}
try:
with open("config.json", "r") as f:
config = json.loads(f.read())
except:
with open("config.json", "w") as f:
f.write("")
if key == "color":
config[key] = value
else:
config[key] = int(value)
with open("config.json", "w") as f:
f.write(json.dumps(config, indent=4, sort_keys=True))
def main():
c = reload_crosshair()
commands = ["thickness", "length", "offset", "color", "set_pixel_fps", "outline"]
command = ""
while command != "exit":
command = input("crosshair> ")
try:
key, value = command.split(" ")
if key in commands:
update_config(key, value)
except:
if command not in ("exit", ""):
print("Invalid command\n")
print("Commands :")
for command in commands:
print(command + " <value>")
print("\n")
c = reload_crosshair(c)
if __name__ == "__main__":
main()
|
tensorboard.py
|
"Provides convenient callbacks for Learners that write model images, metrics/losses, stats and histograms to Tensorboard"
from ..basic_train import Learner
from ..basic_data import DatasetType, DataBunch
from ..vision import Image
from ..vision.gan import GANLearner
from ..callbacks import LearnerCallback
from ..core import *
from ..torch_core import *
from threading import Thread, Event
from time import sleep
from queue import Queue
import statistics
import torchvision.utils as vutils
from abc import ABC
#This is an optional dependency in fastai. Must install separately.
try: from tensorboardX import SummaryWriter
except: print("To use this tracker, please run 'pip install tensorboardx'. Also you must have Tensorboard running to see results")
__all__=['LearnerTensorboardWriter', 'GANTensorboardWriter', 'ImageGenTensorboardWriter']
#---Example usage (applies to any of the callbacks)---
# proj_id = 'Colorize'
# tboard_path = Path('data/tensorboard/' + proj_id)
# learn.callback_fns.append(partial(GANTensorboardWriter, base_dir=tboard_path, name='GanLearner'))
class LearnerTensorboardWriter(LearnerCallback):
"Broadly useful callback for Learners that writes to Tensorboard. Writes model histograms, losses/metrics, and gradient stats."
def __init__(self, learn:Learner, base_dir:Path, name:str, loss_iters:int=25, hist_iters:int=500, stats_iters:int=100):
super().__init__(learn=learn)
self.base_dir,self.name,self.loss_iters,self.hist_iters,self.stats_iters = base_dir,name,loss_iters,hist_iters,stats_iters
log_dir = base_dir/name
self.tbwriter = SummaryWriter(str(log_dir))
self.hist_writer = HistogramTBWriter()
self.stats_writer = ModelStatsTBWriter()
self.graph_writer = GraphTBWriter()
self.data = None
self.metrics_root = '/metrics/'
self._update_batches_if_needed()
def _get_new_batch(self, ds_type:DatasetType)->Collection[Tensor]:
"Retrieves new batch of DatasetType, and detaches it."
return self.learn.data.one_batch(ds_type=ds_type, detach=True, denorm=False, cpu=False)
def _update_batches_if_needed(self)->None:
"one_batch function is extremely slow with large datasets. This is caching the result as an optimization."
if self.learn.data.valid_dl is None: return # Running learning rate finder, so return
update_batches = self.data is not self.learn.data
if not update_batches: return
self.data = self.learn.data
self.trn_batch = self._get_new_batch(ds_type=DatasetType.Train)
self.val_batch = self._get_new_batch(ds_type=DatasetType.Valid)
def _write_model_stats(self, iteration:int)->None:
"Writes gradient statistics to Tensorboard."
self.stats_writer.write(model=self.learn.model, iteration=iteration, tbwriter=self.tbwriter)
def _write_training_loss(self, iteration:int, last_loss:Tensor)->None:
"Writes training loss to Tensorboard."
scalar_value = to_np(last_loss)
tag = self.metrics_root + 'train_loss'
self.tbwriter.add_scalar(tag=tag, scalar_value=scalar_value, global_step=iteration)
def _write_weight_histograms(self, iteration:int)->None:
"Writes model weight histograms to Tensorboard."
self.hist_writer.write(model=self.learn.model, iteration=iteration, tbwriter=self.tbwriter)
def _write_scalar(self, name:str, scalar_value, iteration:int)->None:
"Writes single scalar value to Tensorboard."
tag = self.metrics_root + name
self.tbwriter.add_scalar(tag=tag, scalar_value=scalar_value, global_step=iteration)
#TODO: Relying on a specific hardcoded start_idx here isn't great. Is there a better solution?
def _write_metrics(self, iteration:int, last_metrics:MetricsList, start_idx:int=2)->None:
"Writes training metrics to Tensorboard."
recorder = self.learn.recorder
for i, name in enumerate(recorder.names[start_idx:]):
if last_metrics is None or len(last_metrics) < i+1: return
scalar_value = last_metrics[i]
self._write_scalar(name=name, scalar_value=scalar_value, iteration=iteration)
def on_train_begin(self, **kwargs: Any) -> None:
self.graph_writer.write(model=self.learn.model, tbwriter=self.tbwriter,
input_to_model=next(iter(self.learn.data.dl(DatasetType.Single)))[0])
def on_batch_end(self, last_loss:Tensor, iteration:int, train:bool, **kwargs)->None:
"Callback function that writes batch end appropriate data to Tensorboard."
if iteration == 0 or not train: return
self._update_batches_if_needed()
if iteration % self.loss_iters == 0: self._write_training_loss(iteration=iteration, last_loss=last_loss)
if iteration % self.hist_iters == 0: self._write_weight_histograms(iteration=iteration)
# Doing stuff here that requires gradient info, because they get zeroed out afterwards in training loop
def on_backward_end(self, iteration:int, train:bool, **kwargs)->None:
"Callback function that writes backward end appropriate data to Tensorboard."
if iteration == 0 and not train: return
self._update_batches_if_needed()
if iteration % self.stats_iters == 0: self._write_model_stats(iteration=iteration)
def on_epoch_end(self, last_metrics:MetricsList, iteration:int, **kwargs)->None:
"Callback function that writes epoch end appropriate data to Tensorboard."
self._write_metrics(iteration=iteration, last_metrics=last_metrics)
# TODO: We're overriding almost everything here. Seems like a good idea to question that ("is a" vs "has a")
class GANTensorboardWriter(LearnerTensorboardWriter):
"Callback for GANLearners that writes to Tensorboard. Extends LearnerTensorboardWriter and adds output image writes."
def __init__(self, learn:GANLearner, base_dir:Path, name:str, loss_iters:int=25, hist_iters:int=500,
stats_iters:int=100, visual_iters:int=100):
super().__init__(learn=learn, base_dir=base_dir, name=name, loss_iters=loss_iters, hist_iters=hist_iters, stats_iters=stats_iters)
self.visual_iters = visual_iters
self.img_gen_vis = ImageTBWriter()
self.gen_stats_updated = True
self.crit_stats_updated = True
def _write_weight_histograms(self, iteration:int)->None:
"Writes model weight histograms to Tensorboard."
generator, critic = self.learn.gan_trainer.generator, self.learn.gan_trainer.critic
self.hist_writer.write(model=generator, iteration=iteration, tbwriter=self.tbwriter, name='generator')
self.hist_writer.write(model=critic, iteration=iteration, tbwriter=self.tbwriter, name='critic')
def _write_gen_model_stats(self, iteration:int)->None:
"Writes gradient statistics for generator to Tensorboard."
generator = self.learn.gan_trainer.generator
self.stats_writer.write(model=generator, iteration=iteration, tbwriter=self.tbwriter, name='gen_model_stats')
self.gen_stats_updated = True
def _write_critic_model_stats(self, iteration:int)->None:
"Writes gradient statistics for critic to Tensorboard."
critic = self.learn.gan_trainer.critic
self.stats_writer.write(model=critic, iteration=iteration, tbwriter=self.tbwriter, name='crit_model_stats')
self.crit_stats_updated = True
def _write_model_stats(self, iteration:int)->None:
"Writes gradient statistics to Tensorboard."
# We don't want to write stats when model is not iterated on and hence has zeroed out gradients
gen_mode = self.learn.gan_trainer.gen_mode
if gen_mode and not self.gen_stats_updated: self._write_gen_model_stats(iteration=iteration)
if not gen_mode and not self.crit_stats_updated: self._write_critic_model_stats(iteration=iteration)
def _write_training_loss(self, iteration:int, last_loss:Tensor)->None:
"Writes training loss to Tensorboard."
recorder = self.learn.gan_trainer.recorder
if len(recorder.losses) == 0: return
scalar_value = to_np((recorder.losses[-1:])[0])
tag = self.metrics_root + 'train_loss'
self.tbwriter.add_scalar(tag=tag, scalar_value=scalar_value, global_step=iteration)
def _write_images(self, iteration:int)->None:
"Writes model generated, original and real images to Tensorboard."
trainer = self.learn.gan_trainer
#TODO: Switching gen_mode temporarily seems a bit hacky here. Certainly not a good side-effect. Is there a better way?
gen_mode = trainer.gen_mode
try:
trainer.switch(gen_mode=True)
self.img_gen_vis.write(learn=self.learn, trn_batch=self.trn_batch, val_batch=self.val_batch,
iteration=iteration, tbwriter=self.tbwriter)
finally: trainer.switch(gen_mode=gen_mode)
def on_batch_end(self, iteration:int, train:bool, **kwargs)->None:
"Callback function that writes batch end appropriate data to Tensorboard."
super().on_batch_end(iteration=iteration, train=train, **kwargs)
if iteration == 0 and not train: return
if iteration % self.visual_iters == 0: self._write_images(iteration=iteration)
def on_backward_end(self, iteration:int, train:bool, **kwargs)->None:
"Callback function that writes backward end appropriate data to Tensorboard."
if iteration == 0 and not train: return
self._update_batches_if_needed()
#TODO: This could perhaps be implemented as queues of requests instead but that seemed like overkill.
# But I'm not the biggest fan of maintaining these boolean flags either... Review pls.
if iteration % self.stats_iters == 0: self.gen_stats_updated, self.crit_stats_updated = False, False
if not (self.gen_stats_updated and self.crit_stats_updated): self._write_model_stats(iteration=iteration)
class ImageGenTensorboardWriter(LearnerTensorboardWriter):
"Callback for non-GAN image generating Learners that writes to Tensorboard. Extends LearnerTensorboardWriter and adds output image writes."
def __init__(self, learn:Learner, base_dir:Path, name:str, loss_iters:int=25, hist_iters:int=500, stats_iters:int=100,
visual_iters:int=100):
super().__init__(learn=learn, base_dir=base_dir, name=name, loss_iters=loss_iters, hist_iters=hist_iters,
stats_iters=stats_iters)
self.visual_iters = visual_iters
self.img_gen_vis = ImageTBWriter()
def _write_images(self, iteration:int)->None:
"Writes model generated, original and real images to Tensorboard"
self.img_gen_vis.write(learn=self.learn, trn_batch=self.trn_batch, val_batch=self.val_batch, iteration=iteration,
tbwriter=self.tbwriter)
def on_batch_end(self, iteration:int, train:bool, **kwargs)->None:
"Callback function that writes batch end appropriate data to Tensorboard."
super().on_batch_end(iteration=iteration, train=train, **kwargs)
if iteration == 0 and not train: return
if iteration % self.visual_iters == 0: self._write_images(iteration=iteration)
class TBWriteRequest(ABC):
"A request object for Tensorboard writes. Useful for queuing up and executing asynchronous writes."
def __init__(self, tbwriter: SummaryWriter, iteration:int):
super().__init__()
self.tbwriter = tbwriter
self.iteration = iteration
@abstractmethod
def write(self)->None: pass
# SummaryWriter writes tend to block quite a bit. This gets around that and greatly boosts performance.
# Not all tensorboard writes use this - just the ones that take a long time. Note that the
# SummaryWriter does actually use a threadsafe consumer/producer design ultimately to write to Tensorboard,
# so writes done outside of this async loop should be fine.
class AsyncTBWriter():
"Callback for GANLearners that writes to Tensorboard. Extends LearnerTensorboardWriter and adds output image writes."
def __init__(self):
super().__init__()
self.stop_request = Event()
self.queue = Queue()
self.thread = Thread(target=self._queue_processor, daemon=True)
self.thread.start()
def request_write(self, request: TBWriteRequest)->None:
"Queues up an asynchronous write request to Tensorboard."
if self.stop_request.isSet(): return
self.queue.put(request)
def _queue_processor(self)->None:
"Processes queued up write requests asynchronously to Tensorboard."
while not self.stop_request.isSet():
while not self.queue.empty():
if self.stop_request.isSet(): return
request = self.queue.get()
request.write()
sleep(0.2)
#Provided this to stop thread explicitly or by context management (with statement) but thread should end on its own
    # upon program exit, due to being a daemon. So using this is probably unnecessary.
def close(self)->None:
"Stops asynchronous request queue processing thread."
self.stop_request.set()
self.thread.join()
# Nothing to do, thread already started. Could start thread here to enforce use of context manager
    # (but that sounds like a pain and a bit unwieldy and unnecessary for actual usage)
def __enter__(self): pass
def __exit__(self, exc_type, exc_value, traceback): self.close()
asyncTBWriter = AsyncTBWriter()
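# A hypothetical request type (not part of fastai) showing the pattern the request classes
# below follow: capture the data to be written up front, then hand the request to
# asyncTBWriter so the blocking SummaryWriter call happens off the training thread.
class ScalarTBRequest(TBWriteRequest):
    "Example request object that writes a single scalar value asynchronously."
    def __init__(self, tag:str, scalar_value, iteration:int, tbwriter:SummaryWriter):
        super().__init__(tbwriter=tbwriter, iteration=iteration)
        self.tag,self.scalar_value = tag,scalar_value
    def write(self)->None:
        "Writes the captured scalar to Tensorboard."
        self.tbwriter.add_scalar(tag=self.tag, scalar_value=self.scalar_value, global_step=self.iteration)
# Usage sketch (tbwriter is an existing SummaryWriter; values are illustrative):
#   asyncTBWriter.request_write(ScalarTBRequest(tag='demo/loss', scalar_value=0.5, iteration=10, tbwriter=tbwriter))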
class ModelImageSet():
"Convenience object that holds the original, real(target) and generated versions of a single image fed to a model."
@staticmethod
def get_list_from_model(learn:Learner, ds_type:DatasetType, batch:Tuple)->[]:
"Factory method to convert a batch of model images to a list of ModelImageSet."
image_sets = []
x,y = batch[0],batch[1]
preds = learn.pred_batch(ds_type=ds_type, batch=(x,y), reconstruct=True)
for orig_px, real_px, gen in zip(x,y,preds):
orig, real = Image(px=orig_px), Image(px=real_px)
image_set = ModelImageSet(orig=orig, real=real, gen=gen)
image_sets.append(image_set)
return image_sets
def __init__(self, orig:Image, real:Image, gen:Image): self.orig, self.real, self.gen = orig, real, gen
class HistogramTBRequest(TBWriteRequest):
"Request object for model histogram writes to Tensorboard."
def __init__(self, model:nn.Module, iteration:int, tbwriter:SummaryWriter, name:str):
super().__init__(tbwriter=tbwriter, iteration=iteration)
self.params = [(name, values.clone().detach().cpu()) for (name, values) in model.named_parameters()]
self.name = name
def _write_histogram(self, param_name:str, values)->None:
"Writes single model histogram to Tensorboard."
tag = self.name + '/weights/' + param_name
self.tbwriter.add_histogram(tag=tag, values=values, global_step=self.iteration)
def write(self)->None:
"Writes model histograms to Tensorboard."
for param_name, values in self.params: self._write_histogram(param_name=param_name, values=values)
#If this isn't done async then this is sloooooow
class HistogramTBWriter():
"Writes model histograms to Tensorboard."
def __init__(self): super().__init__()
def write(self, model:nn.Module, iteration:int, tbwriter:SummaryWriter, name:str='model')->None:
"Writes model histograms to Tensorboard."
request = HistogramTBRequest(model=model, iteration=iteration, tbwriter=tbwriter, name=name)
asyncTBWriter.request_write(request)
class ModelStatsTBRequest(TBWriteRequest):
"Request object for model gradient statistics writes to Tensorboard."
def __init__(self, model:nn.Module, iteration:int, tbwriter:SummaryWriter, name:str):
super().__init__(tbwriter=tbwriter, iteration=iteration)
self.gradients = [x.grad.clone().detach().cpu() for x in model.parameters() if x.grad is not None]
self.name = name
def _add_gradient_scalar(self, name:str, scalar_value)->None:
"Writes a single scalar value for a gradient statistic to Tensorboard."
tag = self.name + '/gradients/' + name
self.tbwriter.add_scalar(tag=tag, scalar_value=scalar_value, global_step=self.iteration)
def _write_avg_norm(self, norms:[])->None:
"Writes the average norm of the gradients to Tensorboard."
avg_norm = sum(norms)/len(self.gradients)
self._add_gradient_scalar('avg_norm', scalar_value=avg_norm)
def _write_median_norm(self, norms:[])->None:
"Writes the median norm of the gradients to Tensorboard."
median_norm = statistics.median(norms)
self._add_gradient_scalar('median_norm', scalar_value=median_norm)
def _write_max_norm(self, norms:[])->None:
"Writes the maximum norm of the gradients to Tensorboard."
max_norm = max(norms)
self._add_gradient_scalar('max_norm', scalar_value=max_norm)
def _write_min_norm(self, norms:[])->None:
"Writes the minimum norm of the gradients to Tensorboard."
min_norm = min(norms)
self._add_gradient_scalar('min_norm', scalar_value=min_norm)
def _write_num_zeros(self)->None:
"Writes the number of zeroes in the gradients to Tensorboard."
gradient_nps = [to_np(x.data) for x in self.gradients]
num_zeros = sum((np.asarray(x) == 0.0).sum() for x in gradient_nps)
self._add_gradient_scalar('num_zeros', scalar_value=num_zeros)
def _write_avg_gradient(self)->None:
"Writes the average of the gradients to Tensorboard."
avg_gradient = sum(x.data.mean() for x in self.gradients)/len(self.gradients)
self._add_gradient_scalar('avg_gradient', scalar_value=avg_gradient)
def _write_median_gradient(self)->None:
"Writes the median of the gradients to Tensorboard."
median_gradient = statistics.median(x.data.median() for x in self.gradients)
self._add_gradient_scalar('median_gradient', scalar_value=median_gradient)
def _write_max_gradient(self)->None:
"Writes the maximum of the gradients to Tensorboard."
max_gradient = max(x.data.max() for x in self.gradients)
self._add_gradient_scalar('max_gradient', scalar_value=max_gradient)
def _write_min_gradient(self)->None:
"Writes the minimum of the gradients to Tensorboard."
min_gradient = min(x.data.min() for x in self.gradients)
self._add_gradient_scalar('min_gradient', scalar_value=min_gradient)
def write(self)->None:
"Writes model gradient statistics to Tensorboard."
if len(self.gradients) == 0: return
norms = [x.data.norm() for x in self.gradients]
self._write_avg_norm(norms=norms)
self._write_median_norm(norms=norms)
self._write_max_norm(norms=norms)
self._write_min_norm(norms=norms)
self._write_num_zeros()
self._write_avg_gradient()
self._write_median_gradient()
self._write_max_gradient()
self._write_min_gradient()
class ModelStatsTBWriter():
"Writes model gradient statistics to Tensorboard."
def write(self, model:nn.Module, iteration:int, tbwriter:SummaryWriter, name:str='model_stats')->None:
"Writes model gradient statistics to Tensorboard."
request = ModelStatsTBRequest(model=model, iteration=iteration, tbwriter=tbwriter, name=name)
asyncTBWriter.request_write(request)
class ImageTBRequest(TBWriteRequest):
"Request object for model image output writes to Tensorboard."
def __init__(self, learn:Learner, batch:Tuple, iteration:int, tbwriter:SummaryWriter, ds_type:DatasetType):
super().__init__(tbwriter=tbwriter, iteration=iteration)
self.image_sets = ModelImageSet.get_list_from_model(learn=learn, batch=batch, ds_type=ds_type)
self.ds_type = ds_type
def _write_images(self, name:str, images:[Tensor])->None:
"Writes list of images as tensors to Tensorboard."
tag = self.ds_type.name + ' ' + name
self.tbwriter.add_image(tag=tag, img_tensor=vutils.make_grid(images, normalize=True), global_step=self.iteration)
def _get_image_tensors(self)->([Tensor], [Tensor], [Tensor]):
"Gets list of image tensors from lists of Image objects, as a tuple of original, generated and real(target) images."
orig_images, gen_images, real_images = [], [], []
for image_set in self.image_sets:
orig_images.append(image_set.orig.px)
gen_images.append(image_set.gen.px)
real_images.append(image_set.real.px)
return orig_images, gen_images, real_images
def write(self)->None:
"Writes original, generated and real(target) images to Tensorboard."
orig_images, gen_images, real_images = self._get_image_tensors()
self._write_images(name='orig images', images=orig_images)
self._write_images(name='gen images', images=gen_images)
self._write_images(name='real images', images=real_images)
#If this isn't done async then this is noticeably slower
class ImageTBWriter():
"Writes model image output to Tensorboard."
def __init__(self): super().__init__()
def write(self, learn:Learner, trn_batch:Tuple, val_batch:Tuple, iteration:int, tbwriter:SummaryWriter)->None:
"Writes training and validation batch images to Tensorboard."
self._write_for_dstype(learn=learn, batch=val_batch, iteration=iteration, tbwriter=tbwriter, ds_type=DatasetType.Valid)
self._write_for_dstype(learn=learn, batch=trn_batch, iteration=iteration, tbwriter=tbwriter, ds_type=DatasetType.Train)
def _write_for_dstype(self, learn:Learner, batch:Tuple, iteration:int, tbwriter:SummaryWriter, ds_type:DatasetType)->None:
"Writes batch images of specified DatasetType to Tensorboard."
request = ImageTBRequest(learn=learn, batch=batch, iteration=iteration, tbwriter=tbwriter, ds_type=ds_type)
asyncTBWriter.request_write(request)
class GraphTBRequest(TBWriteRequest):
"Request object for model histogram writes to Tensorboard."
def __init__(self, model:nn.Module, tbwriter:SummaryWriter, input_to_model:torch.Tensor):
super().__init__(tbwriter=tbwriter, iteration=0)
self.model,self.input_to_model = model,input_to_model
def write(self)->None:
"Writes single model graph to Tensorboard."
self.tbwriter.add_graph(model=self.model, input_to_model=self.input_to_model)
class GraphTBWriter():
"Writes model network graph to Tensorboard."
def write(self, model:nn.Module, tbwriter:SummaryWriter, input_to_model:torch.Tensor)->None:
"Writes model graph to Tensorboard."
request = GraphTBRequest(model=model, tbwriter=tbwriter, input_to_model=input_to_model)
asyncTBWriter.request_write(request)
|
distribute_coordinator_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Distribute Coordinator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import copy
import json
import os
import sys
import threading
import time
import six
_portpicker_import_error = None
try:
import portpicker # pylint: disable=g-import-not-at-top
except ImportError as _error: # pylint: disable=invalid-name
_portpicker_import_error = _error
portpicker = None
# pylint: disable=g-import-not-at-top
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.distribute import distribute_coordinator
from tensorflow.python.distribute import distribute_coordinator_context
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import monitored_session
from tensorflow.python.training import session_manager
CHIEF = distribute_coordinator._TaskType.CHIEF
WORKER = distribute_coordinator._TaskType.WORKER
PS = distribute_coordinator._TaskType.PS
EVALUATOR = distribute_coordinator._TaskType.EVALUATOR
STANDALONE_CLIENT = distribute_coordinator.CoordinatorMode.STANDALONE_CLIENT
INDEPENDENT_WORKER = distribute_coordinator.CoordinatorMode.INDEPENDENT_WORKER
NUM_WORKERS = 3
NUM_PS = 2
original_sys_exit = sys.exit
def _bytes_to_str(maybe_bytes):
if isinstance(maybe_bytes, six.string_types):
return maybe_bytes
else:
return str(maybe_bytes, "utf-8")
def _strip_protocol(target):
# cluster_spec expects "host:port" strings.
if "//" in target:
return target.split("//")[1]
else:
return target
class MockExtended(object):
def __init__(self,
between_graph=False,
should_init=None,
should_checkpoint=None,
should_save_summary=None):
self.experimental_between_graph = between_graph
self.experimental_should_init = should_init
self.should_checkpoint = should_checkpoint
self.should_save_summary = should_save_summary
class MockStrategy(object):
def __init__(self,
between_graph=False,
should_init=None,
should_checkpoint=None,
should_save_summary=None):
self.extended = MockExtended(between_graph, should_init, should_checkpoint,
should_save_summary)
def configure(self,
session_config=None,
cluster_spec=None,
task_type=None,
task_id=None):
if self.extended.experimental_should_init is None:
if task_id == 0:
self.extended.experimental_should_init = True
else:
self.extended.experimental_should_init = False
if self.extended.should_checkpoint is None:
if task_id == 0:
self.extended.should_checkpoint = True
else:
self.extended.should_checkpoint = False
if self.extended.should_save_summary is None:
if task_id == 0:
self.extended.should_save_summary = True
else:
self.extended.should_save_summary = False
if session_config:
if (cluster_spec and task_type and task_id is not None and
self.extended.experimental_between_graph):
session_config.intra_op_parallelism_threads += 1
if task_type in ["chief", "worker"]:
session_config.device_filters.extend(
["/job:%s/task:%d" % (task_type, task_id), "/job:ps"])
else:
session_config.inter_op_parallelism_threads += 1
session_config.device_filters.append("/job:somejob")
class MockServer(object):
def __init__(self):
self._joined = False
self._started = False
def start(self):
self._started = True
def join(self):
assert not self._joined
self._joined = True
@property
def joined(self):
return self._joined
@property
def started(self):
return self._started
class DistributeCoordinatorTestBase(test.TestCase):
@classmethod
def setUpClass(cls):
# We have to create a global in-process cluster because once an in-process
# tensorflow server is created, there is no way to terminate it. Please see
# multi_worker_test_base.py for more details.
    # TODO(yuefengz): use the utility from multi_worker_test_base.
cls._workers, cls._ps = test_util.create_local_cluster(
NUM_WORKERS, num_ps=NUM_PS)
cls._cluster_spec = {
WORKER: [
_strip_protocol(_bytes_to_str(w.target)) for w in cls._workers
],
PS: [_strip_protocol(_bytes_to_str(ps.target)) for ps in cls._ps]
}
def setUp(self):
self._result_correct = 0
self._lock = threading.Lock()
self._worker_context = {}
self._strategy_property = {}
self._std_servers = {}
self._barrier = distribute_coordinator._Barrier(NUM_WORKERS)
@contextlib.contextmanager
def _test_session(self, target):
config = config_pb2.ConfigProto(allow_soft_placement=True)
config.graph_options.optimizer_options.opt_level = -1
with session.Session(graph=None, config=config, target=target) as sess:
yield sess
  # TODO(yuefengz): use the utility from multi_worker_test_base.
def _create_cluster_spec(self,
has_chief=False,
num_workers=1,
num_ps=0,
has_eval=False):
if _portpicker_import_error:
raise _portpicker_import_error # pylint: disable=raising-bad-type
cluster_spec = {}
if has_chief:
cluster_spec[CHIEF] = ["localhost:%s" % portpicker.pick_unused_port()]
if num_workers:
cluster_spec[WORKER] = [
"localhost:%s" % portpicker.pick_unused_port()
for _ in range(num_workers)
]
if num_ps:
cluster_spec[PS] = [
"localhost:%s" % portpicker.pick_unused_port() for _ in range(num_ps)
]
if has_eval:
cluster_spec[EVALUATOR] = ["localhost:%s" % portpicker.pick_unused_port()]
return cluster_spec
def _in_graph_worker_fn(self, strategy):
context = distribute_coordinator_context.get_current_worker_context()
self.assertTrue(context is not None)
with self._test_session(target=context.master_target) as sess:
xs = []
expected = 0.0
for i in range(context.num_workers):
with ops.device("/job:worker/task:%d" % i):
x = variable_scope.get_variable("x_%d" % i, initializer=10.0)
x_add = x.assign_add(float(i))
xs.append(x_add)
expected += i + 10.0
with ops.device("/job:worker/task:0"):
result = math_ops.add_n(xs)
self.evaluate(variables.global_variables_initializer())
result_value = sess.run(result)
self.assertEqual(result_value, expected)
if result_value == expected:
self._result_correct += 1
def _run_coordinator_in_thread(self, worker_fn, strategy, **kwargs):
t = threading.Thread(
target=distribute_coordinator.run_distribute_coordinator,
args=(worker_fn, strategy),
kwargs=kwargs)
t.start()
return t
def _run_multiple_coordinator_in_threads(self, worker_fn, strategy,
cluster_spec, **kwargs):
threads = {}
for task_type in cluster_spec.keys():
threads[task_type] = []
for task_id in range(len(cluster_spec[task_type])):
t = self._run_coordinator_in_thread(
worker_fn,
strategy,
cluster_spec=cluster_spec,
task_type=task_type,
task_id=task_id,
**kwargs)
threads[task_type].append(t)
return threads
def _between_graph_worker_fn(self, strategy):
context = distribute_coordinator_context.get_current_worker_context()
self.assertTrue(context is not None)
with self._test_session(target=context.master_target) as sess:
with ops.device("/job:ps/task:0"):
# TODO(yuefengz): investigate why not using resource variable will make
# the test flaky.
x = variable_scope.get_variable(
"x", initializer=10.0, use_resource=True)
with ops.device("/job:ps/task:1"):
y = variable_scope.get_variable(
"y", initializer=20.0, use_resource=True)
x_add = x.assign_add(2.0)
y_sub = y.assign_sub(2.0)
train_op = control_flow_ops.group([x_add, y_sub])
if context.is_chief:
self.evaluate(variables.global_variables_initializer())
        # Synchronize workers after initialization.
if context.has_barrier:
context.wait_for_other_workers()
else:
while True:
uninit_vars = sess.run(variables.report_uninitialized_variables())
# pylint: disable=g-explicit-length-test
if len(uninit_vars) == 0:
break
sess.run(train_op)
# Synchronize workers after one step to make sure they all have finished
# training.
if context.has_barrier:
context.wait_for_other_workers()
else:
self._barrier.wait()
x_val, y_val = sess.run([x, y])
self.assertEqual(x_val, 16.0)
self.assertEqual(y_val, 14.0)
if x_val == 16.0 and y_val == 14.0:
with self._lock:
self._result_correct += 1
def _between_graph_with_monitored_session(self, strategy):
context = distribute_coordinator_context.get_current_worker_context()
self.assertTrue(context is not None)
with ops.device("/job:ps/task:0"):
      # TODO(yuefengz): investigate why not using resource variables makes
      # the test flaky.
x = variable_scope.get_variable("xx", initializer=10.0, use_resource=True)
with ops.device("/job:ps/task:1"):
y = variable_scope.get_variable("yy", initializer=20.0, use_resource=True)
x_add = x.assign_add(2.0)
y_sub = y.assign_sub(2.0)
train_op = control_flow_ops.group([x_add, y_sub])
# The monitored session will run init or ready ops.
with monitored_session.MonitoredSession() as sess:
sess.run(train_op)
# Synchronize workers after one step to make sure they all have finished
# training.
if context.has_barrier:
context.wait_for_other_workers()
else:
self._barrier.wait()
x_val, y_val = sess.run([x, y])
self.assertEqual(x_val, 16.0)
self.assertEqual(y_val, 14.0)
if x_val == 16.0 and y_val == 14.0:
with self._lock:
self._result_correct += 1
def _dump_worker_context(self, strategy):
"""Dumps the propoerties of each worker context.
It dumps the context properties to a dict mapping from task_type to a list
of tuples of master_target, num_workers, is_chief and distribute_mode, where
the list is indexed by the task_id.
Args:
strategy: a `DistributionStrategy` object.
"""
context = distribute_coordinator_context.get_current_worker_context()
self.assertTrue(context is not None)
task_type = str(context.task_type)
task_id = context.task_id or 0
with self._lock:
if task_type not in self._worker_context:
self._worker_context[task_type] = []
while len(self._worker_context[task_type]) <= task_id:
self._worker_context[task_type].append(None)
self._worker_context[task_type][task_id] = (context.master_target,
context.num_workers,
context.is_chief,
context.distributed_mode)
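  # Example of the resulting self._worker_context after a 3-worker
  # between-graph run (targets are illustrative):
  #   {"worker": [("grpc://localhost:12345", 3, True, True),
  #               ("grpc://localhost:12346", 3, False, True),
  #               ("grpc://localhost:12347", 3, False, True)]}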
def _dump_strategy_property(self, strategy):
context = distribute_coordinator_context.get_current_worker_context()
self.assertTrue(context is not None)
self.assertEqual(context._strategy.extended.experimental_should_init,
strategy.extended.experimental_should_init)
self.assertEqual(context.should_checkpoint,
strategy.extended.should_checkpoint)
self.assertEqual(context.should_save_summary,
strategy.extended.should_save_summary)
task_type = str(context.task_type)
task_id = context.task_id or 0
with self._lock:
if task_type not in self._strategy_property:
self._strategy_property[task_type] = []
while len(self._strategy_property[task_type]) <= task_id:
self._strategy_property[task_type].append(None)
self._strategy_property[task_type][task_id] = (
context._strategy.extended.experimental_should_init,
context.should_checkpoint,
context.should_save_summary)
def _run_mock_std_server(self,
session_config=None,
cluster_spec=None,
task_type=None,
task_id=None,
rpc_layer=None,
environment=None):
task_type = str(task_type)
task_id = task_id or 0
with self._lock:
if task_type not in self._std_servers:
self._std_servers[task_type] = []
while len(self._std_servers[task_type]) <= task_id:
self._std_servers[task_type].append(None)
server = MockServer()
self._std_servers[task_type][task_id] = server
return server
class DistributeCoordinatorTestStandaloneMode(DistributeCoordinatorTestBase):
def testInGraphStandaloneMode(self):
"""Test it runs in-graph replication in standalone client mode."""
distribute_coordinator.run_distribute_coordinator(
self._in_graph_worker_fn,
MockStrategy(between_graph=False),
cluster_spec=self._cluster_spec)
self.assertEqual(self._result_correct, 1)
def testBetweenGraph(self):
"""Test it runs between-graph replication in standalone client mode."""
distribute_coordinator.run_distribute_coordinator(
self._between_graph_worker_fn,
MockStrategy(between_graph=True),
cluster_spec=self._cluster_spec)
# Each finished worker will increment self._result_correct.
self.assertEqual(self._result_correct, NUM_WORKERS)
@test_util.run_v1_only("MonitoredSession removed from v2")
def testBetweenGraphWithMonitoredSession(self):
"""Test monitored session in standalone client mode."""
distribute_coordinator.run_distribute_coordinator(
self._between_graph_with_monitored_session,
MockStrategy(between_graph=True),
cluster_spec=self._cluster_spec)
# Each finished worker will increment self._result_correct.
self.assertEqual(self._result_correct, NUM_WORKERS)
def testBetweenGraphContext(self):
# Dumps the task contexts to the self._worker_context dict.
distribute_coordinator.run_distribute_coordinator(
self._dump_worker_context,
MockStrategy(between_graph=True),
cluster_spec=self._cluster_spec)
    # There is only one type of task and there are three such tasks.
self.assertEqual(len(self._worker_context), 1)
self.assertTrue(WORKER in self._worker_context)
self.assertEqual(len(self._worker_context[WORKER]), NUM_WORKERS)
# Check whether each task has the right master_target, num_workers, is_chief
# and distributed_mode.
self.assertEqual(
self._worker_context[WORKER][0],
(_bytes_to_str(self._workers[0].target), NUM_WORKERS, True, True))
self.assertEqual(
self._worker_context[WORKER][1],
(_bytes_to_str(self._workers[1].target), NUM_WORKERS, False, True))
self.assertEqual(
self._worker_context[WORKER][2],
(_bytes_to_str(self._workers[2].target), NUM_WORKERS, False, True))
def testBetweenGraphStrategyProperties(self):
# Dumps properties of the strategy objects.
distribute_coordinator.run_distribute_coordinator(
self._dump_strategy_property,
MockStrategy(between_graph=True, should_init=True),
cluster_spec=self._cluster_spec)
    # There is only one type of task and there are three such tasks.
self.assertEqual(len(self._strategy_property), 1)
self.assertTrue(WORKER in self._strategy_property)
self.assertEqual(len(self._strategy_property[WORKER]), NUM_WORKERS)
# Check whether each task has the right properties of should_init,
# should_checkpoint and should_save_summary.
self.assertEqual(self._strategy_property[WORKER][0], (True, True, True))
self.assertEqual(self._strategy_property[WORKER][1], (True, False, False))
self.assertEqual(self._strategy_property[WORKER][2], (True, False, False))
def testInGraphContext(self):
# Dumps the task contexts to the self._worker_context dict.
distribute_coordinator.run_distribute_coordinator(
self._dump_worker_context,
MockStrategy(between_graph=False),
cluster_spec=self._cluster_spec)
# There is only a "None" task in the dumped task context.
self.assertEqual(len(self._worker_context), 1)
self.assertTrue("None" in self._worker_context)
self.assertEqual(len(self._worker_context["None"]), 1)
# Check whether each task has the right master_target, num_workers, is_chief
# and distributed_mode.
self.assertEqual(
self._worker_context["None"][0],
(_bytes_to_str(self._workers[0].target), NUM_WORKERS, True, True))
def testLocalContext(self):
# Dumps the task contexts to the self._worker_context dict.
distribute_coordinator.run_distribute_coordinator(
self._dump_worker_context,
MockStrategy(between_graph=False),
cluster_spec=None)
# There is only a "None" task.
self.assertEqual(len(self._worker_context), 1)
self.assertTrue("None" in self._worker_context)
self.assertEqual(len(self._worker_context["None"]), 1)
# Check whether each task has the right master_target, num_workers, is_chief
# and distributed_mode.
self.assertEqual(self._worker_context["None"][0], ("", 0, True, False))
def testBetweenGraphContextWithChief(self):
# Adds a chief node, so there are NUM_WORKERS + 1 workers in total.
cluster_spec = copy.deepcopy(self._cluster_spec)
cluster_spec[CHIEF] = ["fake_chief"]
# Dumps the task contexts to the self._worker_context dict.
distribute_coordinator.run_distribute_coordinator(
self._dump_worker_context,
MockStrategy(between_graph=True),
cluster_spec=cluster_spec,
rpc_layer="grpc")
    # There is one CHIEF task and there are three WORKER tasks.
self.assertEqual(len(self._worker_context), 2)
self.assertTrue(CHIEF in self._worker_context)
self.assertTrue(WORKER in self._worker_context)
self.assertEqual(len(self._worker_context[CHIEF]), 1)
self.assertEqual(len(self._worker_context[WORKER]), NUM_WORKERS)
# Check whether each task has the right master_target, num_workers, is_chief
# and distributed_mode.
self.assertEqual(self._worker_context[CHIEF][0],
("grpc://fake_chief", 4, True, True))
self.assertEqual(
self._worker_context[WORKER][0],
(_bytes_to_str(self._workers[0].target), NUM_WORKERS + 1, False, True))
self.assertEqual(
self._worker_context[WORKER][1],
(_bytes_to_str(self._workers[1].target), NUM_WORKERS + 1, False, True))
self.assertEqual(
self._worker_context[WORKER][2],
(_bytes_to_str(self._workers[2].target), NUM_WORKERS + 1, False, True))
def testInGraphContextWithEval(self):
    # Adds an EVALUATOR job.
cluster_spec = copy.deepcopy(self._cluster_spec)
cluster_spec[EVALUATOR] = ["fake_evaluator"]
# Dumps the task contexts to the self._worker_context dict.
distribute_coordinator.run_distribute_coordinator(
self._dump_worker_context,
MockStrategy(between_graph=False),
cluster_spec=cluster_spec,
rpc_layer=None)
# There are one "None" task and one EVALUATOR task.
self.assertEqual(len(self._worker_context), 2)
self.assertTrue("None" in self._worker_context)
self.assertTrue(EVALUATOR in self._worker_context)
self.assertEqual(len(self._worker_context["None"]), 1)
self.assertEqual(len(self._worker_context[EVALUATOR]), 1)
# Check whether each task has the right master_target, num_workers, is_chief
# and distributed_mode.
self.assertEqual(self._worker_context["None"][0], (_strip_protocol(
_bytes_to_str(self._workers[0].target)), 3, True, True))
self.assertEqual(self._worker_context[EVALUATOR][0],
("fake_evaluator", 3, True, False))
class DistributeCoordinatorTestInpendentWorkerMode(
DistributeCoordinatorTestBase):
def testInGraph(self):
cluster_spec = self._create_cluster_spec(num_workers=NUM_WORKERS)
threads = self._run_multiple_coordinator_in_threads(
self._in_graph_worker_fn,
MockStrategy(between_graph=False),
cluster_spec,
mode=INDEPENDENT_WORKER)
threads[WORKER][0].join()
self.assertEqual(self._result_correct, 1)
def testBetweenGraph(self):
cluster_spec = self._create_cluster_spec(
num_workers=NUM_WORKERS, num_ps=NUM_PS)
threads = self._run_multiple_coordinator_in_threads(
self._between_graph_worker_fn,
MockStrategy(between_graph=True),
cluster_spec,
mode=INDEPENDENT_WORKER)
for task_id in range(NUM_WORKERS):
threads[WORKER][task_id].join()
# Each finished worker will increment self._result_correct.
self.assertEqual(self._result_correct, NUM_WORKERS)
@test_util.run_v1_only("MonitoredSession removed from v2")
def testBetweenGraphWithMonitoredSession(self):
cluster_spec = self._create_cluster_spec(
num_workers=NUM_WORKERS, num_ps=NUM_PS)
threads = self._run_multiple_coordinator_in_threads(
self._between_graph_with_monitored_session,
MockStrategy(between_graph=True),
cluster_spec,
mode=INDEPENDENT_WORKER)
for task_id in range(NUM_WORKERS):
threads[WORKER][task_id].join()
# Each finished worker will increment self._result_correct.
self.assertEqual(self._result_correct, NUM_WORKERS)
def testBetweenGraphContext(self):
cluster_spec = self._create_cluster_spec(num_workers=NUM_WORKERS)
# Dumps the task contexts and std server arguments.
with test.mock.patch.object(distribute_coordinator, "_run_std_server",
self._run_mock_std_server):
threads = self._run_multiple_coordinator_in_threads(
self._dump_worker_context,
MockStrategy(between_graph=True),
cluster_spec,
mode=INDEPENDENT_WORKER,
rpc_layer=None)
for task_id in range(NUM_WORKERS):
threads[WORKER][task_id].join()
# There is only one type of task and three such tasks.
self.assertEqual(len(self._worker_context), 1)
self.assertTrue(WORKER in self._worker_context)
self.assertEqual(len(self._worker_context[WORKER]), NUM_WORKERS)
# Check whether each task has the right master_target, num_workers, is_chief
# and distributed_mode.
self.assertEqual(
self._worker_context[WORKER][0],
(_bytes_to_str(cluster_spec[WORKER][0]), NUM_WORKERS, True, True))
self.assertEqual(
self._worker_context[WORKER][1],
(_bytes_to_str(cluster_spec[WORKER][1]), NUM_WORKERS, False, True))
self.assertEqual(
self._worker_context[WORKER][2],
(_bytes_to_str(cluster_spec[WORKER][2]), NUM_WORKERS, False, True))
# Make sure each worker runs a std server.
self.assertEqual(len(self._std_servers), 1)
self.assertTrue(WORKER in self._std_servers)
self.assertEqual(len(self._std_servers[WORKER]), 3)
self.assertFalse(self._std_servers[WORKER][0].joined)
self.assertFalse(self._std_servers[WORKER][1].joined)
self.assertFalse(self._std_servers[WORKER][2].joined)
def testBetweenGraphStrategyProperties(self):
cluster_spec = self._create_cluster_spec(num_workers=NUM_WORKERS)
# Dumps properties of the strategy objects.
with test.mock.patch.object(distribute_coordinator, "_run_std_server",
self._run_mock_std_server):
threads = self._run_multiple_coordinator_in_threads(
self._dump_strategy_property,
MockStrategy(between_graph=True, should_init=True),
cluster_spec,
mode=INDEPENDENT_WORKER,
rpc_layer=None)
for task_id in range(NUM_WORKERS):
threads[WORKER][task_id].join()
    # There is only one type of task and there are three such tasks.
self.assertEqual(len(self._strategy_property), 1)
self.assertTrue(WORKER in self._strategy_property)
self.assertEqual(len(self._strategy_property[WORKER]), NUM_WORKERS)
# Check whether each task has the right properties of should_init,
# should_checkpoint and should_save_summary.
self.assertEqual(self._strategy_property[WORKER][0], (True, True, True))
self.assertEqual(self._strategy_property[WORKER][1], (True, False, False))
self.assertEqual(self._strategy_property[WORKER][2], (True, False, False))
def testInGraphContext(self):
cluster_spec = self._create_cluster_spec(num_workers=NUM_WORKERS)
# Dumps the task contexts and std server arguments.
with test.mock.patch.object(distribute_coordinator, "_run_std_server",
self._run_mock_std_server):
threads = self._run_multiple_coordinator_in_threads(
self._dump_worker_context,
MockStrategy(between_graph=False),
cluster_spec,
mode=INDEPENDENT_WORKER,
rpc_layer=None)
for task_id in range(NUM_WORKERS):
threads[WORKER][task_id].join()
# There is only a "None" task in the dumped task context.
self.assertEqual(len(self._worker_context), 1)
self.assertTrue("None" in self._worker_context)
self.assertEqual(len(self._worker_context["None"]), 1)
# Check whether each task has the right master_target, num_workers, is_chief
# and distributed_mode.
self.assertEqual(
self._worker_context["None"][0],
(_bytes_to_str(cluster_spec[WORKER][0]), NUM_WORKERS, True, True))
# Make sure each worker runs a std server.
self.assertEqual(len(self._std_servers), 1)
self.assertTrue(WORKER in self._std_servers)
self.assertEqual(len(self._std_servers[WORKER]), 3)
self.assertFalse(self._std_servers[WORKER][0].joined)
self.assertTrue(self._std_servers[WORKER][1].joined)
self.assertTrue(self._std_servers[WORKER][2].joined)
def testInGraphContextWithEval(self):
    # Adds an EVALUATOR job.
cluster_spec = self._create_cluster_spec(
num_workers=NUM_WORKERS, has_eval=True)
# Dumps the task contexts and std server arguments.
with test.mock.patch.object(distribute_coordinator, "_run_std_server",
self._run_mock_std_server):
threads = self._run_multiple_coordinator_in_threads(
self._dump_worker_context,
MockStrategy(between_graph=False),
cluster_spec,
mode=INDEPENDENT_WORKER,
rpc_layer=None)
for task_id in range(NUM_WORKERS):
threads[WORKER][task_id].join()
threads[EVALUATOR][0].join()
# There are one "None" task and one EVALUATOR task.
self.assertEqual(len(self._worker_context), 2)
self.assertTrue("None" in self._worker_context)
self.assertTrue(EVALUATOR in self._worker_context)
self.assertEqual(len(self._worker_context["None"]), 1)
self.assertEqual(len(self._worker_context[EVALUATOR]), 1)
# Check whether each task has the right master_target, num_workers, is_chief
# and distributed_mode.
self.assertEqual(self._worker_context["None"][0],
(_bytes_to_str(cluster_spec[WORKER][0]), 3, True, True))
self.assertEqual(self._worker_context[EVALUATOR][0],
(cluster_spec[EVALUATOR][0], 3, True, False))
# Make sure each worker runs a std server.
self.assertEqual(len(self._std_servers), 2)
self.assertTrue(WORKER in self._std_servers)
self.assertTrue(EVALUATOR in self._std_servers)
self.assertEqual(len(self._std_servers[WORKER]), 3)
self.assertEqual(len(self._std_servers[EVALUATOR]), 1)
self.assertFalse(self._std_servers[WORKER][0].joined)
self.assertTrue(self._std_servers[WORKER][1].joined)
self.assertTrue(self._std_servers[WORKER][2].joined)
self.assertFalse(self._std_servers[EVALUATOR][0].joined)
def testRunStdServerInGoogleEnvironment(self):
cluster_spec = {"worker": ["fake_worker"], "ps": ["localhost:0"]}
tf_config = {"cluster": cluster_spec, "environment": "google"}
joined = [False]
def _fake_sleep(_):
joined[0] = True
original_sys_exit(0)
def _thread_fn(cluster_spec):
distribute_coordinator.run_distribute_coordinator(
None,
MockStrategy(between_graph=True),
mode=INDEPENDENT_WORKER,
cluster_spec=cluster_spec,
task_type="ps",
task_id=0)
with test.mock.patch.dict(
"os.environ",
{"TF_CONFIG": json.dumps(tf_config)}), test.mock.patch.object(
time, "sleep", _fake_sleep):
t = threading.Thread(target=_thread_fn, args=(cluster_spec,))
t.start()
t.join()
self.assertTrue(joined[0])
def testRpcLayerEnvironmentVariable(self):
cluster_spec = {"worker": ["fake_worker"], "ps": ["fake_ps"]}
tf_config = {"cluster": cluster_spec, "rpc_layer": "cake"}
rpc_layer_from_coordinator = [None]
def _run_mock_server(cluster_spec=None,
task_type=None,
task_id=None,
session_config=None,
rpc_layer=None,
environment=None):
del cluster_spec, task_type, task_id, session_config, environment
rpc_layer_from_coordinator[0] = rpc_layer
return MockServer()
with test.mock.patch.dict(
"os.environ",
{"TF_CONFIG": json.dumps(tf_config)}), test.mock.patch.object(
distribute_coordinator, "_run_std_server", _run_mock_server):
distribute_coordinator.run_distribute_coordinator(
None,
MockStrategy(between_graph=True),
mode=INDEPENDENT_WORKER,
cluster_spec=cluster_spec,
task_type="ps",
task_id=0)
self.assertEqual(rpc_layer_from_coordinator[0], "cake")
class StrategyConfigureTest(test.TestCase):
def setUp(self):
self._device_filters = []
self._intra_op_parallelism_threads = None
self._inter_op_parallelism_threads = None
super(StrategyConfigureTest, self).setUp()
def _dump_device_filters(self, *args, **kwargs):
session_config = kwargs.get("session_config", None)
self._device_filters.extend(session_config.device_filters)
self._intra_op_parallelism_threads = (
session_config.intra_op_parallelism_threads)
self._inter_op_parallelism_threads = (
session_config.inter_op_parallelism_threads)
return MockServer()
def _worker_fn(self, strategy):
worker_context = distribute_coordinator_context.get_current_worker_context()
session_config = worker_context._session_config
self._device_filters.extend(session_config.device_filters)
self._intra_op_parallelism_threads = (
session_config.intra_op_parallelism_threads)
self._inter_op_parallelism_threads = (
session_config.inter_op_parallelism_threads)
return MockServer()
def test_session_config_in_std_server(self):
cluster_spec = {"worker": ["fake_worker"], "ps": ["fake_ps"]}
tf_config = {"cluster": cluster_spec}
with test.mock.patch.dict(
"os.environ",
{"TF_CONFIG": json.dumps(tf_config)}), test.mock.patch.object(
distribute_coordinator, "_run_std_server",
self._dump_device_filters):
distribute_coordinator.run_distribute_coordinator(
lambda _: None,
MockStrategy(between_graph=True),
mode=INDEPENDENT_WORKER,
cluster_spec=cluster_spec,
task_type="worker",
task_id=0)
self.assertEqual(self._intra_op_parallelism_threads, 1)
self.assertEqual(self._inter_op_parallelism_threads, 0)
def test_session_config_in_session_creator(self):
cluster_spec = {"worker": ["localhost:0"]}
tf_config = {"cluster": cluster_spec}
# Reset the saved Server state.
distribute_coordinator._thread_local = threading.local() # pylint: disable=protected-access
with test.mock.patch.dict("os.environ",
{"TF_CONFIG": json.dumps(tf_config)}):
distribute_coordinator.run_distribute_coordinator(
self._worker_fn,
MockStrategy(between_graph=True),
mode=INDEPENDENT_WORKER,
cluster_spec=cluster_spec,
task_type="worker",
task_id=0)
self.assertEqual(self._device_filters, ["/job:worker/task:0", "/job:ps"])
self.assertEqual(self._intra_op_parallelism_threads, 2)
self.assertEqual(self._inter_op_parallelism_threads, 0)
def test_eval_strategy_configure(self):
cluster_spec = {"evaluator": ["localhost:0"]}
tf_config = {"cluster": cluster_spec}
with test.mock.patch.dict("os.environ",
{"TF_CONFIG": json.dumps(tf_config)}):
distribute_coordinator.run_distribute_coordinator(
lambda _: None,
MockStrategy(between_graph=False),
eval_fn=self._worker_fn,
eval_strategy=MockStrategy(between_graph=True),
mode=INDEPENDENT_WORKER,
cluster_spec=cluster_spec,
task_type="evaluator",
task_id=0)
self.assertEqual(self._device_filters, ["/job:somejob"])
self.assertEqual(self._intra_op_parallelism_threads, 0)
self.assertEqual(self._inter_op_parallelism_threads, 2)
class RunStandardTensorflowServerTest(test.TestCase):
def test_std_server_arguments(self):
cs = {"worker": ["fake_worker"], "ps": ["fake_ps"]}
tf_config = {"cluster": cs, "task": {"type": "ps", "id": 0}}
def _mock_run_std_server(cluster_spec=None,
task_type=None,
task_id=None,
session_config=None,
rpc_layer=None):
self.assertEqual(cluster_spec.as_dict(), cs)
self.assertEqual(task_type, "ps")
self.assertEqual(task_id, 0)
self.assertEqual(session_config.experimental.collective_group_leader,
"/job:worker/replica:0/task:0")
self.assertEqual(session_config.intra_op_parallelism_threads, 1)
self.assertEqual(rpc_layer, "grpc")
return MockServer()
with test.mock.patch.dict(
"os.environ",
{"TF_CONFIG": json.dumps(tf_config)}), test.mock.patch.object(
distribute_coordinator, "_run_std_server", _mock_run_std_server):
session_config = config_pb2.ConfigProto()
session_config.intra_op_parallelism_threads = 1
mock_server = distribute_coordinator.run_standard_tensorflow_server(
session_config)
self.assertTrue(mock_server.started)
if __name__ == "__main__":
  # TODO(yuefengz): find a smart way to terminate std server threads.
with test.mock.patch.object(sys, "exit", os._exit):
# Reduce `recovery_wait_secs` from 30 seconds so the test completes quickly.
orig_init = session_manager.SessionManager.__init__
def new_init(*args, **kwargs):
kwargs.pop("recovery_wait_secs", None)
kwargs["recovery_wait_secs"] = 0.5
orig_init(*args, **kwargs)
session_manager.SessionManager.__init__ = new_init
test.main()
|
http.py
|
# -*- coding: utf-8 -*-
"""
This module contains some helpers to deal with the real http
world.
"""
import threading
import logging
import select
import socket
import time
import os
import six
import webob
from six.moves import http_client
from waitress.server import TcpWSGIServer
def get_free_port():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', 0))
ip, port = s.getsockname()
s.close()
ip = os.environ.get('WEBTEST_SERVER_BIND', '127.0.0.1')
return ip, port
def check_server(host, port, path_info='/', timeout=3, retries=30):
"""Perform a request until the server reply"""
if retries < 0:
return 0
time.sleep(.3)
for i in range(retries):
try:
conn = http_client.HTTPConnection(host, int(port), timeout=timeout)
conn.request('GET', path_info)
res = conn.getresponse()
return res.status
except (socket.error, http_client.HTTPException):
time.sleep(.3)
return 0
class StopableWSGIServer(TcpWSGIServer):
"""StopableWSGIServer is a TcpWSGIServer which run in a separated thread.
This allow to use tools like casperjs or selenium.
Server instance have an ``application_url`` attribute formated with the
server host and port.
"""
was_shutdown = False
def __init__(self, application, *args, **kwargs):
super(StopableWSGIServer, self).__init__(self.wrapper, *args, **kwargs)
self.runner = None
self.test_app = application
self.application_url = 'http://%s:%s/' % (self.adj.host, self.adj.port)
def wrapper(self, environ, start_response):
"""Wrap the wsgi application to override some path:
``/__application__``: allow to ping the server.
``/__file__?__file__={path}``: serve the file found at ``path``
"""
if '__file__' in environ['PATH_INFO']:
req = webob.Request(environ)
resp = webob.Response()
resp.content_type = 'text/html; charset=UTF-8'
filename = req.params.get('__file__')
if os.path.isfile(filename):
body = open(filename, 'rb').read()
body = body.replace(six.b('http://localhost/'),
six.b('http://%s/' % req.host))
resp.body = body
else:
resp.status = '404 Not Found'
return resp(environ, start_response)
elif '__application__' in environ['PATH_INFO']:
return webob.Response('server started')(environ, start_response)
return self.test_app(environ, start_response)
def run(self):
"""Run the server"""
try:
self.asyncore.loop(.5, map=self._map)
except select.error: # pragma: no cover
if not self.was_shutdown:
raise
def shutdown(self):
"""Shutdown the server"""
# avoid showing traceback related to asyncore
self.was_shutdown = True
self.logger.setLevel(logging.FATAL)
while self._map:
triggers = list(self._map.values())
for trigger in triggers:
trigger.handle_close()
self.maintenance(0)
self.task_dispatcher.shutdown()
return True
@classmethod
def create(cls, application, **kwargs):
"""Start a server to serve ``application``. Return a server
instance."""
host, port = get_free_port()
if 'port' not in kwargs:
kwargs['port'] = port
if 'host' not in kwargs:
kwargs['host'] = host
if 'expose_tracebacks' not in kwargs:
kwargs['expose_tracebacks'] = True
server = cls(application, **kwargs)
server.runner = threading.Thread(target=server.run)
server.runner.daemon = True
server.runner.start()
return server
def wait(self, retries=30):
"""Wait until the server is started"""
running = check_server(self.adj.host, self.adj.port,
'/__application__', retries=retries)
if running:
return True
try:
self.shutdown()
finally:
return False
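# Illustrative usage sketch (added for clarity; not part of the original
# module): start a throwaway WSGI app in a background thread, wait until it
# answers on ``/__application__``, then shut it down.
def _example_usage():  # pragma: no cover
    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain; charset=UTF-8')])
        return [six.b('hello')]
    server = StopableWSGIServer.create(app)
    try:
        assert server.wait(), 'server did not start'
        assert check_server(server.adj.host, server.adj.port) == 200
    finally:
        server.shutdown()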
|
athenad.py
|
#!/usr/bin/env python3
import base64
import bz2
import hashlib
import io
import json
import os
import queue
import random
import select
import socket
import subprocess
import sys
import tempfile
import threading
import time
from collections import namedtuple
from datetime import datetime
from functools import partial
from typing import Any, Dict
import requests
from jsonrpc import JSONRPCResponseManager, dispatcher
from websocket import (ABNF, WebSocketException, WebSocketTimeoutException,
create_connection)
import cereal.messaging as messaging
from cereal import log
from cereal.services import service_list
from common.api import Api
from common.basedir import PERSIST
from common.file_helpers import CallbackReader
from common.params import Params
from common.realtime import sec_since_boot, set_core_affinity
from selfdrive.hardware import HARDWARE, PC, AGNOS
from selfdrive.loggerd.config import ROOT
from selfdrive.loggerd.xattr_cache import getxattr, setxattr
from selfdrive.statsd import STATS_DIR
from selfdrive.swaglog import SWAGLOG_DIR, cloudlog
from selfdrive.version import get_commit, get_origin, get_short_branch, get_version
ATHENA_HOST = os.getenv('ATHENA_HOST', 'wss://athena.comma.ai')
HANDLER_THREADS = int(os.getenv('HANDLER_THREADS', "4"))
LOCAL_PORT_WHITELIST = {8022}
LOG_ATTR_NAME = 'user.upload'
LOG_ATTR_VALUE_MAX_UNIX_TIME = int.to_bytes(2147483647, 4, sys.byteorder)
RECONNECT_TIMEOUT_S = 70
RETRY_DELAY = 10 # seconds
MAX_RETRY_COUNT = 30 # Try for at most 5 minutes if upload fails immediately
MAX_AGE = 31 * 24 * 3600 # seconds
WS_FRAME_SIZE = 4096
NetworkType = log.DeviceState.NetworkType
dispatcher["echo"] = lambda s: s
recv_queue: Any = queue.Queue()
send_queue: Any = queue.Queue()
upload_queue: Any = queue.Queue()
low_priority_send_queue: Any = queue.Queue()
log_recv_queue: Any = queue.Queue()
cancelled_uploads: Any = set()
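# Note: namedtuple defaults apply right-to-left, so retry_count=0,
# current=False, progress=0 and allow_cellular=False.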
UploadItem = namedtuple('UploadItem', ['path', 'url', 'headers', 'created_at', 'id', 'retry_count', 'current', 'progress', 'allow_cellular'], defaults=(0, False, 0, False))
cur_upload_items: Dict[int, Any] = {}
def strip_bz2_extension(fn):
if fn.endswith('.bz2'):
return fn[:-4]
return fn
class AbortTransferException(Exception):
pass
class UploadQueueCache():
params = Params()
@staticmethod
def initialize(upload_queue):
try:
upload_queue_json = UploadQueueCache.params.get("AthenadUploadQueue")
if upload_queue_json is not None:
for item in json.loads(upload_queue_json):
upload_queue.put(UploadItem(**item))
except Exception:
cloudlog.exception("athena.UploadQueueCache.initialize.exception")
@staticmethod
def cache(upload_queue):
try:
items = [i._asdict() for i in upload_queue.queue if i.id not in cancelled_uploads]
UploadQueueCache.params.put("AthenadUploadQueue", json.dumps(items))
except Exception:
cloudlog.exception("athena.UploadQueueCache.cache.exception")
def handle_long_poll(ws):
end_event = threading.Event()
threads = [
threading.Thread(target=ws_recv, args=(ws, end_event), name='ws_recv'),
threading.Thread(target=ws_send, args=(ws, end_event), name='ws_send'),
threading.Thread(target=upload_handler, args=(end_event,), name='upload_handler'),
threading.Thread(target=log_handler, args=(end_event,), name='log_handler'),
threading.Thread(target=stat_handler, args=(end_event,), name='stat_handler'),
] + [
threading.Thread(target=jsonrpc_handler, args=(end_event,), name=f'worker_{x}')
for x in range(HANDLER_THREADS)
]
for thread in threads:
thread.start()
try:
while not end_event.is_set():
time.sleep(0.1)
except (KeyboardInterrupt, SystemExit):
end_event.set()
raise
finally:
for thread in threads:
cloudlog.debug(f"athena.joining {thread.name}")
thread.join()
def jsonrpc_handler(end_event):
dispatcher["startLocalProxy"] = partial(startLocalProxy, end_event)
while not end_event.is_set():
try:
data = recv_queue.get(timeout=1)
if "method" in data:
cloudlog.debug(f"athena.jsonrpc_handler.call_method {data}")
response = JSONRPCResponseManager.handle(data, dispatcher)
send_queue.put_nowait(response.json)
elif "id" in data and ("result" in data or "error" in data):
log_recv_queue.put_nowait(data)
else:
raise Exception("not a valid request or response")
except queue.Empty:
pass
except Exception as e:
cloudlog.exception("athena jsonrpc handler failed")
send_queue.put_nowait(json.dumps({"error": str(e)}))
def retry_upload(tid: int, end_event: threading.Event, increase_count: bool = True) -> None:
if cur_upload_items[tid].retry_count < MAX_RETRY_COUNT:
item = cur_upload_items[tid]
new_retry_count = item.retry_count + 1 if increase_count else item.retry_count
item = item._replace(
retry_count=new_retry_count,
progress=0,
current=False
)
upload_queue.put_nowait(item)
UploadQueueCache.cache(upload_queue)
cur_upload_items[tid] = None
for _ in range(RETRY_DELAY):
time.sleep(1)
if end_event.is_set():
break
def upload_handler(end_event: threading.Event) -> None:
sm = messaging.SubMaster(['deviceState'])
tid = threading.get_ident()
while not end_event.is_set():
cur_upload_items[tid] = None
try:
cur_upload_items[tid] = upload_queue.get(timeout=1)._replace(current=True)
if cur_upload_items[tid].id in cancelled_uploads:
cancelled_uploads.remove(cur_upload_items[tid].id)
continue
# Remove item if too old
age = datetime.now() - datetime.fromtimestamp(cur_upload_items[tid].created_at / 1000)
if age.total_seconds() > MAX_AGE:
cloudlog.event("athena.upload_handler.expired", item=cur_upload_items[tid], error=True)
continue
# Check if uploading over metered connection is allowed
sm.update(0)
metered = sm['deviceState'].networkMetered
network_type = sm['deviceState'].networkType.raw
if metered and (not cur_upload_items[tid].allow_cellular):
retry_upload(tid, end_event, False)
continue
try:
def cb(sz, cur):
# Abort transfer if connection changed to metered after starting upload
sm.update(0)
metered = sm['deviceState'].networkMetered
if metered and (not cur_upload_items[tid].allow_cellular):
raise AbortTransferException
cur_upload_items[tid] = cur_upload_items[tid]._replace(progress=cur / sz if sz else 1)
fn = cur_upload_items[tid].path
try:
sz = os.path.getsize(fn)
except OSError:
sz = -1
cloudlog.event("athena.upload_handler.upload_start", fn=fn, sz=sz, network_type=network_type, metered=metered, retry_count=cur_upload_items[tid].retry_count)
response = _do_upload(cur_upload_items[tid], cb)
if response.status_code not in (200, 201, 401, 403, 412):
cloudlog.event("athena.upload_handler.retry", status_code=response.status_code, fn=fn, sz=sz, network_type=network_type, metered=metered)
retry_upload(tid, end_event)
else:
cloudlog.event("athena.upload_handler.success", fn=fn, sz=sz, network_type=network_type, metered=metered)
UploadQueueCache.cache(upload_queue)
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError, requests.exceptions.SSLError):
cloudlog.event("athena.upload_handler.timeout", fn=fn, sz=sz, network_type=network_type, metered=metered)
retry_upload(tid, end_event)
except AbortTransferException:
cloudlog.event("athena.upload_handler.abort", fn=fn, sz=sz, network_type=network_type, metered=metered)
retry_upload(tid, end_event, False)
except queue.Empty:
pass
except Exception:
cloudlog.exception("athena.upload_handler.exception")
def _do_upload(upload_item, callback=None):
path = upload_item.path
compress = False
  # If the file does not exist, but does exist without the .bz2 extension, we will compress on the fly
if not os.path.exists(path) and os.path.exists(strip_bz2_extension(path)):
path = strip_bz2_extension(path)
compress = True
with open(path, "rb") as f:
if compress:
cloudlog.event("athena.upload_handler.compress", fn=path, fn_orig=upload_item.path)
data = bz2.compress(f.read())
size = len(data)
data = io.BytesIO(data)
else:
size = os.fstat(f.fileno()).st_size
data = f
if callback:
data = CallbackReader(data, callback, size)
return requests.put(upload_item.url,
data=data,
headers={**upload_item.headers, 'Content-Length': str(size)},
timeout=30)
# security: user should be able to request any message from their car
@dispatcher.add_method
def getMessage(service=None, timeout=1000):
if service is None or service not in service_list:
raise Exception("invalid service")
socket = messaging.sub_sock(service, timeout=timeout)
ret = messaging.recv_one(socket)
if ret is None:
raise TimeoutError
return ret.to_dict()
@dispatcher.add_method
def getVersion() -> Dict[str, str]:
return {
"version": get_version(),
"remote": get_origin(''),
"branch": get_short_branch(''),
"commit": get_commit(default=''),
}
@dispatcher.add_method
def setNavDestination(latitude=0, longitude=0, place_name=None, place_details=None):
destination = {
"latitude": latitude,
"longitude": longitude,
"place_name": place_name,
"place_details": place_details,
}
Params().put("NavDestination", json.dumps(destination))
return {"success": 1}
def scan_dir(path, prefix):
files = list()
# only walk directories that match the prefix
# (glob and friends traverse entire dir tree)
with os.scandir(path) as i:
for e in i:
rel_path = os.path.relpath(e.path, ROOT)
if e.is_dir(follow_symlinks=False):
# add trailing slash
rel_path = os.path.join(rel_path, '')
        # if prefix is a partial dir name, current dir will start with prefix
        # if prefix is a partial file name, prefix will start with the dir name
if rel_path.startswith(prefix) or prefix.startswith(rel_path):
files.extend(scan_dir(e.path, prefix))
else:
if rel_path.startswith(prefix):
files.append(rel_path)
return files
@dispatcher.add_method
def listDataDirectory(prefix=''):
return scan_dir(ROOT, prefix)
@dispatcher.add_method
def reboot():
sock = messaging.sub_sock("deviceState", timeout=1000)
ret = messaging.recv_one(sock)
if ret is None or ret.deviceState.started:
raise Exception("Reboot unavailable")
def do_reboot():
time.sleep(2)
HARDWARE.reboot()
threading.Thread(target=do_reboot).start()
return {"success": 1}
@dispatcher.add_method
def uploadFileToUrl(fn, url, headers):
return uploadFilesToUrls([{
"fn": fn,
"url": url,
"headers": headers,
}])
@dispatcher.add_method
def uploadFilesToUrls(files_data):
items = []
failed = []
for file in files_data:
fn = file.get('fn', '')
if len(fn) == 0 or fn[0] == '/' or '..' in fn or 'url' not in file:
failed.append(fn)
continue
path = os.path.join(ROOT, fn)
if not os.path.exists(path) and not os.path.exists(strip_bz2_extension(path)):
failed.append(fn)
continue
item = UploadItem(
path=path,
url=file['url'],
headers=file.get('headers', {}),
created_at=int(time.time() * 1000),
id=None,
allow_cellular=file.get('allow_cellular', False),
)
upload_id = hashlib.sha1(str(item).encode()).hexdigest()
item = item._replace(id=upload_id)
upload_queue.put_nowait(item)
items.append(item._asdict())
UploadQueueCache.cache(upload_queue)
resp = {"enqueued": len(items), "items": items}
if failed:
resp["failed"] = failed
return resp
@dispatcher.add_method
def listUploadQueue():
items = list(upload_queue.queue) + list(cur_upload_items.values())
return [i._asdict() for i in items if (i is not None) and (i.id not in cancelled_uploads)]
@dispatcher.add_method
def cancelUpload(upload_id):
if not isinstance(upload_id, list):
upload_id = [upload_id]
uploading_ids = {item.id for item in list(upload_queue.queue)}
cancelled_ids = uploading_ids.intersection(upload_id)
if len(cancelled_ids) == 0:
return 404
cancelled_uploads.update(cancelled_ids)
return {"success": 1}
@dispatcher.add_method
def primeActivated(activated):
return {"success": 1}
@dispatcher.add_method
def setBandwithLimit(upload_speed_kbps, download_speed_kbps):
if not AGNOS:
return {"success": 0, "error": "only supported on AGNOS"}
try:
HARDWARE.set_bandwidth_limit(upload_speed_kbps, download_speed_kbps)
return {"success": 1}
except subprocess.CalledProcessError as e:
return {"success": 0, "error": "failed to set limit", "stdout": e.stdout, "stderr": e.stderr}
def startLocalProxy(global_end_event, remote_ws_uri, local_port):
try:
if local_port not in LOCAL_PORT_WHITELIST:
raise Exception("Requested local port not whitelisted")
cloudlog.debug("athena.startLocalProxy.starting")
dongle_id = Params().get("DongleId").decode('utf8')
identity_token = Api(dongle_id).get_token()
ws = create_connection(remote_ws_uri,
cookie="jwt=" + identity_token,
enable_multithread=True)
ssock, csock = socket.socketpair()
local_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
local_sock.connect(('127.0.0.1', local_port))
local_sock.setblocking(False)
proxy_end_event = threading.Event()
threads = [
threading.Thread(target=ws_proxy_recv, args=(ws, local_sock, ssock, proxy_end_event, global_end_event)),
threading.Thread(target=ws_proxy_send, args=(ws, local_sock, csock, proxy_end_event))
]
for thread in threads:
thread.start()
cloudlog.debug("athena.startLocalProxy.started")
return {"success": 1}
except Exception as e:
cloudlog.exception("athenad.startLocalProxy.exception")
raise e
@dispatcher.add_method
def getPublicKey():
if not os.path.isfile(PERSIST + '/comma/id_rsa.pub'):
return None
with open(PERSIST + '/comma/id_rsa.pub') as f:
return f.read()
@dispatcher.add_method
def getSshAuthorizedKeys():
return Params().get("GithubSshKeys", encoding='utf8') or ''
@dispatcher.add_method
def getSimInfo():
return HARDWARE.get_sim_info()
@dispatcher.add_method
def getNetworkType():
return HARDWARE.get_network_type()
@dispatcher.add_method
def getNetworkMetered():
network_type = HARDWARE.get_network_type()
return HARDWARE.get_network_metered(network_type)
@dispatcher.add_method
def getNetworks():
return HARDWARE.get_networks()
@dispatcher.add_method
def takeSnapshot():
from selfdrive.camerad.snapshot.snapshot import jpeg_write, snapshot
ret = snapshot()
if ret is not None:
def b64jpeg(x):
if x is not None:
f = io.BytesIO()
jpeg_write(f, x)
return base64.b64encode(f.getvalue()).decode("utf-8")
else:
return None
return {'jpegBack': b64jpeg(ret[0]),
'jpegFront': b64jpeg(ret[1])}
else:
raise Exception("not available while camerad is started")
def get_logs_to_send_sorted():
# TODO: scan once then use inotify to detect file creation/deletion
curr_time = int(time.time())
logs = []
for log_entry in os.listdir(SWAGLOG_DIR):
log_path = os.path.join(SWAGLOG_DIR, log_entry)
try:
time_sent = int.from_bytes(getxattr(log_path, LOG_ATTR_NAME), sys.byteorder)
except (ValueError, TypeError):
time_sent = 0
# assume send failed and we lost the response if sent more than one hour ago
if not time_sent or curr_time - time_sent > 3600:
logs.append(log_entry)
# excluding most recent (active) log file
return sorted(logs)[:-1]
def log_handler(end_event):
if PC:
return
log_files = []
last_scan = 0
while not end_event.is_set():
try:
curr_scan = sec_since_boot()
if curr_scan - last_scan > 10:
log_files = get_logs_to_send_sorted()
last_scan = curr_scan
# send one log
curr_log = None
if len(log_files) > 0:
log_entry = log_files.pop() # newest log file
cloudlog.debug(f"athena.log_handler.forward_request {log_entry}")
try:
curr_time = int(time.time())
log_path = os.path.join(SWAGLOG_DIR, log_entry)
setxattr(log_path, LOG_ATTR_NAME, int.to_bytes(curr_time, 4, sys.byteorder))
with open(log_path) as f:
jsonrpc = {
"method": "forwardLogs",
"params": {
"logs": f.read()
},
"jsonrpc": "2.0",
"id": log_entry
}
low_priority_send_queue.put_nowait(json.dumps(jsonrpc))
curr_log = log_entry
except OSError:
pass # file could be deleted by log rotation
# wait for response up to ~100 seconds
# always read queue at least once to process any old responses that arrive
for _ in range(100):
if end_event.is_set():
break
try:
log_resp = json.loads(log_recv_queue.get(timeout=1))
log_entry = log_resp.get("id")
log_success = "result" in log_resp and log_resp["result"].get("success")
cloudlog.debug(f"athena.log_handler.forward_response {log_entry} {log_success}")
if log_entry and log_success:
log_path = os.path.join(SWAGLOG_DIR, log_entry)
try:
setxattr(log_path, LOG_ATTR_NAME, LOG_ATTR_VALUE_MAX_UNIX_TIME)
except OSError:
pass # file could be deleted by log rotation
if curr_log == log_entry:
break
except queue.Empty:
if curr_log is None:
break
except Exception:
cloudlog.exception("athena.log_handler.exception")
def stat_handler(end_event):
while not end_event.is_set():
last_scan = 0
curr_scan = sec_since_boot()
try:
if curr_scan - last_scan > 10:
stat_filenames = list(filter(lambda name: not name.startswith(tempfile.gettempprefix()), os.listdir(STATS_DIR)))
if len(stat_filenames) > 0:
stat_path = os.path.join(STATS_DIR, stat_filenames[0])
with open(stat_path) as f:
jsonrpc = {
"method": "storeStats",
"params": {
"stats": f.read()
},
"jsonrpc": "2.0",
"id": stat_filenames[0]
}
low_priority_send_queue.put_nowait(json.dumps(jsonrpc))
os.remove(stat_path)
last_scan = curr_scan
except Exception:
cloudlog.exception("athena.stat_handler.exception")
time.sleep(0.1)
def ws_proxy_recv(ws, local_sock, ssock, end_event, global_end_event):
while not (end_event.is_set() or global_end_event.is_set()):
try:
data = ws.recv()
local_sock.sendall(data)
except WebSocketTimeoutException:
pass
except Exception:
cloudlog.exception("athenad.ws_proxy_recv.exception")
break
cloudlog.debug("athena.ws_proxy_recv closing sockets")
ssock.close()
local_sock.close()
cloudlog.debug("athena.ws_proxy_recv done closing sockets")
end_event.set()
def ws_proxy_send(ws, local_sock, signal_sock, end_event):
while not end_event.is_set():
try:
r, _, _ = select.select((local_sock, signal_sock), (), ())
if r:
if r[0].fileno() == signal_sock.fileno():
# got end signal from ws_proxy_recv
end_event.set()
break
data = local_sock.recv(4096)
if not data:
# local_sock is dead
end_event.set()
break
ws.send(data, ABNF.OPCODE_BINARY)
except Exception:
cloudlog.exception("athenad.ws_proxy_send.exception")
end_event.set()
cloudlog.debug("athena.ws_proxy_send closing sockets")
signal_sock.close()
cloudlog.debug("athena.ws_proxy_send done closing sockets")
def ws_recv(ws, end_event):
last_ping = int(sec_since_boot() * 1e9)
while not end_event.is_set():
try:
opcode, data = ws.recv_data(control_frame=True)
if opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY):
if opcode == ABNF.OPCODE_TEXT:
data = data.decode("utf-8")
recv_queue.put_nowait(data)
elif opcode == ABNF.OPCODE_PING:
last_ping = int(sec_since_boot() * 1e9)
Params().put("LastAthenaPingTime", str(last_ping))
except WebSocketTimeoutException:
ns_since_last_ping = int(sec_since_boot() * 1e9) - last_ping
if ns_since_last_ping > RECONNECT_TIMEOUT_S * 1e9:
cloudlog.exception("athenad.ws_recv.timeout")
end_event.set()
except Exception:
cloudlog.exception("athenad.ws_recv.exception")
end_event.set()
def ws_send(ws, end_event):
while not end_event.is_set():
try:
try:
data = send_queue.get_nowait()
except queue.Empty:
data = low_priority_send_queue.get(timeout=1)
for i in range(0, len(data), WS_FRAME_SIZE):
frame = data[i:i+WS_FRAME_SIZE]
last = i + WS_FRAME_SIZE >= len(data)
opcode = ABNF.OPCODE_TEXT if i == 0 else ABNF.OPCODE_CONT
ws.send_frame(ABNF.create_frame(frame, opcode, last))
except queue.Empty:
pass
except Exception:
cloudlog.exception("athenad.ws_send.exception")
end_event.set()
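# Capped exponential backoff with jitter: main() sleeps a random number of
# seconds in [0, min(128, 2**retries)) before reconnecting.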
def backoff(retries):
return random.randrange(0, min(128, int(2 ** retries)))
def main():
try:
set_core_affinity([0, 1, 2, 3])
except Exception:
cloudlog.exception("failed to set core affinity")
params = Params()
dongle_id = params.get("DongleId", encoding='utf-8')
UploadQueueCache.initialize(upload_queue)
ws_uri = ATHENA_HOST + "/ws/v2/" + dongle_id
api = Api(dongle_id)
conn_retries = 0
while 1:
try:
cloudlog.event("athenad.main.connecting_ws", ws_uri=ws_uri)
ws = create_connection(ws_uri,
cookie="jwt=" + api.get_token(),
enable_multithread=True,
timeout=30.0)
cloudlog.event("athenad.main.connected_ws", ws_uri=ws_uri)
conn_retries = 0
cur_upload_items.clear()
handle_long_poll(ws)
except (KeyboardInterrupt, SystemExit):
break
except (ConnectionError, TimeoutError, WebSocketException):
conn_retries += 1
params.delete("LastAthenaPingTime")
except socket.timeout:
params.delete("LastAthenaPingTime")
except Exception:
cloudlog.exception("athenad.main.exception")
conn_retries += 1
params.delete("LastAthenaPingTime")
time.sleep(backoff(conn_retries))
if __name__ == "__main__":
main()
|
marathon_lb.py
|
#!/usr/bin/env python3
"""# marathon-lb
### Overview
The marathon-lb is a service discovery and load balancing tool
for Marathon based on HAProxy. It reads the Marathon task information
and dynamically generates HAProxy configuration details.
To gather the task information, marathon-lb needs to know where
to find Marathon. The service configuration details are stored in labels.
Every service port in Marathon can be configured independently.
### Configuration
Service configuration lives in Marathon via labels.
Marathon-lb just needs to know where to find Marathon.
### Command Line Usage
"""
import argparse
import hashlib
import json
import logging
import os
import os.path
import random
import re
import shlex
import signal
import stat
import subprocess
import sys
import threading
import time
import datetime
from itertools import cycle
from collections import defaultdict
from operator import attrgetter
from shutil import move, copy
from tempfile import mkstemp
import dateutil.parser
import requests
import pycurl
from common import (get_marathon_auth_params, set_logging_args,
set_marathon_auth_args, setup_logging, cleanup_json)
from config import ConfigTemplater, label_keys
from lrucache import LRUCache
from utils import (CurlHttpEventStream, get_task_ip_and_ports, ip_cache,
ServicePortAssigner)
logger = logging.getLogger('marathon_lb')
SERVICE_PORT_ASSIGNER = ServicePortAssigner()
class MarathonBackend(object):
def __init__(self, host, ip, port, draining):
self.host = host
"""
The host that is running this task.
"""
self.ip = ip
"""
The IP address used to access the task. For tasks using IP-per-task,
this is the actual IP address of the task; otherwise, it is the IP
address resolved from the hostname.
"""
self.port = port
"""
The port used to access a particular service on a task. For tasks
using IP-per-task, this is the actual port exposed by the task;
otherwise, it is the port exposed on the host.
"""
self.draining = draining
"""
Whether we should be draining access to this task in the LB.
"""
def __hash__(self):
return hash((self.host, self.port))
def __repr__(self):
return "MarathonBackend(%r, %r, %r)" % (self.host, self.ip, self.port)
class MarathonService(object):
def __init__(self, appId, servicePort, healthCheck, strictMode):
self.appId = appId
self.servicePort = servicePort
self.backends = set()
self.hostname = None
self.proxypath = None
self.revproxypath = None
self.redirpath = None
self.haproxy_groups = frozenset()
self.path = None
self.authRealm = None
self.authUser = None
self.authPasswd = None
self.sticky = False
self.enabled = not strictMode
self.redirectHttpToHttps = False
self.useHsts = False
self.sslCert = None
self.bindOptions = None
self.bindAddr = '*'
self.groups = frozenset()
self.mode = None
self.balance = 'roundrobin'
self.healthCheck = healthCheck
self.labels = {}
self.backend_weight = 0
self.network_allowed = None
self.healthcheck_port_index = None
if healthCheck:
if healthCheck['protocol'] == 'HTTP':
self.mode = 'http'
def add_backend(self, host, ip, port, draining):
self.backends.add(MarathonBackend(host, ip, port, draining))
def __hash__(self):
return hash(self.servicePort)
def __eq__(self, other):
return self.servicePort == other.servicePort
def __repr__(self):
return "MarathonService(%r, %r)" % (self.appId, self.servicePort)
class MarathonApp(object):
def __init__(self, marathon, appId, app):
self.app = app
self.groups = frozenset()
self.appId = appId
# port -> MarathonService
self.services = dict()
def __hash__(self):
return hash(self.appId)
def __eq__(self, other):
return self.appId == other.appId
class Marathon(object):
def __init__(self, hosts, health_check, strict_mode, auth, ca_cert=None):
# TODO(cmaloney): Support getting master list from zookeeper
self.__hosts = hosts
self.__health_check = health_check
self.__strict_mode = strict_mode
self.__auth = auth
self.__cycle_hosts = cycle(self.__hosts)
self.__verify = False
if ca_cert:
self.__verify = ca_cert
def api_req_raw(self, method, path, auth, body=None, **kwargs):
for host in self.__hosts:
path_str = os.path.join(host, 'v2')
for path_elem in path:
path_str = path_str + "/" + path_elem
response = requests.request(
method,
path_str,
auth=auth,
headers={
'Accept': 'application/json',
'Content-Type': 'application/json'
},
timeout=(3.05, 46),
**kwargs
)
logger.debug("%s %s", method, response.url)
if response.status_code == 200:
break
response.raise_for_status()
resp_json = cleanup_json(response.json())
if 'message' in resp_json:
response.reason = "%s (%s)" % (
response.reason,
resp_json['message'])
return response
def api_req(self, method, path, **kwargs):
data = self.api_req_raw(method, path, self.__auth,
verify=self.__verify, **kwargs).json()
return cleanup_json(data)
def create(self, app_json):
return self.api_req('POST', ['apps'], app_json)
def get_app(self, appid):
logger.info('fetching app %s', appid)
return self.api_req('GET', ['apps', appid])["app"]
# Lists all running apps.
def list(self):
logger.info('fetching apps')
return self.api_req('GET', ['apps'],
params={'embed': 'apps.tasks'})["apps"]
def health_check(self):
return self.__health_check
def strict_mode(self):
return self.__strict_mode
def tasks(self):
logger.info('fetching tasks')
return self.api_req('GET', ['tasks'])["tasks"]
def get_event_stream(self):
url = self.host + "/v2/events?plan-format=light&" + \
"event_type=status_update_event&" + \
"event_type=health_status_changed_event&" + \
"event_type=api_post_event"
return CurlHttpEventStream(url, self.__auth, self.__verify)
def iter_events(self, stream):
logger.info(
"SSE Active, trying fetch events from {0}".format(stream.url))
class Event(object):
def __init__(self, data):
self.data = data
for line in stream.iter_lines():
if line.strip() != '':
for real_event_data in re.split(r'\r\n',
line.decode('utf-8')):
if real_event_data[:6] == "data: ":
event = Event(data=real_event_data[6:])
yield event
@property
def host(self):
return next(self.__cycle_hosts)
def has_group(groups, app_groups):
# All groups / wildcard match
if '*' in groups:
return True
# empty group only
if len(groups) == 0 and len(app_groups) == 0:
raise Exception("No groups specified")
# Contains matching groups
if (len(frozenset(app_groups) & groups)):
return True
return False
def get_backend_port(apps, app, idx):
"""
    Return the port of the idx-th backend of the app whose index in apps
    is given by app.healthcheck_port_index.
Example case:
We define an app mapping two ports: 9000 and 9001, that we
scaled to 3 instances.
The port 9000 is used for the app itself, and the port 9001
is used for the app healthchecks. Hence, we have 2 apps
at the marathon level, each with 3 backends (one for each
container).
    If app.healthcheck_port_index is set to 1 (via the
    HAPROXY_0_BACKEND_HEALTHCHECK_PORT_INDEX label), then
    get_backend_port(apps, app, 2) will return the port of the 3rd
    backend (index 2) of the second app.
See https://github.com/mesosphere/marathon-lb/issues/198 for the
actual use case.
    Note: if app.healthcheck_port_index has an out-of-bounds value,
    then the port of the app's idx-th backend is returned instead.
"""
def get_backends(app):
key_func = attrgetter('host', 'port')
return sorted(list(app.backends), key=key_func)
apps = [_app for _app in apps if _app.appId == app.appId]
# If no healthcheck port index is defined, or if its value is nonsense
# simply return the app port
if app.healthcheck_port_index is None \
or abs(app.healthcheck_port_index) > len(apps):
return get_backends(app)[idx].port
# If a healthcheck port index is defined, fetch the app corresponding
# to the argument app healthcheck port index,
# and return its idx-th backend port
apps = sorted(apps, key=attrgetter('appId', 'servicePort'))
backends = get_backends(apps[app.healthcheck_port_index])
return backends[idx].port
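# Illustrative sketch (added for clarity; not part of the original module):
# mirrors the docstring scenario above with one Marathon app exposing service
# ports 9000 (traffic) and 9001 (health checks) scaled to 3 instances. Host
# names, IPs and host ports below are made up.
def _example_get_backend_port():  # pragma: no cover
    svc_main = MarathonService('/myapp', 9000, None, strictMode=False)
    svc_check = MarathonService('/myapp', 9001, None, strictMode=False)
    for i, (main_port, check_port) in enumerate([(31000, 31001),
                                                 (31100, 31101),
                                                 (31200, 31201)]):
        svc_main.add_backend('agent%d' % i, '10.0.0.%d' % i, main_port, False)
        svc_check.add_backend('agent%d' % i, '10.0.0.%d' % i, check_port, False)
    # Route health checks for the traffic service through the second service.
    svc_main.healthcheck_port_index = 1
    # Returns 31101: the port of backend index 1 of the health-check service,
    # because healthcheck_port_index selects apps[1] after sorting.
    return get_backend_port([svc_main, svc_check], svc_main, 1)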
def _get_health_check_options(template, health_check, health_check_port):
return template.format(
healthCheck=health_check,
healthCheckPortIndex=health_check.get('portIndex'),
healthCheckPort=health_check_port,
healthCheckProtocol=health_check['protocol'],
healthCheckPath=health_check.get('path', '/'),
healthCheckTimeoutSeconds=health_check['timeoutSeconds'],
healthCheckIntervalSeconds=health_check['intervalSeconds'],
healthCheckGracePeriodSeconds=health_check['gracePeriodSeconds'],
healthCheckMaxConsecutiveFailures=health_check[
'maxConsecutiveFailures'],
healthCheckFalls=health_check['maxConsecutiveFailures'] + 1,
healthCheckPortOptions=' port ' + str(
health_check_port) if health_check_port else ''
)
def mergeVhostTable(left, right):
result = left.copy()
for key in right:
if key in result:
result[key][0].extend(right[key][0])
result[key][1].update(right[key][1])
result[key][2].update(right[key][2])
else:
result[key] = right[key]
return result
def calculate_server_id(server_name, taken_server_ids):
"""Calculate a stable server id given server name
Calculates stable server id [1] for the given server name [2]
    which has the following properties:
* is unique/has not been assigned yet
* is an integer from the range 1-32767
    * is stable - i.e. calling this function repeatedly with the same
server name must yield the same server id.
THE STABILITY OF SERVER_ID IS GUARANTEED IF THE ORDER OF CALLS OF THIS
FUNCTION IS PRESERVED, I.E. THE BACKEND LIST IS SORTED BEFORE
PROCESSING
[1] http://cbonte.github.io/haproxy-dconv/1.8/configuration.html#5.2-id
[2] http://cbonte.github.io/haproxy-dconv/1.8/configuration.html#5.2
Args:
server_name(str): the name of the given backend server
        taken_server_ids(set): set of already assigned server ids
Returns:
An integer depicting the server ID
"""
if server_name == '' or server_name is None:
raise ValueError("Malformed server name: {}".format(server_name))
server_name_encoded = server_name.encode('utf-8')
server_name_shasum = hashlib.sha256(server_name_encoded).hexdigest()
# The number 32767 is not coincidental. It is very important to notice
# in [1] that:
# * due to the use of atol() call [2], server id must not exceed the length
# of 'long int' on a given platform. According to [3] it is at
# least 32bits long so 32bits is a safe limit.
# * the atol() call returns `long int` which is assigned to puid var which
    # in turn is `int`. As per [4]:
#
# ```
# On a system where long is wider than int, if the value won't fit in an
# int, then the result of the conversion is implementation-defined. (Or,
# starting in C99, it can raise an implementation-defined signal, but I
# don't know of any compilers that actually do that.) What typically
# happens is that the high-order bits are discarded, but you shouldn't
# depend on that. (The rules are different for unsigned types; the result
# of converting a signed or unsigned integer to an unsigned type is well
# defined.)
# ```
#
# So we need to assume that server id is 16 bit signed integer. Server id
# must be a positive number so this gives us at most 2**15-1 = 32767
# possible server IDs. Beyond that there are dragons and the undefined
# behaviour of the C compiler ;)
#
# [1] https://github.com/haproxy/haproxy/blob/c55b88ece616afe0b28dc81eb39bad37b5f9c33f/src/server.c#L359-L388 # noqa: E501
# [2] https://github.com/haproxy/haproxy/blob/c55b88ece616afe0b28dc81eb39bad37b5f9c33f/src/server.c#L368 # noqa: E501
# [3] https://en.wikipedia.org/wiki/C_data_types
# [4] https://stackoverflow.com/a/13652624
server_id = int(server_name_shasum, 16) % 32767
if server_id not in taken_server_ids and server_id > 0:
taken_server_ids.add(server_id)
return server_id
    # We try to solve collisions by recursively calling
    # calculate_server_id() with the server name argument set to the initial
    # server name plus the calculated `server_name_shasum` appended to it.
    # This way we should get stable IDs during the next haproxy
    # reconfiguration. The more backends there are, the more likely
    # collisions become. Initially the probability is 1/(2**15-1) * 100 =
    # 0.003%. As the new_server_name gets longer, the SHA sum calculation
    # gets more CPU-heavy and the number of SHA sum calculations per backend
    # server increases. Still, it is unlikely that we will hit a number of
    # backend servers that makes this approach a problem - that would
    # require backend counts in the order of thousands.
new_server_name = "{0} {1}".format(server_name, server_name_shasum)
if server_id == 0:
msg_fmt = ("server id == 0 for `%s`, retrying with `%s`")
logger.info(msg_fmt, server_name, new_server_name)
else:
msg_fmt = ("server id collision for `%s`: `%d` was already assigned, "
"retrying with `%s`")
logger.info(msg_fmt, server_name, server_id, new_server_name)
return calculate_server_id(new_server_name, taken_server_ids)
def config(apps, groups, bind_http_https, ssl_certs, templater,
haproxy_map=False, domain_map_array=[], app_map_array=[],
config_file="/etc/haproxy/haproxy.cfg",
group_https_by_vhosts=False):
logger.info("generating config")
config = templater.haproxy_head
groups = frozenset(groups)
duplicate_map = {}
    # avoid emitting the same map-based frontend rules more than once,
    # since all backends share the same map file
_ssl_certs = ssl_certs or "/etc/ssl/cert.pem"
_ssl_certs = _ssl_certs.split(",")
if bind_http_https:
http_frontends = templater.haproxy_http_frontend_head
if group_https_by_vhosts:
https_frontends = templater.haproxy_https_grouped_frontend_head
else:
https_frontends = templater.haproxy_https_frontend_head.format(
sslCerts=" ".join(map(lambda cert: "crt " + cert, _ssl_certs))
)
# This should handle situations where customers have a custom HAPROXY_HEAD
# that includes the 'daemon' flag or does not expose listener fds:
if 'daemon' in config or "expose-fd listeners" not in config:
upgrade_warning = '''\
Error in custom HAPROXY_HEAD template: \
In Marathon-LB 1.12, the default HAPROXY_HEAD section changed, please \
make the following changes to your custom template: Remove "daemon", \
Add "stats socket /var/run/haproxy/socket expose-fd listeners". \
More information can be found here: \
https://docs.mesosphere.com/services/marathon-lb/advanced/#global-template.\
'''
raise Exception(upgrade_warning)
userlists = str()
frontends = str()
backends = str()
http_appid_frontends = templater.haproxy_http_frontend_appid_head
apps_with_http_appid_backend = []
http_frontend_list = []
https_frontend_list = []
https_grouped_frontend_list = defaultdict(lambda: ([], set(), set()))
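    # Layout of each https_grouped_frontend_list value (one per vhost), as
    # filled in by generateHttpVhostAcl() and merged by mergeVhostTable():
    #   [0] list of (backend_weight, frontend_text) tuples
    #   [1] set of SSL certificate paths bound for that vhost
    #   [2] set of extra bind options for that vhost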
haproxy_dir = os.path.dirname(config_file)
logger.debug("HAProxy dir is %s", haproxy_dir)
for app in sorted(apps, key=attrgetter('appId', 'servicePort')):
        # App only applies if we have its group
        # Check if there is a haproxy group associated with the service
        # group; if not, fall back to the original HAPROXY group.
        # This is added for backward compatibility with HAPROXY_GROUP
if app.haproxy_groups:
if not has_group(groups, app.haproxy_groups):
continue
else:
if not has_group(groups, app.groups):
continue
# Skip if it's not actually enabled
if not app.enabled:
continue
logger.debug("configuring app %s", app.appId)
if len(app.backends) < 1:
logger.error("skipping app %s as it is not valid to generate" +
" backend without any server entries!", app.appId)
continue
backend = app.appId[1:].replace('/', '_') + '_' + str(app.servicePort)
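        # e.g. (hypothetical app) appId '/group/myapp' with service port
        # 10080 yields the backend name 'group_myapp_10080'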
logger.debug("frontend at %s:%d with backend %s",
app.bindAddr, app.servicePort, backend)
# If app has HAPROXY_{n}_MODE set, use that setting.
        # Otherwise use 'http' if HAPROXY_{n}_VHOST is set, and 'tcp' if not.
if app.mode is None:
if app.hostname:
app.mode = 'http'
else:
app.mode = 'tcp'
if app.authUser:
userlist_head = templater.haproxy_userlist_head(app)
userlists += userlist_head.format(
backend=backend,
user=app.authUser,
passwd=app.authPasswd
)
frontend_head = templater.haproxy_frontend_head(app)
frontends += frontend_head.format(
bindAddr=app.bindAddr,
backend=backend,
servicePort=app.servicePort,
mode=app.mode,
sslCert=' ssl crt ' + app.sslCert if app.sslCert else '',
bindOptions=' ' + app.bindOptions if app.bindOptions else ''
)
backend_head = templater.haproxy_backend_head(app)
backends += backend_head.format(
backend=backend,
balance=app.balance,
mode=app.mode
)
# if a hostname is set we add the app to the vhost section
# of our haproxy config
# TODO(lloesche): Check if the hostname is already defined by another
# service
if bind_http_https and app.hostname:
backend_weight, p_fe, s_fe, g_fe = \
generateHttpVhostAcl(templater,
app,
backend,
haproxy_map,
domain_map_array,
haproxy_dir,
duplicate_map)
http_frontend_list.append((backend_weight, p_fe))
https_frontend_list.append((backend_weight, s_fe))
if group_https_by_vhosts:
https_grouped_frontend_list = mergeVhostTable(
https_grouped_frontend_list, g_fe)
# if app mode is http, we add the app to the second http frontend
# selecting apps by http header X-Marathon-App-Id
if app.mode == 'http' and \
app.appId not in apps_with_http_appid_backend:
logger.debug("adding virtual host for app with id %s", app.appId)
# remember appids to prevent multiple entries for the same app
apps_with_http_appid_backend += [app.appId]
cleanedUpAppId = re.sub(r'[^a-zA-Z0-9\-]', '_', app.appId)
if haproxy_map:
if 'map_http_frontend_appid_acl' not in duplicate_map:
http_appid_frontend_acl = templater \
.haproxy_map_http_frontend_appid_acl(app)
http_appid_frontends += http_appid_frontend_acl.format(
haproxy_dir=haproxy_dir
)
duplicate_map['map_http_frontend_appid_acl'] = 1
map_element = {}
map_element[app.appId] = backend
if map_element not in app_map_array:
app_map_array.append(map_element)
else:
http_appid_frontend_acl = templater \
.haproxy_http_frontend_appid_acl(app)
http_appid_frontends += http_appid_frontend_acl.format(
cleanedUpAppId=cleanedUpAppId,
hostname=app.hostname,
appId=app.appId,
backend=backend
)
if app.mode == 'http':
if app.useHsts:
backends += templater.haproxy_backend_hsts_options(app)
backends += templater.haproxy_backend_http_options(app)
backend_http_backend_proxypass = templater \
.haproxy_http_backend_proxypass_glue(app)
if app.proxypath:
backends += backend_http_backend_proxypass.format(
hostname=app.hostname,
proxypath=app.proxypath
)
backend_http_backend_revproxy = templater \
.haproxy_http_backend_revproxy_glue(app)
if app.revproxypath:
backends += backend_http_backend_revproxy.format(
hostname=app.hostname,
rootpath=app.revproxypath
)
backend_http_backend_redir = templater \
.haproxy_http_backend_redir(app)
if app.redirpath:
backends += backend_http_backend_redir.format(
hostname=app.hostname,
redirpath=app.redirpath
)
# Set network allowed ACLs
if app.mode == 'http' and app.network_allowed:
for network in app.network_allowed.split():
backends += templater.\
haproxy_http_backend_network_allowed_acl(app).\
format(network_allowed=network)
backends += templater.haproxy_http_backend_acl_allow_deny
elif app.mode == 'tcp' and app.network_allowed:
for network in app.network_allowed.split():
backends += templater.\
haproxy_tcp_backend_network_allowed_acl(app).\
format(network_allowed=network)
backends += templater.haproxy_tcp_backend_acl_allow_deny
if app.sticky:
logger.debug("turning on sticky sessions")
backends += templater.haproxy_backend_sticky_options(app)
frontend_backend_glue = templater.haproxy_frontend_backend_glue(app)
frontends += frontend_backend_glue.format(backend=backend)
do_backend_healthcheck_options_once = True
key_func = attrgetter('host', 'port')
taken_server_ids = set()
for backend_service_idx, backendServer\
in enumerate(sorted(app.backends, key=key_func)):
if do_backend_healthcheck_options_once:
if app.healthCheck:
template_backend_health_check = None
if app.mode == 'tcp' \
or app.healthCheck['protocol'] == 'TCP' \
or app.healthCheck['protocol'] == 'MESOS_TCP':
template_backend_health_check = templater \
.haproxy_backend_tcp_healthcheck_options(app)
elif app.mode == 'http':
template_backend_health_check = templater \
.haproxy_backend_http_healthcheck_options(app)
if template_backend_health_check:
health_check_port = get_backend_port(
apps,
app,
backend_service_idx)
backends += _get_health_check_options(
template_backend_health_check,
app.healthCheck,
health_check_port)
do_backend_healthcheck_options_once = False
logger.debug(
"backend server %s:%d on %s",
backendServer.ip,
backendServer.port,
backendServer.host)
# Create a unique, friendly name for the backend server. We concat
# the host, task IP and task port together. If the host and task
# IP are actually the same then omit one for clarity.
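            # e.g. (hypothetical values) host 'agent-1', ip '10.0.1.5' and
            # port 31000 give serverName 'agent-1_10_0_1_5_31000'; if host
            # and ip were both '10.0.1.5' it would be '10_0_1_5_31000'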
if backendServer.host != backendServer.ip:
serverName = re.sub(
r'[^a-zA-Z0-9\-]', '_',
(backendServer.host + '_' +
backendServer.ip + '_' +
str(backendServer.port)))
else:
serverName = re.sub(
r'[^a-zA-Z0-9\-]', '_',
(backendServer.ip + '_' +
str(backendServer.port)))
shortHashedServerName = hashlib.sha1(serverName.encode()) \
.hexdigest()[:10]
# In order to keep the state of backend servers consistent between
# reloads, server IDs need to be stable. See
            # calculate_server_id()'s docstring to learn how it is achieved.
server_id = calculate_server_id(serverName, taken_server_ids)
server_health_check_options = None
if app.healthCheck:
template_server_healthcheck_options = None
if app.mode == 'tcp' \
or app.healthCheck['protocol'] == 'TCP' \
or app.healthCheck['protocol'] == 'MESOS_TCP':
template_server_healthcheck_options = templater \
.haproxy_backend_server_tcp_healthcheck_options(app)
elif app.mode == 'http':
template_server_healthcheck_options = templater \
.haproxy_backend_server_http_healthcheck_options(app)
if template_server_healthcheck_options:
if app.healthcheck_port_index is not None:
health_check_port = \
get_backend_port(apps, app, backend_service_idx)
else:
health_check_port = app.healthCheck.get('port')
server_health_check_options = _get_health_check_options(
template_server_healthcheck_options,
app.healthCheck,
health_check_port)
backend_server_options = templater \
.haproxy_backend_server_options(app)
backends += backend_server_options.format(
host=backendServer.host,
host_ipv4=backendServer.ip,
port=backendServer.port,
serverName=serverName,
serverId=server_id,
cookieOptions=' check cookie ' + shortHashedServerName
if app.sticky else '',
healthCheckOptions=server_health_check_options
if server_health_check_options else '',
otherOptions=' disabled' if backendServer.draining else ''
)
http_frontend_list.sort(key=lambda x: x[0], reverse=True)
https_frontend_list.sort(key=lambda x: x[0], reverse=True)
for backend in http_frontend_list:
http_frontends += backend[1]
if group_https_by_vhosts:
for backend in sorted(https_grouped_frontend_list.keys()):
https_frontends +=\
templater.haproxy_https_grouped_vhost_frontend_acl.format(
backend=re.sub(r'[^a-zA-Z0-9\-]', '_', backend),
host=backend)
else:
for backend in https_frontend_list:
https_frontends += backend[1]
config += userlists
if bind_http_https:
config += http_frontends
config += http_appid_frontends
if bind_http_https:
config += https_frontends
if group_https_by_vhosts:
for vhost in sorted(https_grouped_frontend_list.keys()):
config +=\
templater\
.haproxy_https_grouped_vhost_backend_head\
.format(
name=re.sub(r'[^a-zA-Z0-9\-]', '_', vhost))
frontend = templater \
.haproxy_https_grouped_vhost_frontend_head \
.format(name=re.sub(r'[^a-zA-Z0-9\-]', '_', vhost),
sslCerts=" ".join(
map(lambda cert: "crt " + cert,
defaultValue(
https_grouped_frontend_list[vhost][1],
set(_ssl_certs)))),
bindOpts=" ".join(
map(lambda opts: " " + opts,
https_grouped_frontend_list[vhost][2]))
)
for v in sorted(
https_grouped_frontend_list[vhost][0],
key=lambda x: x[0],
reverse=True):
frontend += v[1]
config += frontend
config += frontends
config += backends
return config
def defaultValue(col, default):
if len(col) == 0:
return default
else:
return col
def get_haproxy_pids():
try:
return set(map(lambda i: int(i), subprocess.check_output(
"pidof haproxy",
stderr=subprocess.STDOUT,
shell=True).split()))
except subprocess.CalledProcessError as ex:
logger.debug("Unable to get haproxy pids: %s", ex)
return set()
def reloadConfig():
reloadCommand = []
if args.command:
reloadCommand = shlex.split(args.command)
else:
logger.debug("No reload command provided, trying to find out how to" +
" reload the configuration")
if os.path.isfile('/etc/init/haproxy.conf'):
logger.debug("we seem to be running on an Upstart based system")
reloadCommand = ['reload', 'haproxy']
elif (os.path.isfile('/usr/lib/systemd/system/haproxy.service') or
os.path.isfile('/lib/systemd/system/haproxy.service') or
os.path.isfile('/etc/systemd/system/haproxy.service')):
logger.debug("we seem to be running on systemd based system")
reloadCommand = ['systemctl', 'reload', 'haproxy']
elif os.path.isfile('/etc/init.d/haproxy'):
logger.debug("we seem to be running on a sysvinit based system")
reloadCommand = ['/etc/init.d/haproxy', 'reload']
else:
# if no haproxy exists (maybe running in a container)
logger.debug("no haproxy detected. won't reload.")
reloadCommand = None
if reloadCommand:
logger.info("reloading using %s", " ".join(reloadCommand))
try:
start_time = time.time()
checkpoint_time = start_time
# Retry or log the reload every 10 seconds
reload_frequency = args.reload_interval
reload_retries = args.max_reload_retries
enable_retries = True
infinite_retries = False
if reload_retries == 0:
enable_retries = False
elif reload_retries < 0:
infinite_retries = True
old_pids = get_haproxy_pids()
subprocess.check_call(reloadCommand, close_fds=True)
new_pids = get_haproxy_pids()
logger.debug("Waiting for new haproxy pid (old pids: [%s], " +
"new_pids: [%s])...", old_pids, new_pids)
# Wait until the reload actually occurs and there's a new PID
while True:
if len(new_pids - old_pids) >= 1:
logger.debug("new pids: [%s]", new_pids)
logger.debug("reload finished, took %s seconds",
time.time() - start_time)
break
timeSinceCheckpoint = time.time() - checkpoint_time
if (timeSinceCheckpoint >= reload_frequency):
logger.debug("Still waiting for new haproxy pid after " +
"%s seconds (old pids: [%s], " +
"new_pids: [%s]).",
time.time() - start_time, old_pids, new_pids)
checkpoint_time = time.time()
if enable_retries:
if not infinite_retries:
reload_retries -= 1
if reload_retries == 0:
logger.debug("reload failed after %s seconds",
time.time() - start_time)
break
logger.debug("Attempting reload again...")
subprocess.check_call(reloadCommand, close_fds=True)
time.sleep(0.1)
new_pids = get_haproxy_pids()
except OSError as ex:
logger.error("unable to reload config using command %s",
" ".join(reloadCommand))
logger.error("OSError: %s", ex)
except subprocess.CalledProcessError as ex:
logger.error("unable to reload config using command %s",
" ".join(reloadCommand))
logger.error("reload returned non-zero: %s", ex)
def generateHttpVhostAcl(
templater, app, backend, haproxy_map, map_array,
haproxy_dir, duplicate_map):
# If the hostname contains the delimiter ',', then the marathon app is
# requesting multiple hostname matches for the same backend, and we need
# to use alternate templates from the default one-acl/one-use_backend.
staging_http_frontends = ""
staging_https_frontends = ""
https_grouped_frontend_list = defaultdict(lambda: ([], set(), set()))
if "," in app.hostname:
logger.debug(
"vhost label specifies multiple hosts: %s", app.hostname)
vhosts = app.hostname.split(',')
acl_name = re.sub(r'[^a-zA-Z0-9\-]', '_', vhosts[0]) + \
'_' + app.appId[1:].replace('/', '_')
if app.path:
if app.authRealm:
# Set the path ACL if it exists
logger.debug("adding path acl, path=%s", app.path)
http_frontend_acl = \
templater.\
haproxy_http_frontend_acl_only_with_path_and_auth(app)
staging_http_frontends += http_frontend_acl.format(
path=app.path,
cleanedUpHostname=acl_name,
hostname=vhosts[0],
realm=app.authRealm,
backend=backend
)
https_frontend_acl = \
templater.\
haproxy_https_frontend_acl_only_with_path(app)
staging_https_frontends += https_frontend_acl.format(
path=app.path,
cleanedUpHostname=acl_name,
hostname=vhosts[0],
realm=app.authRealm,
backend=backend
)
else:
# Set the path ACL if it exists
logger.debug("adding path acl, path=%s", app.path)
http_frontend_acl = \
templater.haproxy_http_frontend_acl_only_with_path(app)
staging_http_frontends += http_frontend_acl.format(
path=app.path,
backend=backend
)
https_frontend_acl = \
templater.haproxy_https_frontend_acl_only_with_path(app)
staging_https_frontends += https_frontend_acl.format(
path=app.path,
backend=backend
)
temp_frontend_head = staging_https_frontends
for vhost_hostname in vhosts:
https_grouped_frontend_list[vhost_hostname][0].append(
(app.backend_weight, temp_frontend_head))
if app.sslCert is not None:
https_grouped_frontend_list[vhost_hostname][1].add(app.sslCert)
if app.bindOptions is not None:
https_grouped_frontend_list[vhost_hostname][2].add(
app.bindOptions)
logger.debug("processing vhost %s", vhost_hostname)
if haproxy_map and not app.path and not app.authRealm and \
not app.redirectHttpToHttps:
if 'map_http_frontend_acl' not in duplicate_map:
app.backend_weight = -1
http_frontend_acl = templater.\
haproxy_map_http_frontend_acl_only(app)
staging_http_frontends += http_frontend_acl.format(
haproxy_dir=haproxy_dir
)
duplicate_map['map_http_frontend_acl'] = 1
map_element = {}
map_element[vhost_hostname] = backend
if map_element not in map_array:
map_array.append(map_element)
else:
http_frontend_acl = templater.\
haproxy_http_frontend_acl_only(app)
staging_http_frontends += http_frontend_acl.format(
cleanedUpHostname=acl_name,
hostname=vhost_hostname
)
# Tack on the SSL ACL as well
if app.path:
if app.authRealm:
https_frontend_acl = templater.\
haproxy_https_frontend_acl_with_auth_and_path(app)
staging_https_frontend = https_frontend_acl.format(
cleanedUpHostname=acl_name,
hostname=vhost_hostname,
appId=app.appId,
realm=app.authRealm,
backend=backend
)
staging_https_frontends += staging_https_frontend
https_grouped_frontend_list[vhost_hostname][0].append(
(app.backend_weight, staging_https_frontend))
else:
https_frontend_acl = \
templater.haproxy_https_frontend_acl_with_path(app)
staging_https_frontend = https_frontend_acl.format(
cleanedUpHostname=acl_name,
hostname=vhost_hostname,
appId=app.appId,
backend=backend
)
staging_https_frontends += staging_https_frontend
https_grouped_frontend_list[vhost_hostname][0].append(
(app.backend_weight, staging_https_frontend))
else:
if app.authRealm:
https_frontend_acl = \
templater.haproxy_https_frontend_acl_with_auth(app)
staging_https_frontend = https_frontend_acl.format(
cleanedUpHostname=acl_name,
hostname=vhost_hostname,
appId=app.appId,
realm=app.authRealm,
backend=backend
)
staging_https_frontends += staging_https_frontend
https_grouped_frontend_list[vhost_hostname][0].append(
(app.backend_weight, staging_https_frontend))
else:
if haproxy_map:
if 'map_https_frontend_acl' not in duplicate_map:
app.backend_weight = -1
https_frontend_acl = templater.\
haproxy_map_https_frontend_acl(app)
staging_https_frontend = https_frontend_acl. \
format(
hostname=vhost_hostname,
haproxy_dir=haproxy_dir
)
staging_https_frontends += staging_https_frontend
https_grouped_frontend_list[vhost_hostname][0]\
.append(
(app.backend_weight, staging_https_frontend))
duplicate_map['map_https_frontend_acl'] = 1
map_element = {}
map_element[vhost_hostname] = backend
if map_element not in map_array:
map_array.append(map_element)
else:
https_frontend_acl = templater.\
haproxy_https_frontend_acl(app)
staging_https_frontend = https_frontend_acl.format(
cleanedUpHostname=acl_name,
hostname=vhost_hostname,
appId=app.appId,
backend=backend
)
staging_https_frontends += staging_https_frontend
https_grouped_frontend_list[vhost_hostname][0].append(
(app.backend_weight, staging_https_frontend))
# We've added the http acl lines, now route them to the same backend
if app.redirectHttpToHttps:
logger.debug("writing rule to redirect http to https traffic")
if app.path:
haproxy_backend_redirect_http_to_https = \
templater.\
haproxy_backend_redirect_http_to_https_with_path(app)
frontend = haproxy_backend_redirect_http_to_https.format(
bindAddr=app.bindAddr,
cleanedUpHostname=acl_name,
backend=backend
)
staging_http_frontends += frontend
else:
haproxy_backend_redirect_http_to_https = \
templater.haproxy_backend_redirect_http_to_https(app)
frontend = haproxy_backend_redirect_http_to_https.format(
bindAddr=app.bindAddr,
cleanedUpHostname=acl_name
)
staging_http_frontends += frontend
elif app.path:
if app.authRealm:
http_frontend_route = \
templater.\
haproxy_http_frontend_routing_only_with_path_and_auth(app)
staging_http_frontends += http_frontend_route.format(
cleanedUpHostname=acl_name,
realm=app.authRealm,
backend=backend
)
else:
http_frontend_route = \
templater.haproxy_http_frontend_routing_only_with_path(app)
staging_http_frontends += http_frontend_route.format(
cleanedUpHostname=acl_name,
backend=backend
)
else:
if app.authRealm:
http_frontend_route = \
templater.\
haproxy_http_frontend_routing_only_with_auth(app)
staging_http_frontends += http_frontend_route.format(
cleanedUpHostname=acl_name,
realm=app.authRealm,
backend=backend
)
else:
if not haproxy_map:
http_frontend_route = \
templater.haproxy_http_frontend_routing_only(app)
staging_http_frontends += http_frontend_route.format(
cleanedUpHostname=acl_name,
backend=backend
)
else:
# A single hostname in the VHOST label
logger.debug(
"adding virtual host for app with hostname %s", app.hostname)
acl_name = re.sub(r'[^a-zA-Z0-9\-]', '_', app.hostname) + \
'_' + app.appId[1:].replace('/', '_')
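        # e.g. (hypothetical app) hostname 'app.example.com' and appId
        # '/myapp' produce the ACL name 'app_example_com_myapp'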
if app.sslCert is not None:
https_grouped_frontend_list[app.hostname][1].add(app.sslCert)
if app.bindOptions is not None:
https_grouped_frontend_list[app.hostname][2].add(app.bindOptions)
if app.path:
if app.redirectHttpToHttps:
http_frontend_acl = \
templater.haproxy_http_frontend_acl_only(app)
staging_http_frontends += http_frontend_acl.format(
cleanedUpHostname=acl_name,
hostname=app.hostname
)
http_frontend_acl = \
templater.haproxy_http_frontend_acl_only_with_path(app)
staging_http_frontends += http_frontend_acl.format(
cleanedUpHostname=acl_name,
hostname=app.hostname,
path=app.path,
backend=backend
)
haproxy_backend_redirect_http_to_https = \
templater.\
haproxy_backend_redirect_http_to_https_with_path(app)
frontend = haproxy_backend_redirect_http_to_https.format(
bindAddr=app.bindAddr,
cleanedUpHostname=acl_name,
backend=backend
)
staging_http_frontends += frontend
else:
if app.authRealm:
http_frontend_acl = \
templater.\
haproxy_http_frontend_acl_with_auth_and_path(app)
staging_http_frontends += http_frontend_acl.format(
cleanedUpHostname=acl_name,
hostname=app.hostname,
path=app.path,
appId=app.appId,
realm=app.authRealm,
backend=backend
)
else:
http_frontend_acl = \
templater.haproxy_http_frontend_acl_with_path(app)
staging_http_frontends += http_frontend_acl.format(
cleanedUpHostname=acl_name,
hostname=app.hostname,
path=app.path,
appId=app.appId,
backend=backend
)
https_frontend_acl = \
templater.haproxy_https_frontend_acl_only_with_path(app)
staging_https_frontend = https_frontend_acl.format(
path=app.path,
backend=backend
)
staging_https_frontends += staging_https_frontend
https_grouped_frontend_list[app.hostname][0].append(
(app.backend_weight, staging_https_frontend))
if app.authRealm:
https_frontend_acl = \
templater.\
haproxy_https_frontend_acl_with_auth_and_path(app)
staging_https_frontend = https_frontend_acl.format(
cleanedUpHostname=acl_name,
hostname=app.hostname,
path=app.path,
appId=app.appId,
realm=app.authRealm,
backend=backend
)
staging_https_frontends += staging_https_frontend
https_grouped_frontend_list[app.hostname][0].append(
(app.backend_weight, staging_https_frontend))
else:
https_frontend_acl = \
templater.haproxy_https_frontend_acl_with_path(app)
staging_https_frontend = https_frontend_acl.format(
cleanedUpHostname=acl_name,
hostname=app.hostname,
appId=app.appId,
backend=backend
)
staging_https_frontends += staging_https_frontend
https_grouped_frontend_list[app.hostname][0].append(
(app.backend_weight, staging_https_frontend))
else:
if app.redirectHttpToHttps:
http_frontend_acl = \
templater.haproxy_http_frontend_acl_only(app)
staging_http_frontends += http_frontend_acl.format(
cleanedUpHostname=acl_name,
hostname=app.hostname
)
haproxy_backend_redirect_http_to_https = \
templater.\
haproxy_backend_redirect_http_to_https(app)
frontend = haproxy_backend_redirect_http_to_https.format(
bindAddr=app.bindAddr,
cleanedUpHostname=acl_name
)
staging_http_frontends += frontend
else:
if app.authRealm:
http_frontend_acl = \
templater.haproxy_http_frontend_acl_with_auth(app)
staging_http_frontends += http_frontend_acl.format(
cleanedUpHostname=acl_name,
hostname=app.hostname,
appId=app.appId,
realm=app.authRealm,
backend=backend
)
else:
if haproxy_map:
if 'map_http_frontend_acl' not in duplicate_map:
app.backend_weight = -1
http_frontend_acl = \
templater.haproxy_map_http_frontend_acl(app)
staging_http_frontends += http_frontend_acl.format(
haproxy_dir=haproxy_dir
)
duplicate_map['map_http_frontend_acl'] = 1
map_element = {}
map_element[app.hostname] = backend
if map_element not in map_array:
map_array.append(map_element)
else:
http_frontend_acl = \
templater.haproxy_http_frontend_acl(app)
staging_http_frontends += http_frontend_acl.format(
cleanedUpHostname=acl_name,
hostname=app.hostname,
appId=app.appId,
backend=backend
)
if app.authRealm:
https_frontend_acl = \
templater.haproxy_https_frontend_acl_with_auth(app)
staging_https_frontend = https_frontend_acl.format(
cleanedUpHostname=acl_name,
hostname=app.hostname,
appId=app.appId,
realm=app.authRealm,
backend=backend
)
staging_https_frontends += staging_https_frontend
https_grouped_frontend_list[app.hostname][0].append(
(app.backend_weight, staging_https_frontend))
else:
if haproxy_map:
if 'map_https_frontend_acl' not in duplicate_map:
app.backend_weight = -1
https_frontend_acl = templater.\
haproxy_map_https_frontend_acl(app)
staging_https_frontend = https_frontend_acl.format(
hostname=app.hostname,
haproxy_dir=haproxy_dir
)
staging_https_frontends += staging_https_frontend
https_grouped_frontend_list[app.hostname][0].append(
(app.backend_weight, staging_https_frontend))
duplicate_map['map_https_frontend_acl'] = 1
map_element = {}
map_element[app.hostname] = backend
if map_element not in map_array:
map_array.append(map_element)
else:
https_frontend_acl = templater.\
haproxy_https_frontend_acl(app)
staging_https_frontend = https_frontend_acl.format(
cleanedUpHostname=acl_name,
hostname=app.hostname,
appId=app.appId,
backend=backend
)
staging_https_frontends += staging_https_frontend
https_grouped_frontend_list[app.hostname][0].append(
(app.backend_weight, staging_https_frontend))
return (app.backend_weight,
staging_http_frontends,
staging_https_frontends,
https_grouped_frontend_list)
def writeConfigAndValidate(
config, config_file, domain_map_string, domain_map_file,
app_map_string, app_map_file, haproxy_map):
# Test run, print to stdout and exit
if args.dry:
print(config)
sys.exit()
temp_config = config
# First write the new maps to temporary files
if haproxy_map:
domain_temp_map_file = writeReplacementTempFile(domain_map_string,
domain_map_file)
app_temp_map_file = writeReplacementTempFile(app_map_string,
app_map_file)
# Change the file paths in the config to (temporarily) point to the
# temporary map files so those can also be checked when the config is
# validated
temp_config = config.replace(
domain_map_file, domain_temp_map_file
).replace(app_map_file, app_temp_map_file)
# Write the new config to a temporary file
haproxyTempConfigFile = writeReplacementTempFile(temp_config, config_file)
if validateConfig(haproxyTempConfigFile):
# Move into place
if haproxy_map:
moveTempFile(domain_temp_map_file, domain_map_file, "domain_map")
moveTempFile(app_temp_map_file, app_map_file, "app_map")
# Edit the config file again to point to the actual map paths
with open(haproxyTempConfigFile, 'w') as tempConfig:
tempConfig.write(config)
else:
truncateMapFileIfExists(domain_map_file)
truncateMapFileIfExists(app_map_file)
moveTempFile(haproxyTempConfigFile, config_file, "hap_cfg")
return True
else:
moveTempFile(haproxyTempConfigFile, 'haproxy_tmp_conf_fail',
'haproxy_temp_config_fail')
        # the temporary map files only exist when haproxy_map is enabled
        if haproxy_map:
            removeTempFileIfExist(domain_temp_map_file)
            removeTempFileIfExist(app_temp_map_file)
return False
def writeReplacementTempFile(content, file_to_replace):
# Create a temporary file containing the given content that will be used to
# replace the given file after validation. Returns the path to the
# temporary file.
fd, tempFile = mkstemp()
logger.debug(
"writing temp file %s that will replace %s", tempFile, file_to_replace)
with os.fdopen(fd, 'w') as tempConfig:
tempConfig.write(content)
# Ensure the new file is created with the same permissions the old file had
# or use defaults if the file doesn't exist yet
perms = 0o644
if os.path.isfile(file_to_replace):
perms = stat.S_IMODE(os.lstat(file_to_replace).st_mode)
os.chmod(tempFile, perms)
return tempFile
def validateConfig(haproxy_config_file):
# If skip validation flag is provided, don't check.
if args.skip_validation:
logger.debug("skipping validation.")
return True
# Check that config is valid
cmd = ['haproxy', '-f', haproxy_config_file, '-c']
logger.debug("checking config with command: " + str(cmd))
returncode = subprocess.call(args=cmd)
if returncode == 0:
return True
else:
logger.error("haproxy returned non-zero when checking config")
return False
def moveTempFile(temp_file, dest_file, tmp_filename):
# Replace the old file with the new from its temporary location
for suffix in range(args.archive_versions - 1, 0, -1):
tmp_src_file = "/tmp/" + tmp_filename + "." + str(suffix)
tmp_dest_file = "/tmp/" + tmp_filename + "." + str(suffix + 1)
if os.path.isfile(tmp_src_file):
logger.debug("Copying temp file %s to %s",
tmp_src_file, tmp_dest_file)
copy(tmp_src_file, tmp_dest_file)
logger.debug("Copying temp files %s to %s",
temp_file, "/tmp/" + tmp_filename + ".1")
copy(temp_file, "/tmp/" + tmp_filename + ".1")
logger.debug("moving temp file %s to %s", temp_file, dest_file)
move(temp_file, dest_file)
def truncateMapFileIfExists(map_file):
if os.path.isfile(map_file):
logger.debug("Truncating map file as haproxy-map flag "
"is disabled %s", map_file)
fd = os.open(map_file, os.O_RDWR)
os.ftruncate(fd, 0)
os.close(fd)
def removeTempFileIfExist(temp_file):
if os.path.isfile(temp_file):
logger.debug("delete tempfile %s", temp_file)
os.remove(temp_file)
def generateAndValidateTempConfig(config, config_file, domain_map_array,
app_map_array, haproxy_map):
temp_config_file = "%s.tmp" % config_file
domain_map_file = os.path.join(os.path.dirname(temp_config_file),
"domain2backend.map.tmp")
app_map_file = os.path.join(os.path.dirname(temp_config_file),
"app2backend.map.tmp")
domain_map_string = str()
app_map_string = str()
if haproxy_map:
domain_map_string = generateMapString(domain_map_array)
app_map_string = generateMapString(app_map_array)
return writeConfigAndValidate(
config, temp_config_file, domain_map_string, domain_map_file,
app_map_string, app_map_file, haproxy_map)
def compareWriteAndReloadConfig(config, config_file, domain_map_array,
app_map_array, haproxy_map):
changed = False
config_valid = False
# See if the last config on disk matches this, and if so don't reload
# haproxy
domain_map_file = os.path.join(os.path.dirname(config_file),
"domain2backend.map")
app_map_file = os.path.join(os.path.dirname(config_file),
"app2backend.map")
domain_map_string = str()
app_map_string = str()
runningConfig = str()
try:
logger.debug("reading running config from %s", config_file)
with open(config_file, "r") as f:
runningConfig = f.read()
except IOError:
logger.warning("couldn't open config file for reading")
if haproxy_map:
domain_map_string = generateMapString(domain_map_array)
app_map_string = generateMapString(app_map_array)
if (runningConfig != config or
compareMapFile(domain_map_file, domain_map_string) or
compareMapFile(app_map_file, app_map_string)):
logger.info(
"running config/map is different from generated"
" config - reloading")
if writeConfigAndValidate(
config, config_file, domain_map_string, domain_map_file,
app_map_string, app_map_file, haproxy_map):
reloadConfig()
changed = True
config_valid = True
else:
logger.warning("skipping reload: config/map not valid")
changed = True
config_valid = False
else:
logger.debug("skipping reload: config/map unchanged")
changed = False
config_valid = True
else:
truncateMapFileIfExists(domain_map_file)
truncateMapFileIfExists(app_map_file)
if runningConfig != config:
logger.info(
"running config is different from generated config"
" - reloading")
if writeConfigAndValidate(
config, config_file, domain_map_string, domain_map_file,
app_map_string, app_map_file, haproxy_map):
reloadConfig()
changed = True
config_valid = True
else:
logger.warning("skipping reload: config not valid")
changed = True
config_valid = False
else:
changed = False
config_valid = True
logger.debug("skipping reload: config unchanged")
return changed, config_valid
def generateMapString(map_array):
# Generate the string representation of the map file from a map array
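    # e.g. (hypothetical entry) [{'app.example.com': 'myapp_10080'}]
    # becomes the single line "app.example.com myapp_10080\n"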
map_string = str()
for element in map_array:
for key, value in list(element.items()):
map_string = map_string + str(key) + " " + str(value) + "\n"
return map_string
def compareMapFile(map_file, map_string):
# Read the map file (creating an empty file if it does not exist) and
# compare its contents to the given map string. Returns true if the map
# string is different to the contents of the file.
if not os.path.isfile(map_file):
open(map_file, 'a').close()
runningmap = str()
try:
logger.debug("reading map config from %s", map_file)
with open(map_file, "r") as f:
runningmap = f.read()
except IOError:
logger.warning("couldn't open map file for reading")
return runningmap != map_string
def get_health_check(app, portIndex):
if 'healthChecks' not in app:
return None
for check in app['healthChecks']:
if check.get('port'):
return check
if check.get('portIndex') == portIndex:
return check
return None
healthCheckResultCache = LRUCache()
def get_apps(marathon, apps=[]):
if len(apps) == 0:
apps = marathon.list()
logger.debug("got apps %s", [app["id"] for app in apps])
excluded_states = {'TASK_KILLING', 'TASK_KILLED',
'TASK_FINISHED', 'TASK_ERROR'}
marathon_apps = []
# This process requires 2 passes: the first is to gather apps belonging
# to a deployment group.
processed_apps = []
deployment_groups = {}
for app in apps:
deployment_group = None
if 'HAPROXY_DEPLOYMENT_GROUP' in app['labels']:
deployment_group = app['labels']['HAPROXY_DEPLOYMENT_GROUP']
# mutate the app id to match deployment group
if deployment_group[0] != '/':
deployment_group = '/' + deployment_group
app['id'] = deployment_group
else:
processed_apps.append(app)
continue
if deployment_group in deployment_groups:
# merge the groups, with the oldest taking precedence
prev = deployment_groups[deployment_group]
cur = app
# If for some reason neither label is set correctly, then it's a
# crapshoot. Most likely, whichever one is unset was not deployed
# with ZDD, so we should prefer the one with a date set.
cur_date = datetime.datetime.min
prev_date = datetime.datetime.min
if 'HAPROXY_DEPLOYMENT_STARTED_AT' in prev['labels']:
prev_date = dateutil.parser.parse(
prev['labels']['HAPROXY_DEPLOYMENT_STARTED_AT'])
if 'HAPROXY_DEPLOYMENT_STARTED_AT' in cur['labels']:
cur_date = dateutil.parser.parse(
cur['labels']['HAPROXY_DEPLOYMENT_STARTED_AT'])
old = new = None
if prev_date < cur_date:
old = prev
new = cur
else:
new = prev
old = cur
if 'HAPROXY_DEPLOYMENT_NEW_INSTANCES' in new['labels']:
                if int(new['labels']
                       ['HAPROXY_DEPLOYMENT_NEW_INSTANCES']) != 0:
new_scale_time = dateutil.parser.parse(
new['versionInfo']['lastScalingAt'])
old_scale_time = dateutil.parser.parse(
old['versionInfo']['lastScalingAt'])
if old_scale_time > new_scale_time:
temp = old
old = new
new = temp
target_instances = \
int(new['labels']['HAPROXY_DEPLOYMENT_TARGET_INSTANCES'])
# Mark N tasks from old app as draining, where N is the
# number of instances in the new app. Sort the old tasks so that
# order is deterministic (i.e. so that we always drain the same
# tasks).
old_tasks = sorted(old['tasks'], key=lambda task: task['id'])
healthy_new_instances = 0
if len(app['healthChecks']) > 0:
for task in new['tasks']:
if 'healthCheckResults' not in task:
continue
alive = True
for result in task['healthCheckResults']:
if not result['alive']:
alive = False
if alive:
healthy_new_instances += 1
else:
healthy_new_instances = new['instances']
maximum_drainable = \
max(0, (healthy_new_instances + old['instances']) -
target_instances)
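            # e.g. (hypothetical counts) 2 healthy new instances, 5 old
            # instances and a target of 5 give maximum_drainable =
            # max(0, (2 + 5) - 5) = 2, so at most 2 old tasks are drained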
for i in range(0, min(len(old_tasks),
healthy_new_instances,
maximum_drainable)):
old_tasks[i]['draining'] = True
# merge tasks from new app into old app
merged = old
old_tasks.extend(new['tasks'])
merged['tasks'] = old_tasks
deployment_groups[deployment_group] = merged
else:
deployment_groups[deployment_group] = app
processed_apps.extend(deployment_groups.values())
# Reset the service port assigner. This forces the port assigner to
# re-assign ports for IP-per-task applications. The upshot is that
# the service port for a particular app may change dynamically, but
# the service port will be deterministic and identical across all
# instances of the marathon-lb.
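    # For example (hypothetical run), an IP-per-task app with two ports
    # might be assigned service ports 10050 and 10051 on every pass,
    # within the --min/--max-serv-port-ip-per-task range.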
SERVICE_PORT_ASSIGNER.reset()
for app in processed_apps:
appId = app['id']
if appId[1:] == os.environ.get("FRAMEWORK_NAME"):
continue
marathon_app = MarathonApp(marathon, appId, app)
if 'HAPROXY_GROUP' in marathon_app.app['labels']:
marathon_app.groups = \
marathon_app.app['labels']['HAPROXY_GROUP'].split(',')
marathon_apps.append(marathon_app)
service_ports = SERVICE_PORT_ASSIGNER.get_service_ports(app)
for i, servicePort in enumerate(service_ports):
if servicePort is None:
logger.warning("Skipping undefined service port")
continue
service = MarathonService(appId, servicePort,
get_health_check(app, i),
marathon.strict_mode())
for key_unformatted in label_keys:
key = key_unformatted.format(i)
if key in marathon_app.app['labels']:
func = label_keys[key_unformatted]
func(service,
key_unformatted,
marathon_app.app['labels'][key])
# https://github.com/mesosphere/marathon-lb/issues/198
            # A Marathon app manifest defines each health check for a
            # specific service port, identified by either a port or a
            # portIndex.
            # (Marathon itself prefers port over portIndex, see
            # https://mesosphere.github.io/marathon/docs/health-checks.html)
            #
            # We want to be able to instruct HAProxy to use the health
            # check defined for service port B in Marathon to decide
            # whether service port A should be considered healthy in
            # HAProxy.
#
# This is done by specifying a label:
# HAPROXY_{n}_BACKEND_HEALTHCHECK_PORT_INDEX
#
# TODO(norangshol) Refactor and supply MarathonService
# TODO(norangshol) with its labels and do this in its constructor?
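            # e.g. the (hypothetical) label
            # HAPROXY_0_BACKEND_HEALTHCHECK_PORT_INDEX=1 lets the service at
            # port index 0 reuse the health check Marathon defines for
            # port index 1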
if service.healthCheck is None \
and service.healthcheck_port_index is not None:
service.healthCheck = \
get_health_check(app, service.healthcheck_port_index)
if service.healthCheck:
healthProto = service.healthCheck['protocol']
if healthProto in ['HTTP', 'HTTPS', 'MESOS_HTTP',
'MESOS_HTTPS']:
service.mode = 'http'
marathon_app.services[servicePort] = service
for task in app['tasks']:
# Marathon 0.7.6 bug workaround
if not task['host']:
logger.warning("Ignoring Marathon task without host " +
task['id'])
continue
# 'state' will not be present in test cases.
# Should always be present in an actual cluster
if 'state' in task and task['state'] in excluded_states:
logger.warning("Ignoring non-running task " + task['id'] +
" with state " + task['state'])
continue
if marathon.health_check() and 'healthChecks' in app and \
len(app['healthChecks']) > 0:
alive = True
if 'healthCheckResults' not in task:
# use previously cached result, if it exists
if not healthCheckResultCache.get(task['id'], False):
continue
else:
for result in task['healthCheckResults']:
if not result['alive']:
alive = False
healthCheckResultCache.set(task['id'], alive)
if not alive:
continue
task_ip, task_ports = get_task_ip_and_ports(app, task)
if task_ip is None:
logger.warning("Task has no resolvable IP address - skip")
continue
draining = task.get('draining', False)
            # if different versions of the app have different numbers of
            # ports, try to match as many ports as possible
for task_port, service_port in zip(task_ports, service_ports):
service = marathon_app.services.get(service_port, None)
if service:
service.groups = marathon_app.groups
service.add_backend(task['host'],
task_ip,
task_port,
draining)
# Convert into a list for easier consumption
apps_list = []
for marathon_app in marathon_apps:
for service in list(marathon_app.services.values()):
if service.backends:
apps_list.append(service)
return apps_list
def regenerate_config(marathon, config_file, groups, bind_http_https,
ssl_certs, templater, haproxy_map, group_https_by_vhost):
domain_map_array = []
app_map_array = []
raw_apps = marathon.list()
apps = get_apps(marathon, raw_apps)
generated_config = config(apps, groups, bind_http_https, ssl_certs,
templater, haproxy_map, domain_map_array,
app_map_array, config_file, group_https_by_vhost)
(changed, config_valid) = compareWriteAndReloadConfig(
generated_config, config_file, domain_map_array, app_map_array,
haproxy_map)
if changed and not config_valid:
apps = make_config_valid_and_regenerate(marathon,
raw_apps,
groups,
bind_http_https,
ssl_certs,
templater,
haproxy_map,
domain_map_array,
app_map_array,
config_file,
group_https_by_vhost)
return apps
# Build up a valid configuration by adding one app at a time and checking
# for a valid config file after each app
def make_config_valid_and_regenerate(marathon,
raw_apps,
groups,
bind_http_https,
ssl_certs,
templater,
haproxy_map,
domain_map_array,
app_map_array,
config_file,
group_https_by_vhost):
try:
start_time = time.time()
apps = []
valid_apps = []
excluded_ids = []
included_ids = []
for app in raw_apps:
domain_map_array = []
app_map_array = []
valid_apps.append(app)
apps = get_apps(marathon, valid_apps)
generated_config = config(apps, groups, bind_http_https,
ssl_certs, templater, haproxy_map,
domain_map_array, app_map_array,
config_file, group_https_by_vhost)
config_valid = generateAndValidateTempConfig(generated_config,
config_file,
domain_map_array,
app_map_array,
haproxy_map)
if not config_valid:
                logger.warning(
"invalid configuration caused by app %s; "
"it will be excluded", app["id"])
del valid_apps[-1]
excluded_ids.append(app["id"])
else:
included_ids.append(app["id"])
if len(valid_apps) > 0:
logger.debug("reloading valid config including apps: %s, and "
"excluding apps: %s", included_ids, excluded_ids)
domain_map_array = []
app_map_array = []
apps = get_apps(marathon, valid_apps)
valid_config = config(apps, groups, bind_http_https,
ssl_certs, templater, haproxy_map,
domain_map_array, app_map_array,
config_file, group_https_by_vhost)
compareWriteAndReloadConfig(valid_config,
config_file,
domain_map_array,
app_map_array, haproxy_map)
else:
logger.error("A valid config file could not be generated after "
"excluding all apps! skipping reload")
logger.debug("reloading while excluding invalid tasks finished, "
"took %s seconds",
time.time() - start_time)
return apps
except Exception:
logger.exception("Unexpected error!")
class MarathonEventProcessor(object):
def __init__(self, marathon,
config_file,
groups,
bind_http_https,
ssl_certs,
haproxy_map,
group_https_by_vhost):
self.__marathon = marathon
# appId -> MarathonApp
self.__apps = dict()
self.__config_file = config_file
self.__groups = groups
self.__templater = ConfigTemplater()
self.__bind_http_https = bind_http_https
self.__group_https_by_vhost = group_https_by_vhost
self.__ssl_certs = ssl_certs
self.__condition = threading.Condition()
self.__pending_reset = False
self.__pending_reload = False
self.__haproxy_map = haproxy_map
self.__thread = None
# Fetch the base data
self.reset_from_tasks()
def start(self):
self.__stop = False
if self.__thread is not None and self.__thread.is_alive():
self.reset_from_tasks()
return
self.__thread = threading.Thread(target=self.try_reset)
self.__thread.start()
def try_reset(self):
with self.__condition:
logger.info('({}): starting event processor thread'.format(
threading.get_ident()))
while True:
self.__condition.acquire()
if self.__stop:
logger.info('({}): stopping event processor thread'.format(
threading.get_ident()))
self.__condition.release()
return
if not self.__pending_reset and not self.__pending_reload:
if not self.__condition.wait(300):
logger.info('({}): condition wait expired'.format(
threading.get_ident()))
pending_reset = self.__pending_reset
pending_reload = self.__pending_reload
self.__pending_reset = False
self.__pending_reload = False
self.__condition.release()
# Reset takes precedence over reload
if pending_reset:
self.do_reset()
elif pending_reload:
self.do_reload()
else:
# Timed out waiting on the condition variable, just do a
# full reset for good measure (as was done before).
self.do_reset()
def do_reset(self):
try:
start_time = time.time()
self.__apps = regenerate_config(self.__marathon,
self.__config_file,
self.__groups,
self.__bind_http_https,
self.__ssl_certs,
self.__templater,
self.__haproxy_map,
self.__group_https_by_vhost)
logger.debug("({0}): updating tasks finished, "
"took {1} seconds".format(
threading.get_ident(),
time.time() - start_time))
except requests.exceptions.ConnectionError as e:
logger.error("({0}): Connection error({1}): {2}".format(
threading.get_ident(), e.errno, e.strerror))
except Exception:
logger.exception("Unexpected error!")
def do_reload(self):
try:
# Validate the existing config before reloading
logger.debug("({}): attempting to reload existing "
"config...".format(
threading.get_ident()))
if validateConfig(self.__config_file):
reloadConfig()
except Exception:
logger.exception("Unexpected error!")
def stop(self):
self.__condition.acquire()
self.__stop = True
self.__condition.notify()
self.__condition.release()
def reset_from_tasks(self):
self.__condition.acquire()
self.__pending_reset = True
self.__condition.notify()
self.__condition.release()
def reload_existing_config(self):
self.__condition.acquire()
self.__pending_reload = True
self.__condition.notify()
self.__condition.release()
def handle_event(self, event):
if event['eventType'] == 'status_update_event' or \
event['eventType'] == 'health_status_changed_event' or \
event['eventType'] == 'api_post_event':
self.reset_from_tasks()
def handle_signal(self, sig, stack):
if sig == signal.SIGHUP:
logger.debug('received signal SIGHUP - reloading config')
self.reset_from_tasks()
elif sig == signal.SIGUSR1:
logger.debug('received signal SIGUSR1 - reloading existing config')
self.reload_existing_config()
else:
logger.warning('received unknown signal %d' % (sig,))
def get_arg_parser():
parser = argparse.ArgumentParser(
description="Marathon HAProxy Load Balancer",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--longhelp",
help="Print out configuration details",
action="store_true")
parser.add_argument("--marathon", "-m",
nargs="+",
help="[required] Marathon endpoint, eg. " +
"-m http://marathon1:8080 http://marathon2:8080",
default=["http://master.mesos:8080"])
parser.add_argument("--haproxy-config",
help="Location of haproxy configuration",
default="/etc/haproxy/haproxy.cfg")
parser.add_argument("--group",
help="[required] Only generate config for apps which"
" list the specified names. Use '*' to match all"
" groups, including those without a group specified.",
action="append",
default=list())
parser.add_argument("--command", "-c",
help="If set, run this command to reload haproxy.",
default=None)
parser.add_argument("--max-reload-retries",
help="Max reload retries before failure. Reloads"
" happen every --reload-interval seconds. Set to"
" 0 to disable or -1 for infinite retries.",
type=int, default=10)
parser.add_argument("--reload-interval",
help="Wait this number of seconds between"
" reload retries.",
type=int, default=10)
parser.add_argument("--strict-mode",
help="If set, backends are only advertised if"
" HAPROXY_{n}_ENABLED=true. Strict mode will be"
" enabled by default in a future release.",
action="store_true")
parser.add_argument("--sse", "-s",
help="Use Server Sent Events",
action="store_true")
parser.add_argument("--archive-versions",
help="Number of config versions to archive",
type=int, default=5)
parser.add_argument("--health-check", "-H",
help="If set, respect Marathon's health check "
"statuses before adding the app instance into "
"the backend pool.",
action="store_true")
parser.add_argument("--lru-cache-capacity",
help="LRU cache size (in number "
"of items). This should be at least as large as the "
"number of tasks exposed via marathon-lb.",
type=int, default=1000
)
parser.add_argument("--haproxy-map",
help="Use HAProxy maps for domain name to backend"
"mapping.", action="store_true")
parser.add_argument("--dont-bind-http-https",
help="Don't bind to HTTP and HTTPS frontends.",
action="store_true")
parser.add_argument("--group-https-by-vhost",
help="Group https frontends by vhost.",
action="store_true")
parser.add_argument("--ssl-certs",
help="List of SSL certificates separated by comma"
"for frontend marathon_https_in"
"Ex: /etc/ssl/site1.co.pem,/etc/ssl/site2.co.pem",
default="/etc/ssl/cert.pem")
parser.add_argument("--skip-validation",
help="Skip haproxy config file validation",
action="store_true")
parser.add_argument("--dry", "-d",
help="Only print configuration to console",
action="store_true")
parser.add_argument("--min-serv-port-ip-per-task",
help="Minimum port number to use when auto-assigning "
"service ports for IP-per-task applications",
type=int, default=10050)
parser.add_argument("--max-serv-port-ip-per-task",
help="Maximum port number to use when auto-assigning "
"service ports for IP-per-task applications",
type=int, default=10100)
parser = set_logging_args(parser)
parser = set_marathon_auth_args(parser)
return parser
def load_json(data_str):
return cleanup_json(json.loads(data_str))
if __name__ == '__main__':
# Process arguments
arg_parser = get_arg_parser()
args = arg_parser.parse_args()
# Print the long help text if flag is set
if args.longhelp:
print(__doc__)
print('```')
arg_parser.print_help()
print('```')
print(ConfigTemplater().get_descriptions())
sys.exit()
# otherwise make sure that a Marathon URL was specified
else:
if args.marathon is None:
arg_parser.error('argument --marathon/-m is required')
if bool(args.min_serv_port_ip_per_task) != \
bool(args.max_serv_port_ip_per_task):
arg_parser.error(
'either specify both --min-serv-port-ip-per-task '
'and --max-serv-port-ip-per-task or neither (set both to zero '
'to disable auto assignment)')
if args.min_serv_port_ip_per_task > args.max_serv_port_ip_per_task:
arg_parser.error(
'cannot set --min-serv-port-ip-per-task to a higher value '
'than --max-serv-port-ip-per-task')
if len(args.group) == 0:
        arg_parser.error('argument --group is required: please ' +
                         'specify at least one group name')
# Configure the service port assigner if min/max ports have been specified.
if args.min_serv_port_ip_per_task and args.max_serv_port_ip_per_task:
SERVICE_PORT_ASSIGNER.set_ports(args.min_serv_port_ip_per_task,
args.max_serv_port_ip_per_task)
# Set request retries
s = requests.Session()
a = requests.adapters.HTTPAdapter(max_retries=3)
s.mount('http://', a)
# Setup logging
setup_logging(logger, args.syslog_socket, args.log_format, args.log_level)
# initialize health check LRU cache
if args.health_check:
healthCheckResultCache = LRUCache(args.lru_cache_capacity)
ip_cache.set(LRUCache(args.lru_cache_capacity))
# Marathon API connector
marathon = Marathon(args.marathon,
args.health_check,
args.strict_mode,
get_marathon_auth_params(args),
args.marathon_ca_cert)
# If we're going to be handling events, set up the event processor and
# hook it up to the process signals.
if args.sse:
processor = MarathonEventProcessor(marathon,
args.haproxy_config,
args.group,
not args.dont_bind_http_https,
args.ssl_certs,
args.haproxy_map,
args.group_https_by_vhost)
signal.signal(signal.SIGHUP, processor.handle_signal)
signal.signal(signal.SIGUSR1, processor.handle_signal)
backoffFactor = 1.5
waitSeconds = 3
maxWaitSeconds = 300
waitResetSeconds = 600
while True:
stream_started = time.time()
currentWaitSeconds = random.random() * waitSeconds
stream = marathon.get_event_stream()
try:
# processor start is now idempotent and will start at
# most one thread
processor.start()
events = marathon.iter_events(stream)
for event in events:
if (event.data.strip() != ''):
# marathon sometimes sends more than one json per event
# e.g. {}\r\n{}\r\n\r\n
for real_event_data in re.split(r'\r\n', event.data):
data = load_json(real_event_data)
logger.info(
"received event of type {0}"
.format(data['eventType']))
processor.handle_event(data)
else:
logger.info("skipping empty message")
except pycurl.error as e:
errno, e_msg = e.args
# Error number 28:
# 'Operation too slow. Less than 1 bytes/sec transferred
# the last 300 seconds'
# This happens when there is no activity on the marathon
# event stream for the last 5 minutes. In this case we
# should immediately reconnect in case the connection to
# marathon died silently so that we miss as few events as
# possible.
if errno == 28:
m = 'Possible timeout detected: {}, reconnecting now...'
logger.info(m.format(e_msg))
currentWaitSeconds = 0
else:
logger.exception("Caught exception")
logger.error("Reconnecting in {}s...".format(
currentWaitSeconds))
except Exception:
logger.exception("Caught exception")
logger.error("Reconnecting in {}s...".format(
currentWaitSeconds))
# We must close the connection because we are calling
# get_event_stream on the next loop
stream.curl.close()
if currentWaitSeconds > 0:
# Increase the next waitSeconds by the backoff factor
waitSeconds = backoffFactor * waitSeconds
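                # e.g. 3 -> 4.5 -> 6.75 -> ... capped at maxWaitSeconds (300)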
# Don't sleep any more than 5 minutes
if waitSeconds > maxWaitSeconds:
waitSeconds = maxWaitSeconds
# Reset the backoff if it's been more than 10 minutes
if (time.time() - stream_started) > waitResetSeconds:
waitSeconds = 3
time.sleep(currentWaitSeconds)
processor.stop()
else:
# Generate base config
regenerate_config(marathon,
args.haproxy_config,
args.group,
not args.dont_bind_http_https,
args.ssl_certs,
ConfigTemplater(),
args.haproxy_map,
args.group_https_by_vhost)
test.py
# vim: sw=4:ts=4:et
import logging
import os, os.path
import pickle
import re
import shutil
import signal
import tarfile
import tempfile
import threading
import time
import unittest
import uuid
from multiprocessing import Queue, cpu_count, Event
from queue import Empty
import saq, saq.test
from saq.analysis import RootAnalysis, _get_io_read_count, _get_io_write_count, Observable
from saq.constants import *
from saq.database import get_db_connection, use_db, acquire_lock, clear_expired_locks, initialize_node
from saq.engine import Engine, DelayedAnalysisRequest, add_workload
from saq.network_client import submit_alerts
from saq.observables import create_observable
from saq.test import *
from saq.util import *
class TestCase(ACEEngineTestCase):
def test_controlled_stop(self):
engine = Engine()
try:
engine.start()
engine.controlled_stop()
engine.wait()
except KeyboardInterrupt:
engine.stop()
engine.wait()
def test_immediate_stop(self):
engine = Engine()
try:
engine.start()
engine.stop()
engine.wait()
except KeyboardInterrupt:
engine.stop()
engine.wait()
def test_signal_TERM(self):
engine = Engine()
try:
engine.start()
def _send_signal():
wait_for_log_count('waiting for engine process', 1)
os.kill(engine.engine_process.pid, signal.SIGTERM)
t = threading.Thread(target=_send_signal)
t.start()
engine.wait()
except KeyboardInterrupt:
engine.stop()
engine.wait()
def test_signal_INT(self):
engine = Engine()
try:
engine.start()
def _send_signal():
wait_for_log_count('waiting for engine process', 1)
os.kill(engine.engine_process.pid, signal.SIGINT)
t = threading.Thread(target=_send_signal)
t.start()
engine.wait()
except KeyboardInterrupt:
engine.stop()
engine.wait()
def test_single_process(self):
        # test starting and stopping in single-threaded mode
engine = Engine(single_threaded_mode=True)
try:
engine.start()
except KeyboardInterrupt:
pass
def test_engine_default_pools(self):
# test starting with no analysis pools defined
engine = Engine()
engine.start()
engine.stop()
engine.wait()
# we should see this log message
regex = re.compile(r'no analysis pools defined -- defaulting to (\d+) workers assigned to any pool')
results = search_log_regex(regex)
self.assertEquals(len(results), 1)
m = regex.search(results[0].getMessage())
self.assertIsNotNone(m)
self.assertEquals(int(m.group(1)), cpu_count())
@use_db
def test_acquire_node_id(self, db, c):
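# the @use_db decorator (imported from saq.database) supplies a database
# connection (db) and cursor (c) to the test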
engine = Engine()
engine.start()
engine.stop()
engine.wait()
# when an Engine starts up it should acquire a node_id for saq.SAQ_NODE
self.assertIsNotNone(saq.SAQ_NODE_ID)
c.execute("""SELECT name, location, company_id, is_primary, any_mode, is_local
FROM nodes WHERE id = %s""", (saq.SAQ_NODE_ID,))
row = c.fetchone()
self.assertIsNotNone(row)
_name, _location, _company_id, _is_primary, _any_mode, _is_local = row
self.assertEquals(_name, saq.SAQ_NODE)
self.assertEquals(_location, saq.API_PREFIX)
self.assertEquals(_company_id, saq.COMPANY_ID)
#self.assertIsInstance(_any_mode, int)
#self.assertEquals(_any_mode, 0)
self.assertIsInstance(_is_local, int)
self.assertEquals(_is_local, 0)
@use_db
def test_acquire_local_node_id(self, db, c):
engine = Engine()
engine.set_local()
engine.start()
engine.stop()
engine.wait()
# when a local engine starts up it should acquire a local node with a uuid as the name
self.assertIsNotNone(saq.SAQ_NODE_ID)
c.execute("""SELECT name, location, company_id, is_primary, any_mode, is_local
FROM nodes WHERE id = %s""", (saq.SAQ_NODE_ID,))
row = c.fetchone()
from saq.util import validate_uuid
self.assertIsNotNone(row)
_name, _location, _company_id, _is_primary, _any_mode, _is_local = row
self.assertTrue(validate_uuid(_name))
self.assertEquals(_company_id, saq.COMPANY_ID)
#self.assertIsInstance(_any_mode, int)
#self.assertEquals(_any_mode, 0)
self.assertIsInstance(_is_local, int)
self.assertEquals(_is_local, 1)
def test_analysis_modes(self):
engine = TestEngine()
engine.initialize()
engine.initialize_modules()
# analysis mode test_empty should have 0 modules
self.assertEquals(len(engine.analysis_mode_mapping['test_empty']), 0)
engine = TestEngine()
engine.enable_module('analysis_module_basic_test', 'test_empty')
engine.enable_module('analysis_module_test_delayed_analysis', 'test_empty')
engine.enable_module('analysis_module_test_engine_locking', 'test_empty')
engine.enable_module('analysis_module_test_final_analysis', 'test_empty')
engine.enable_module('analysis_module_test_post_analysis', 'test_empty')
engine.initialize()
engine.initialize_modules()
# analysis mode test_single should have 1 module
self.assertEquals(len(engine.analysis_mode_mapping['test_single']), 1)
self.assertEquals(engine.analysis_mode_mapping['test_single'][0].config_section, 'analysis_module_basic_test')
# analysis mode test_groups should have 5 modules
self.assertEquals(len(engine.analysis_mode_mapping['test_groups']), 5)
# analysis mode test_disabled should have 4 modules (minus basic_test)
self.assertEquals(len(engine.analysis_mode_mapping['test_disabled']), 4)
self.assertTrue('analysis_module_basic_test' not in [m.config_section for m in engine.analysis_mode_mapping['test_disabled']])
def test_single_process_analysis(self):
root = create_root_analysis(uuid=str(uuid.uuid4()))
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
observable = root.add_observable(F_TEST, 'test_1')
root.analysis_mode = 'test_single'
root.save()
root.schedule()
engine = TestEngine()
engine.enable_module('analysis_module_basic_test')
#engine.controlled_stop() # redundant
engine.single_threaded_start(mode='test_single')
root.load()
observable = root.get_observable(observable.id)
self.assertIsNotNone(observable)
from saq.modules.test import BasicTestAnalysis
analysis = observable.get_analysis(BasicTestAnalysis)
self.assertIsNotNone(analysis)
def test_multi_process_analysis(self):
root = create_root_analysis(uuid=str(uuid.uuid4()))
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
observable = root.add_observable(F_TEST, 'test_1')
root.analysis_mode = 'test_single'
root.save()
root.schedule()
engine = TestEngine()
engine.enable_module('analysis_module_basic_test')
engine.controlled_stop()
engine.start()
engine.wait()
root.load()
observable = root.get_observable(observable.id)
self.assertIsNotNone(observable)
from saq.modules.test import BasicTestAnalysis
analysis = observable.get_analysis(BasicTestAnalysis)
self.assertIsNotNone(analysis)
def test_missing_analysis_mode(self):
saq.CONFIG['engine']['default_analysis_mode'] = 'test_single'
root = create_root_analysis(uuid=str(uuid.uuid4()))
root.analysis_mode = None # <-- no analysis mode here
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
observable = root.add_observable(F_TEST, 'test_1')
root.save()
root.schedule()
engine = TestEngine()
engine.enable_module('analysis_module_basic_test')
engine.controlled_stop()
engine.start()
engine.wait()
# the analysis mode should default to test_single
root = RootAnalysis(storage_dir=root.storage_dir)
root.load()
#self.assertIsNone(root.analysis_mode)
observable = root.get_observable(observable.id)
self.assertIsNotNone(observable)
analysis = observable.get_analysis('BasicTestAnalysis')
self.assertIsNotNone(analysis)
def test_invalid_analysis_mode(self):
# an invalid analysis mode happens when you submit an analysis to an engine
# that supports any analysis mode but doesn't have any configuration settings
# for the one that was submitted
# in that case we use the default_analysis_mode
# we're setting the analysis mode to an invalid value
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='foobar')
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
observable = root.add_observable(F_TEST, 'test_1')
root.save()
root.schedule()
engine = TestEngine(local_analysis_modes=[])
engine.default_analysis_mode = 'test_single'
engine.enable_module('analysis_module_basic_test')
engine.controlled_stop()
engine.start()
engine.wait()
# the analysis mode should fall back to the default (test_single) but we should also get a warning
root = RootAnalysis(storage_dir=root.storage_dir)
root.load()
observable = root.get_observable(observable.id)
self.assertIsNotNone(observable)
from saq.modules.test import BasicTestAnalysis
analysis = observable.get_analysis(BasicTestAnalysis)
self.assertIsNotNone(analysis)
self.assertTrue(log_count('invalid analysis mode') > 0)
def test_multi_process_multi_analysis(self):
uuids = []
for _ in range(3):
root = create_root_analysis(uuid=str(uuid.uuid4()))
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
observable = root.add_observable(F_TEST, 'test_1')
root.analysis_mode = 'test_single'
root.save()
root.schedule()
uuids.append((root.uuid, observable.id))
engine = TestEngine()
engine.enable_module('analysis_module_basic_test')
engine.controlled_stop()
engine.start()
engine.wait()
for root_uuid, observable_uuid in uuids:
root = RootAnalysis(uuid=root_uuid)
root.storage_dir = storage_dir_from_uuid(root_uuid)
root.load()
observable = root.get_observable(observable_uuid)
self.assertIsNotNone(observable)
from saq.modules.test import BasicTestAnalysis
analysis = observable.get_analysis(BasicTestAnalysis)
self.assertIsNotNone(analysis)
def test_no_enabled_modules(self):
# by default the analysis modules specified for the unit tests are disabled (globally)
# so just starting up an engine should load no modules at all
# even though there are modules enabled for the "test_groups" analysis mode
engine = TestEngine(analysis_pools={'test_groups': 1})
engine.controlled_stop()
engine.start()
engine.wait()
self.assertEquals(log_count('loading module '), 0)
def test_globally_enabled_modules(self):
# if we globally enable ALL modules then we should see the correct modules get loaded
for section in saq.CONFIG.keys():
if not section.startswith('analysis_module_'):
continue
saq.CONFIG[section]['enabled'] = 'yes'
# the config file specifies test_empty,test_single,test_groups,test_disabled,test_cleanup as the
# locally supported analysis modes
# so we should see only the modules assigned to these modes get loaded here
engine = TestEngine(analysis_pools={'test_groups': 1})
engine.controlled_stop()
engine.start()
engine.wait()
# TODO kind of annoying I have to edit this every time I add a new module for testing
# there should be 19 analysis modules loaded
self.assertEquals(log_count('loading module '), 19)
def test_locally_enabled_modules(self):
# if we enable modules locally then ONLY those should get loaded
# first we change the config to globally enable all modules
for section in saq.CONFIG.keys():
if not section.startswith('analysis_module_'):
continue
saq.CONFIG[section]['enabled'] = 'yes'
engine = TestEngine(analysis_pools={'test_groups': 1})
# this is the only module that should get loaded
engine.enable_module('analysis_module_basic_test')
engine.controlled_stop()
engine.start()
engine.wait()
# even though 5 are specified and globally enabled, only 1 is loaded
self.assertEquals(log_count('loading module '), 1)
self.assertEquals(log_count('loading module analysis_module_basic_test'), 1)
def test_no_analysis(self):
root = create_root_analysis(uuid=str(uuid.uuid4()))
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
# this test should return False instead of an Analysis
observable = root.add_observable(F_TEST, 'test_2')
root.analysis_mode = 'test_single'
root.save()
root.schedule()
engine = TestEngine()
engine.enable_module('analysis_module_basic_test')
engine.controlled_stop()
engine.start()
engine.wait()
root = RootAnalysis(uuid=root.uuid, storage_dir=root.storage_dir)
root.load()
observable = root.get_observable(observable.id)
from saq.modules.test import BasicTestAnalysis
# so this should come back as False
self.assertTrue(isinstance(observable.get_analysis(BasicTestAnalysis), bool))
self.assertFalse(observable.get_analysis(BasicTestAnalysis))
def test_time_range_grouped_analysis(self):
root = create_root_analysis(uuid=str(uuid.uuid4()))
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
observable_1 = root.add_observable(F_TEST, 'test_1', parse_event_time('2019-04-16 12:00:00'))
observable_2 = root.add_observable(F_TEST, 'test_1', parse_event_time('2019-04-16 12:10:00'))
observable_3 = root.add_observable(F_TEST, 'test_1', parse_event_time('2019-04-16 14:00:00'))
observable_4 = root.add_observable(F_TEST, 'test_1', parse_event_time('2019-04-16 10:00:00'))
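# observables 1 and 2 share a value and are only 10 minutes apart, so the
# grouping module should fold them into a single analysis; 3 and 4 fall
# outside that window (the exact window size is configured for
# analysis_module_grouped_time_range and is not shown here)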
root.analysis_mode = 'test_groups'
root.save()
root.schedule()
engine = TestEngine()
engine.enable_module('analysis_module_grouped_time_range', 'test_groups')
engine.controlled_stop()
engine.start()
engine.wait()
root = RootAnalysis(uuid=root.uuid, storage_dir=root.storage_dir)
root.load()
observable_1 = root.get_observable(observable_1.id)
observable_2 = root.get_observable(observable_2.id)
observable_3 = root.get_observable(observable_3.id)
observable_4 = root.get_observable(observable_4.id)
from saq.modules.test import GroupedByTimeRangeAnalysis
# observables 3 and 4 should have analysis
self.assertTrue(bool(observable_3.get_analysis(GroupedByTimeRangeAnalysis)))
self.assertTrue(bool(observable_4.get_analysis(GroupedByTimeRangeAnalysis)))
# either 1 or 2 should have it but not both (logical xor)
self.assertTrue(bool(observable_1.get_analysis(GroupedByTimeRangeAnalysis)) ^ bool(observable_2.get_analysis(GroupedByTimeRangeAnalysis)))
# and one of these should be a grouping target
self.assertTrue(observable_1.grouping_target or observable_2.grouping_target)
# remember which one was the grouping target
grouping_target = observable_1 if observable_1.grouping_target else observable_2
root.schedule()
engine = TestEngine()
engine.enable_module('analysis_module_grouping_target', 'test_groups')
engine.controlled_stop()
engine.start()
engine.wait()
root = RootAnalysis(uuid=root.uuid, storage_dir=root.storage_dir)
root.load()
observable_1 = root.get_observable(observable_1.id)
observable_2 = root.get_observable(observable_2.id)
grouping_target = root.get_observable(grouping_target.id)
from saq.modules.test import GroupingTargetAnalysis
# either 1 or 2 should have it but not both (logical xor)
self.assertTrue(bool(observable_1.get_analysis(GroupingTargetAnalysis)) ^ bool(observable_2.get_analysis(GroupingTargetAnalysis)))
# and the one that was previously marked as the grouping target is the one that should have the analysis
self.assertTrue(bool(grouping_target.get_analysis(GroupingTargetAnalysis)))
def test_no_analysis_no_return(self):
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_single')
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
observable = root.add_observable(F_TEST, 'test_3')
root.save()
root.schedule()
engine = TestEngine()
engine.enable_module('analysis_module_basic_test')
engine.controlled_stop()
engine.start()
engine.wait()
root = RootAnalysis(uuid=root.uuid, storage_dir=root.storage_dir)
root.load()
observable = root.get_observable(observable.id)
from saq.modules.test import BasicTestAnalysis
# so what happens here is even though you return nothing from execute_analysis
# execute_final_analysis defaults to returning False
self.assertFalse(observable.get_analysis(BasicTestAnalysis))
# you should also get a warning log
wait_for_log_count('is not returning a boolean value', 1, 5)
def test_delayed_analysis_single(self):
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
observable = root.add_observable(F_TEST, '0:01|0:05')
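# the F_TEST value appears to encode "<delay>|<timeout>" (m:ss) for the
# delayed analysis test module: roughly a 1 second delay with a 5 second cap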
root.save()
root.schedule()
engine = TestEngine()
engine.enable_module('analysis_module_test_delayed_analysis')
engine.controlled_stop()
engine.start()
engine.wait()
from saq.modules.test import DelayedAnalysisTestAnalysis
root = create_root_analysis(uuid=root.uuid, storage_dir=storage_dir_from_uuid(root.uuid))
root.load()
analysis = root.get_observable(observable.id).get_analysis(DelayedAnalysisTestAnalysis)
self.assertIsNotNone(analysis)
self.assertTrue(analysis.initial_request)
self.assertTrue(analysis.delayed_request)
self.assertEquals(analysis.request_count, 2)
self.assertTrue(analysis.completed)
def test_delayed_analysis_multiple(self):
uuids = []
for i in range(3):
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
observable = root.add_observable(F_TEST, '0:01|0:05')
root.save()
root.schedule()
uuids.append((root.uuid, observable.id))
engine = TestEngine()
engine.enable_module('analysis_module_test_delayed_analysis')
engine.controlled_stop()
engine.start()
engine.wait()
from saq.modules.test import DelayedAnalysisTestAnalysis
for root_uuid, observable_uuid in uuids:
root = create_root_analysis(uuid=root_uuid, storage_dir=storage_dir_from_uuid(root_uuid))
root.load()
analysis = root.get_observable(observable_uuid).get_analysis(DelayedAnalysisTestAnalysis)
self.assertTrue(analysis.initial_request)
self.assertTrue(analysis.delayed_request)
self.assertEquals(analysis.request_count, 2)
self.assertTrue(analysis.completed)
def test_delayed_analysis_timing(self):
root_1 = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root_1.initialize_storage()
o_1 = root_1.add_observable(F_TEST, '0:04|0:10')
root_1.save()
root_1.schedule()
root_2 = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root_2.initialize_storage()
o_2 = root_2.add_observable(F_TEST, '0:01|0:10')
root_2.save()
root_2.schedule()
engine = TestEngine()
engine.enable_module('analysis_module_test_delayed_analysis')
engine.controlled_stop()
engine.start()
engine.wait()
from saq.modules.test import DelayedAnalysisTestAnalysis
# the second one should finish before the first one
root_1 = RootAnalysis(uuid=root_1.uuid, storage_dir=root_1.storage_dir)
root_1.load()
analysis_1 = root_1.get_observable(o_1.id).get_analysis(DelayedAnalysisTestAnalysis)
self.assertTrue(analysis_1.initial_request)
self.assertTrue(analysis_1.delayed_request)
self.assertEquals(analysis_1.request_count, 2)
self.assertTrue(analysis_1.completed)
root_2 = RootAnalysis(uuid=root_2.uuid, storage_dir=root_2.storage_dir)
root_2.load()
analysis_2 = root_2.get_observable(o_2.id).get_analysis(DelayedAnalysisTestAnalysis)
self.assertTrue(analysis_2.initial_request)
self.assertTrue(analysis_2.delayed_request)
self.assertEquals(analysis_2.request_count, 2)
self.assertTrue(analysis_2.completed)
self.assertLess(analysis_2.complete_time, analysis_1.complete_time)
def test_unix_signals(self):
engine = TestEngine()
engine.start()
# tell ACE to reload the configuration and then reload all the workers
os.kill(engine.engine_process.pid, signal.SIGHUP)
wait_for_log_count('reloading engine configuration', 1, 5)
wait_for_log_count('got command to restart workers', 1, 5)
wait_for_log_count('started worker loop', 2)
engine.controlled_stop()
engine.wait()
@track_io
def test_io_count(self):
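# @track_io presumably resets the counters behind _get_io_read_count and
# _get_io_write_count, which is why both are expected to start at 0 here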
self.assertEquals(_get_io_write_count(), 0)
self.assertEquals(_get_io_read_count(), 0)
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_single')
root.initialize_storage()
observable = root.add_observable(F_TEST, 'test_1')
root.save()
root.schedule()
self.assertEquals(_get_io_write_count(), 1)
self.assertEquals(_get_io_read_count(), 0)
engine = TestEngine(pool_size_limit=1)
engine.enable_module('analysis_module_basic_test')
engine.controlled_stop()
engine.start()
engine.wait()
# at this point it should have loaded the root analysis
# and then saved it again along with the details for the BasicTestAnalysis
self.assertEquals(_get_io_write_count(), 3)
self.assertEquals(_get_io_read_count(), 1)
from saq.modules.test import BasicTestAnalysis
root = create_root_analysis(storage_dir=root.storage_dir)
root.load()
self.assertEquals(_get_io_write_count(), 3)
self.assertEquals(_get_io_read_count(), 2)
analysis = root.get_observable(observable.id).get_analysis(BasicTestAnalysis)
self.assertEquals(_get_io_read_count(), 2) # should not have loaded details yet...
self.assertTrue(analysis.test_result)
self.assertEquals(_get_io_read_count(), 3)
@track_io
def test_delayed_analysis_io_count(self):
self.assertEquals(_get_io_write_count(), 0)
self.assertEquals(_get_io_read_count(), 0)
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.initialize_storage()
observable = root.add_observable(F_TEST, '00:01|00:05')
root.save()
root.schedule()
self.assertEquals(_get_io_write_count(), 1)
self.assertEquals(_get_io_read_count(), 0)
engine = TestEngine(pool_size_limit=1)
engine.enable_module('analysis_module_test_delayed_analysis')
engine.controlled_stop()
engine.start()
engine.wait()
# expect 5 writes at this point
# (1) initial root analysis save
# (2) initial module save
# (3) root analysis completed save
# (4) updated module save
# (5) root analysis completed save
self.assertEquals(_get_io_write_count(), 5)
# and then 3 reads at this point
self.assertEquals(_get_io_read_count(), 3)
from saq.modules.test import DelayedAnalysisTestAnalysis
root = create_root_analysis(uuid=root.uuid)
self.assertTrue(root.load())
self.assertEquals(_get_io_write_count(), 5)
self.assertEquals(_get_io_read_count(), 4)
analysis = root.get_observable(observable.id).get_analysis(DelayedAnalysisTestAnalysis)
self.assertIsNotNone(analysis)
self.assertEquals(_get_io_read_count(), 4) # should not have loaded details yet...
self.assertTrue(analysis.delayed_request)
self.assertEquals(_get_io_read_count(), 5)
def test_autorefresh(self):
saq.CONFIG['engine']['auto_refresh_frequency'] = '3'
engine = TestEngine(pool_size_limit=1)
engine.start()
wait_for_log_count('triggered reload of worker modules', 1)
wait_for_log_count('detected death of process', 1)
engine.controlled_stop()
engine.wait()
def test_memory_limit(self):
from saq.database import Workload, Lock
# reduce the limits so the test is easier
saq.CONFIG['global']['memory_limit_warning'] = '128'
saq.CONFIG['global']['memory_limit_kill'] = '256'
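# the units of these limits are defined elsewhere (presumably megabytes);
# what matters for the test is that the warning threshold is below the kill threshold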
root = create_root_analysis()
root.initialize_storage()
observable = root.add_observable(F_TEST, 'test_memory_limit_warning')
root.save()
root.schedule()
engine = TestEngine(pool_size_limit=1)
engine.enable_module('analysis_module_basic_test')
engine.start()
time.sleep(3)
engine.controlled_stop()
engine.wait()
# we should see a warning message about taking up too much memory
wait_for_log_count('is using too much memory', 1)
# same thing as before except we allocate so much memory we force ace to kill the process
root = create_root_analysis()
root.initialize_storage()
observable = root.add_observable(F_TEST, 'test_memory_limit_kill')
root.save()
root.schedule()
engine = TestEngine(pool_size_limit=1)
engine.enable_module('analysis_module_basic_test')
engine.start()
time.sleep(3)
engine.controlled_stop()
engine.wait()
# this time the worker process should be killed for using too much memory
wait_for_log_count('used too much memory', 1, 10)
# we should NOT see a workload item or a lock left
self.assertEquals(saq.db.query(Workload.id).count(), 0)
self.assertEquals(saq.db.query(Lock.uuid).count(), 0)
def test_final_analysis(self):
"""Test final analysis execution."""
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.initialize_storage()
observable = root.add_observable(F_TEST, 'test')
root.save()
root.schedule()
engine = TestEngine(pool_size_limit=1)
engine.enable_module('analysis_module_test_final_analysis')
engine.controlled_stop()
engine.start()
engine.wait()
# we should have a single observable now
root = create_root_analysis(uuid=root.uuid)
root.load()
self.assertEquals(len(root.all_observables), 1)
self.assertTrue(root.has_observable(F_TEST, 'test'))
from saq.modules.test import FinalAnalysisTestAnalysis
analysis = root.get_observable(observable.id).get_analysis(FinalAnalysisTestAnalysis)
self.assertIsNotNone(analysis)
# we should have seen this twice since the modification of adding an analysis will trigger
# final analysis again
self.assertEquals(log_count('entering final analysis for '), 2)
@track_io
def test_final_analysis_io_count(self):
self.assertEquals(_get_io_write_count(), 0)
self.assertEquals(_get_io_read_count(), 0)
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.initialize_storage()
observable = root.add_observable(F_TEST, 'test')
root.save()
root.schedule()
self.assertEquals(_get_io_write_count(), 1)
self.assertEquals(_get_io_read_count(), 0)
engine = TestEngine(pool_size_limit=1)
engine.enable_module('analysis_module_test_final_analysis')
engine.controlled_stop()
engine.start()
engine.wait()
self.assertEquals(_get_io_write_count(), 3)
self.assertEquals(_get_io_read_count(), 1)
self.assertEquals(log_count('entering final analysis for '), 2)
@track_io
def test_final_analysis_io_count_2(self):
"""Same thing as before but we test with multiple observables."""
self.assertEquals(_get_io_write_count(), 0)
self.assertEquals(_get_io_read_count(), 0)
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.initialize_storage()
observable_1 = root.add_observable(F_TEST, 'test_01')
observable_2 = root.add_observable(F_TEST, 'test_02')
root.save()
root.schedule()
self.assertEquals(_get_io_write_count(), 1)
self.assertEquals(_get_io_read_count(), 0)
engine = TestEngine(pool_size_limit=1)
engine.enable_module('analysis_module_test_final_analysis')
engine.controlled_stop()
engine.start()
engine.wait()
self.assertEquals(_get_io_write_count(), 4)
self.assertEquals(_get_io_read_count(), 1)
self.assertEquals(log_count('entering final analysis for '), 3)
# ensure that post analysis is executed even if delayed analysis times out
def test_delayed_analysis_timeout(self):
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
test_observable = root.add_observable(F_TEST, '0:01|0:01')
root.save()
root.schedule()
engine = TestEngine()
engine.enable_module('analysis_module_test_delayed_analysis_timeout', 'test_groups')
engine.enable_module('analysis_module_test_post_analysis', 'test_groups')
engine.start()
# wait for delayed analysis to time out
wait_for_log_count('has timed out', 1)
engine.controlled_stop()
engine.wait()
# post analysis should have executed
self.assertEquals(log_count('execute_post_analysis called'), 1)
def test_delayed_analysis_recovery(self):
from saq.database import DelayedAnalysis, Workload
# scenario: delayed analysis starts, ace engine stops and then starts back up
# the delayed analysis should pick back up and complete
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
observable = root.add_observable(F_TEST, '0:05|0:10')
root.save()
root.schedule()
engine = TestEngine()
engine.enable_module('analysis_module_test_delayed_analysis')
engine.controlled_stop()
engine.start()
# wait until we see the delay in the queue
wait_for_log_count('queue sizes workload 0 delayed 1', 1)
# now kill the engine
engine.stop()
engine.wait()
# we should have one delayed analysis still in the queue
self.assertEquals(saq.db.query(DelayedAnalysis.id).count(), 1)
# and nothing in the workload queue
self.assertEquals(saq.db.query(Workload.id).count(), 0)
# start another engine back up
engine = TestEngine()
engine.enable_module('analysis_module_test_delayed_analysis')
engine.controlled_stop()
engine.start()
engine.wait()
from saq.modules.test import DelayedAnalysisTestAnalysis
root = create_root_analysis(uuid=root.uuid, storage_dir=storage_dir_from_uuid(root.uuid))
root.load()
analysis = root.get_observable(observable.id).get_analysis(DelayedAnalysisTestAnalysis)
self.assertIsNotNone(analysis)
self.assertTrue(analysis.initial_request)
self.assertTrue(analysis.delayed_request)
self.assertEquals(analysis.request_count, 2)
self.assertTrue(analysis.completed)
# queue should be empty
saq.db.close()
self.assertEquals(saq.db.query(DelayedAnalysis.id).count(), 0)
self.assertEquals(saq.db.query(Workload.id).count(), 0)
def test_wait_for_analysis(self):
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.initialize_storage()
test_observable = root.add_observable(F_TEST, 'test_1')
root.save()
root.schedule()
engine = TestEngine(analysis_pools={'test_groups': 1})
engine.enable_module('analysis_module_test_wait_a', 'test_groups')
engine.enable_module('analysis_module_test_wait_b', 'test_groups')
engine.controlled_stop()
engine.start()
engine.wait()
root = RootAnalysis(uuid=root.uuid, storage_dir=root.storage_dir)
root.load()
test_observable = root.get_observable(test_observable.id)
self.assertIsNotNone(test_observable)
from saq.modules.test import WaitAnalysis_A, WaitAnalysis_B
self.assertIsNotNone(test_observable.get_analysis(WaitAnalysis_A))
self.assertIsNotNone(test_observable.get_analysis(WaitAnalysis_B))
self.assertEquals(log_count("depends on"), 1)
def test_wait_for_disabled_analysis(self):
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.initialize_storage()
test_observable = root.add_observable(F_TEST, 'test_1')
root.save()
root.schedule()
engine = TestEngine(analysis_pools={'test_groups': 1})
engine.enable_module('analysis_module_test_wait_a', 'test_groups')
#engine.enable_module('analysis_module_test_wait_b')
engine.controlled_stop()
engine.start()
engine.wait()
root = RootAnalysis(storage_dir=root.storage_dir)
root.load()
test_observable = root.get_observable(test_observable.id)
self.assertIsNotNone(test_observable)
from saq.modules.test import WaitAnalysis_A, WaitAnalysis_B
self.assertIsNone(test_observable.get_analysis(WaitAnalysis_A))
self.assertIsNone(test_observable.get_analysis(WaitAnalysis_B))
#self.assertEquals(log_count("requested to wait for disabled (or missing) module"), 1)
self.clear_error_reports()
def test_wait_for_analysis_circ_dep(self):
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.initialize_storage()
test_observable = root.add_observable(F_TEST, 'test_2')
root.save()
root.schedule()
engine = TestEngine(analysis_pools={'test_groups': 1})
engine.enable_module('analysis_module_test_wait_a', 'test_groups')
engine.enable_module('analysis_module_test_wait_b', 'test_groups')
engine.controlled_stop()
engine.start()
engine.wait()
root = RootAnalysis(storage_dir=root.storage_dir)
root.load()
test_observable = root.get_observable(test_observable.id)
self.assertIsNotNone(test_observable)
from saq.modules.test import WaitAnalysis_A, WaitAnalysis_B
self.assertIsNone(test_observable.get_analysis(WaitAnalysis_A))
self.assertIsNone(test_observable.get_analysis(WaitAnalysis_B))
self.assertEquals(log_count("CIRCULAR DEPENDENCY ERROR"), 1)
def test_wait_for_analysis_missing_analysis(self):
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.initialize_storage()
test_observable = root.add_observable(F_TEST, 'test_3')
root.save()
root.schedule()
engine = TestEngine(analysis_pools={'test_groups': 1})
engine.enable_module('analysis_module_test_wait_a', 'test_groups')
engine.enable_module('analysis_module_test_wait_b', 'test_groups')
engine.controlled_stop()
engine.start()
engine.wait()
root = RootAnalysis(storage_dir=root.storage_dir)
root.load()
test_observable = root.get_observable(test_observable.id)
self.assertIsNotNone(test_observable)
from saq.modules.test import WaitAnalysis_A, WaitAnalysis_B
self.assertFalse(test_observable.get_analysis(WaitAnalysis_A))
self.assertIsNotNone(test_observable.get_analysis(WaitAnalysis_B))
# we would only see this log if A waited on B
#self.assertEquals(log_count("did not generate analysis to resolve dep"), 1)
def test_wait_for_analysis_circ_dep_chained(self):
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.initialize_storage()
test_observable = root.add_observable(F_TEST, 'test_4')
root.save()
root.schedule()
engine = TestEngine(analysis_pools={'test_groups': 1})
engine.enable_module('analysis_module_test_wait_a', 'test_groups')
engine.enable_module('analysis_module_test_wait_b', 'test_groups')
engine.enable_module('analysis_module_test_wait_c', 'test_groups')
engine.controlled_stop()
engine.start()
engine.wait()
root = RootAnalysis(storage_dir=root.storage_dir)
root.load()
test_observable = root.get_observable(test_observable.id)
self.assertIsNotNone(test_observable)
from saq.modules.test import WaitAnalysis_A, WaitAnalysis_B, WaitAnalysis_C
self.assertIsNone(test_observable.get_analysis(WaitAnalysis_A))
self.assertIsNone(test_observable.get_analysis(WaitAnalysis_B))
self.assertIsNone(test_observable.get_analysis(WaitAnalysis_C))
self.assertEquals(log_count("CIRCULAR DEPENDENCY ERROR"), 1)
def test_wait_for_analysis_chained(self):
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.initialize_storage()
test_observable = root.add_observable(F_TEST, 'test_5')
root.save()
root.schedule()
engine = TestEngine(analysis_pools={'test_groups': 1})
engine.enable_module('analysis_module_test_wait_a', 'test_groups')
engine.enable_module('analysis_module_test_wait_b', 'test_groups')
engine.enable_module('analysis_module_test_wait_c', 'test_groups')
engine.controlled_stop()
engine.start()
engine.wait()
root = RootAnalysis(storage_dir=root.storage_dir)
root.load()
test_observable = root.get_observable(test_observable.id)
self.assertIsNotNone(test_observable)
from saq.modules.test import WaitAnalysis_A, WaitAnalysis_B, WaitAnalysis_C
self.assertIsNotNone(test_observable.get_analysis(WaitAnalysis_A))
self.assertIsNotNone(test_observable.get_analysis(WaitAnalysis_B))
self.assertIsNotNone(test_observable.get_analysis(WaitAnalysis_C))
self.assertEquals(log_count("CIRCULAR DEPENDENCY ERROR"), 0)
def test_wait_for_analysis_delayed(self):
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.initialize_storage()
test_observable = root.add_observable(F_TEST, 'test_6')
root.save()
root.schedule()
engine = TestEngine(analysis_pools={'test_groups': 1})
engine.enable_module('analysis_module_test_wait_a', 'test_groups')
engine.enable_module('analysis_module_test_wait_b', 'test_groups')
engine.controlled_stop()
engine.start()
engine.wait()
root = RootAnalysis(storage_dir=root.storage_dir)
root.load()
test_observable = root.get_observable(test_observable.id)
self.assertIsNotNone(test_observable)
from saq.modules.test import WaitAnalysis_A, WaitAnalysis_B
self.assertIsNotNone(test_observable.get_analysis(WaitAnalysis_A))
self.assertIsNotNone(test_observable.get_analysis(WaitAnalysis_B))
def test_wait_for_analysis_rejected(self):
from saq.modules.test import WaitAnalysis_A, WaitAnalysis_B, WaitAnalysis_C, \
WaitAnalyzerModule_B
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.initialize_storage()
test_observable = root.add_observable(F_TEST, 'test_engine_032a')
test_observable.exclude_analysis(WaitAnalyzerModule_B)
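# module B is excluded up front, so A cannot end up waiting on it; per the
# assertions below A and C still produce analysis while B comes back False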
root.save()
root.schedule()
engine = TestEngine(analysis_pools={'test_groups': 1})
engine.enable_module('analysis_module_test_wait_a', 'test_groups')
engine.enable_module('analysis_module_test_wait_b', 'test_groups')
engine.enable_module('analysis_module_test_wait_c', 'test_groups')
engine.controlled_stop()
engine.start()
engine.wait()
root = RootAnalysis(storage_dir=root.storage_dir)
root.load()
test_observable = root.get_observable(test_observable.id)
self.assertIsNotNone(test_observable)
self.assertIsNotNone(test_observable.get_analysis(WaitAnalysis_A))
self.assertFalse(test_observable.get_analysis(WaitAnalysis_B))
self.assertIsNotNone(test_observable.get_analysis(WaitAnalysis_C))
def test_post_analysis_after_false_return(self):
# the execute_post_analysis function should be called regardless of what happened during analysis
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.initialize_storage()
test_observable = root.add_observable(F_TEST, 'test')
root.save()
root.schedule()
engine = TestEngine(analysis_pools={'test_groups': 1})
engine.enable_module('analysis_module_test_post_analysis', 'test_groups')
engine.controlled_stop()
engine.start()
engine.wait()
root = RootAnalysis(storage_dir=root.storage_dir)
root.load()
test_observable = root.get_observable(test_observable.id)
from saq.modules.test import PostAnalysisTestResult
self.assertFalse(test_observable.get_analysis(PostAnalysisTestResult))
self.assertEquals(log_count('execute_post_analysis called'), 1)
def test_maximum_cumulative_analysis_warning_time(self):
# setting this to zero should cause it to happen right away
saq.CONFIG['global']['maximum_cumulative_analysis_warning_time'] = '0'
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.initialize_storage()
test_observable = root.add_observable(F_TEST, 'test_1')
root.save()
root.schedule()
engine = TestEngine(analysis_pools={'test_groups': 1})
engine.enable_module('analysis_module_basic_test', 'test_groups')
engine.controlled_stop()
engine.start()
engine.wait()
self.assertEquals(log_count('ACE has been analyzing'), 1)
def test_maximum_cumulative_analysis_warning_time_analysis_mode(self):
# same thing as before except we set the timeout for just the analysis mode
# setting this to zero should cause it to happen right away
saq.CONFIG['analysis_mode_test_groups']['maximum_cumulative_analysis_warning_time'] = '0'
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.initialize_storage()
test_observable = root.add_observable(F_TEST, 'test_1')
root.save()
root.schedule()
engine = TestEngine(analysis_pools={'test_groups': 1})
engine.enable_module('analysis_module_basic_test', 'test_groups')
engine.controlled_stop()
engine.start()
engine.wait()
self.assertEquals(log_count('ACE has been analyzing'), 1)
def test_maximum_cumulative_analysis_fail_time(self):
# setting this to zero should cause it to happen right away
saq.CONFIG['global']['maximum_cumulative_analysis_fail_time'] = '0'
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.initialize_storage()
test_observable = root.add_observable(F_TEST, 'test_1')
root.save()
root.schedule()
engine = TestEngine(analysis_pools={'test_groups': 1})
engine.enable_module('analysis_module_basic_test', 'test_groups')
engine.controlled_stop()
engine.start()
engine.wait()
self.assertEquals(log_count('ACE took too long to analyze'), 1)
def test_maximum_cumulative_analysis_fail_time_analysis_mode(self):
# same thing as before except we set the timeout for just the analysis mode
# setting this to zero should cause it to happen right away
saq.CONFIG['analysis_mode_test_groups']['maximum_cumulative_analysis_fail_time'] = '0'
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.initialize_storage()
test_observable = root.add_observable(F_TEST, 'test_1')
root.save()
root.schedule()
engine = TestEngine(analysis_pools={'test_groups': 1})
engine.enable_module('analysis_module_basic_test', 'test_groups')
engine.controlled_stop()
engine.start()
engine.wait()
self.assertEquals(log_count('ACE took too long to analyze'), 1)
def test_maximum_analysis_time(self):
# setting this to zero should cause it to happen right away
saq.CONFIG['global']['maximum_analysis_time'] = '0'
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.initialize_storage()
test_observable = root.add_observable(F_TEST, 'test_4')
root.save()
root.schedule()
engine = TestEngine(analysis_pools={'test_groups': 1})
engine.enable_module('analysis_module_basic_test', 'test_groups')
engine.controlled_stop()
engine.start()
engine.wait()
# will fire again in final analysis
self.assertEquals(log_count('excessive time - analysis module'), 2)
def test_maximum_analysis_time_analysis_mode(self):
# same thing as before except we set the timeout for just the analysis mode
# setting this to zero should cause it to happen right away
saq.CONFIG['analysis_mode_test_groups']['maximum_analysis_time'] = '0'
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.initialize_storage()
test_observable = root.add_observable(F_TEST, 'test_4')
root.save()
root.schedule()
engine = TestEngine(analysis_pools={'test_groups': 1})
engine.enable_module('analysis_module_basic_test', 'test_groups')
engine.controlled_stop()
engine.start()
engine.wait()
# will fire again in final analysis
self.assertEquals(log_count('excessive time - analysis module'), 2)
def test_is_module_enabled(self):
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.initialize_storage()
test_observable = root.add_observable(F_TEST, 'test')
root.save()
root.schedule()
engine = TestEngine(analysis_pools={'test_groups': 1})
engine.enable_module('analysis_module_dependency_test', 'test_groups')
engine.controlled_stop()
engine.start()
engine.wait()
root = RootAnalysis(storage_dir=root.storage_dir)
root.load()
test_observable = root.get_observable(test_observable.id)
from saq.modules.test import DependencyTestAnalysis, KEY_SUCCESS, KEY_FAIL
analysis = test_observable.get_analysis(DependencyTestAnalysis)
for key in analysis.details[KEY_SUCCESS].keys():
self.assertTrue(analysis.details[KEY_SUCCESS][key])
for key in analysis.details[KEY_FAIL].keys():
self.assertFalse(analysis.details[KEY_FAIL][key])
def test_analysis_mode_priority(self):
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_single')
root.initialize_storage()
test_observable = root.add_observable(F_TEST, 'test_1')
root.save()
root.schedule()
test_1_uuid = root.uuid
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.initialize_storage()
test_observable = root.add_observable(F_TEST, 'test_2')
root.save()
root.schedule()
test_2_uuid = root.uuid
engine = TestEngine(analysis_pools={'test_groups': 1})
engine.enable_module('analysis_module_basic_test')
engine.controlled_stop()
engine.start()
engine.wait()
# we should see test_2_uuid get selected BEFORE test_1_uuid gets selected
results = [_.getMessage() for _ in search_log('got work item')]
self.assertEquals(len(results), 2)
self.assertEquals(results.index('got work item RootAnalysis({})'.format(test_2_uuid)), 0)
def test_analysis_mode_no_priority(self):
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_single')
root.initialize_storage()
test_observable = root.add_observable(F_TEST, 'test_1')
root.save()
root.schedule()
test_1_uuid = root.uuid
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.initialize_storage()
test_observable = root.add_observable(F_TEST, 'test_2')
root.save()
root.schedule()
test_2_uuid = root.uuid
engine = TestEngine(pool_size_limit=1)
engine.enable_module('analysis_module_basic_test')
engine.controlled_stop()
engine.start()
engine.wait()
# since we don't have any kind of priority set they should get selected in order they were inserted (FIFO)
# so we should see test_1_uuid get selected BEFORE test_2_uuid gets selected
results = [_.getMessage() for _ in search_log('got work item')]
self.assertEquals(len(results), 2)
self.assertEquals(results.index('got work item RootAnalysis({})'.format(test_1_uuid)), 0)
def test_merge(self):
# first analysis
root_1 = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root_1.initialize_storage()
test_observable_1 = root_1.add_observable(F_TEST, 'test_1')
existing_user_observable = root_1.add_observable(F_USER, 'admin')
root_1.save()
root_1.schedule()
# second analysis we want to merge into the first
root_2 = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root_2.initialize_storage()
test_observable_2 = root_2.add_observable(F_TEST, 'merge_test_1')
root_2.save()
root_2.schedule()
engine = TestEngine()
engine.enable_module('analysis_module_basic_test')
engine.enable_module('analysis_module_merge_test')
engine.controlled_stop()
engine.start()
engine.wait()
from saq.modules.test import BasicTestAnalysis, MergeTestAnalysis
root_1.load()
test_observable_1 = root_1.get_observable(test_observable_1.id)
self.assertIsNotNone(test_observable_1)
basic_analysis = test_observable_1.get_analysis(BasicTestAnalysis)
self.assertIsNotNone(basic_analysis)
root_2.load()
root_1.merge(basic_analysis, root_2)
root_1.save()
# now the basic analysis should have the test_observable_2
test_observable_2 = root_1.get_observable(test_observable_2.id)
self.assertIsNotNone(test_observable_2)
# and it should have the merge analysis
merge_analysis = test_observable_2.get_analysis(MergeTestAnalysis)
self.assertIsNotNone(merge_analysis)
# and that should have a new observable of its own
output_observable = merge_analysis.get_observables_by_type(F_TEST)
self.assertEquals(len(output_observable), 1)
output_observable = output_observable[0]
self.assertEquals(output_observable.value, 'test_output')
self.assertTrue(output_observable.has_tag('test'))
# there should also be a file observable
file_observable = merge_analysis.get_observables_by_type(F_FILE)
self.assertEquals(len(file_observable), 1)
file_observable = file_observable[0]
with open(os.path.join(root_1.storage_dir, file_observable.value), 'r') as fp:
self.assertEquals(fp.read(), 'test')
# that should have a relationship to a URL observable
self.assertEquals(len(file_observable.relationships), 1)
self.assertEquals(file_observable.relationships[0].r_type, R_DOWNLOADED_FROM)
url_observable = file_observable.relationships[0].target
self.assertTrue(isinstance(url_observable, Observable))
self.assertTrue(url_observable.value, F_URL)
# we also merged an existing observable
# so we should see this observable twice
existing_observable = root_1.get_observable(existing_user_observable.id)
self.assertIsNotNone(existing_observable)
instance_copy = merge_analysis.get_observables_by_type(F_USER)
self.assertEquals(len(instance_copy), 1)
self.assertEquals(instance_copy[0].id, existing_observable.id)
def test_error_reporting(self):
# trigger the failure this way
saq.CONFIG['global']['maximum_cumulative_analysis_fail_time'] = '0'
# remember what was already in the error reporting directory
def _enum_error_reporting():
return set(os.listdir(os.path.join(saq.DATA_DIR, 'error_reports')))
existing_reports = _enum_error_reporting()
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.initialize_storage()
observable = root.add_observable(F_TEST, 'test_3')
root.save()
root.schedule()
engine = TestEngine()
engine.copy_analysis_on_error = True
engine.enable_module('analysis_module_basic_test')
engine.controlled_stop()
engine.start()
engine.wait()
# look at what is in the error reporting directory now
# exclude what we found before to find the new stuff
new_reports = _enum_error_reporting() - existing_reports
# we should have a single error report and a single storage directory in the error reporting directory
self.assertEquals(len(new_reports), 2)
# one should be a file and the other a directory
file_path = None
dir_path = None
for _file in new_reports:
path = os.path.join(os.path.join(saq.DATA_DIR, 'error_reports', _file))
if os.path.isfile(path):
file_path = path
if os.path.isdir(path):
dir_path = path
self.assertIsNotNone(file_path)
self.assertIsNotNone(dir_path)
# check that everything we expect to exist in the dir exists
self.assertTrue(os.path.exists(os.path.join(dir_path, 'data.json')))
self.assertTrue(os.path.exists(os.path.join(dir_path, 'saq.log')))
self.assertTrue(os.path.isdir(os.path.join(dir_path, 'stats')))
self.assertTrue(os.path.isdir(os.path.join(dir_path, '.ace')))
# go ahead and remove these since we check for them after running tests to review actual error reports
shutil.rmtree(dir_path)
os.remove(file_path)
def test_stats(self):
# clear engine statistics
if os.path.exists(os.path.join(saq.MODULE_STATS_DIR, 'ace')):
shutil.rmtree(os.path.join(saq.MODULE_STATS_DIR, 'ace'))
root = create_root_analysis(uuid=str(uuid.uuid4()))
root.initialize_storage()
observable = root.add_observable(F_TEST, 'test_1')
root.save()
root.schedule()
engine = TestEngine()
engine.enable_module('analysis_module_basic_test')
engine.controlled_stop()
engine.start()
engine.wait()
# there should be one subdir in the engine's stats dir
self.assertEquals(len(os.listdir(os.path.join(saq.MODULE_STATS_DIR, 'ace'))), 1)
subdir = os.listdir(os.path.join(saq.MODULE_STATS_DIR, 'ace'))
subdir = subdir[0]
# this should have a single stats file in it
stats_files = os.listdir(os.path.join(os.path.join(saq.MODULE_STATS_DIR, 'ace', subdir)))
self.assertEquals(len(stats_files), 1)
# and it should not be empty
self.assertGreater(os.path.getsize(os.path.join(os.path.join(saq.MODULE_STATS_DIR, 'ace',
subdir, stats_files[0]))), 0)
def test_exclusion(self):
root = create_root_analysis(uuid=str(uuid.uuid4()))
root.initialize_storage()
observable = root.add_observable(F_TEST, 'test_6')
root.save()
root.schedule()
engine = TestEngine()
engine.enable_module('analysis_module_basic_test')
engine.controlled_stop()
engine.start()
engine.wait()
root = RootAnalysis(storage_dir=root.storage_dir)
root.load()
observable = root.get_observable(observable.id)
self.assertIsNotNone(observable)
from saq.modules.test import BasicTestAnalysis
analysis = observable.get_analysis(BasicTestAnalysis)
self.assertIsNotNone(analysis)
# we should have two that were both excluded in different ways
self.assertEquals(len(analysis.observables), 2)
for new_observable in analysis.observables:
new_analysis = new_observable.get_analysis(BasicTestAnalysis)
self.assertFalse(new_analysis)
def test_limited_analysis(self):
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.initialize_storage()
observable = root.add_observable(F_TEST, 'test_1')
observable.limit_analysis('basic_test')
root.save()
root.schedule()
engine = TestEngine()
engine.enable_module('analysis_module_basic_test')
engine.enable_module('analysis_module_test_delayed_analysis')
engine.enable_module('analysis_module_test_engine_locking')
engine.enable_module('analysis_module_test_final_analysis')
engine.enable_module('analysis_module_test_post_analysis')
engine.controlled_stop()
engine.start()
engine.wait()
root = RootAnalysis(storage_dir=root.storage_dir)
root.load()
observable = root.get_observable(observable.id)
self.assertIsNotNone(observable)
# there should only be one analysis performed
self.assertEquals(len(observable.all_analysis), 1)
from saq.modules.test import BasicTestAnalysis
analysis = observable.get_analysis(BasicTestAnalysis)
self.assertIsNotNone(analysis)
self.assertTrue(len(search_log('analysis for test(test_1) limited to 1 modules (basic_test)')) > 0)
def test_limited_analysis_invalid(self):
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.initialize_storage()
observable = root.add_observable(F_TEST, 'test_1')
observable.limit_analysis('basic_tast') # deliberately misspelled module name
root.save()
root.schedule()
engine = TestEngine()
engine.enable_module('analysis_module_basic_test')
engine.enable_module('analysis_module_test_delayed_analysis')
engine.enable_module('analysis_module_test_engine_locking')
engine.enable_module('analysis_module_test_final_analysis')
engine.enable_module('analysis_module_test_post_analysis')
engine.controlled_stop()
engine.start()
engine.wait()
root = RootAnalysis(storage_dir=root.storage_dir)
root.load()
observable = root.get_observable(observable.id)
self.assertIsNotNone(observable)
# there should be no analysis
self.assertEquals(len(observable.all_analysis), 0)
from saq.modules.test import BasicTestAnalysis
analysis = observable.get_analysis(BasicTestAnalysis)
self.assertIsNone(analysis)
self.assertTrue(len(search_log('specified unknown limited analysis')) > 0)
#def test_cleanup(self):
#root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_cleanup')
#root.initialize_storage()
#root.save()
#root.schedule()
#engine = TestEngine()
#engine.controlled_stop()
#engine.start()
#engine.wait()
#self.assertFalse(os.path.isdir(root.storage_dir))
def test_cleanup_alt_workdir(self):
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_cleanup')
root.storage_dir = workload_storage_dir(root.uuid)
root.initialize_storage()
root.save()
root.schedule()
engine = TestEngine()
engine.controlled_stop()
engine.start()
engine.wait()
self.assertFalse(os.path.isdir(root.storage_dir))
def test_no_cleanup(self):
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_empty')
root.initialize_storage()
root.save()
root.schedule()
engine = TestEngine()
engine.controlled_stop()
engine.start()
engine.wait()
self.assertTrue(os.path.isdir(root.storage_dir))
def test_cleanup_with_delayed_analysis(self):
# cleanup is enabled, but it is deferred while delayed analysis is outstanding;
# the storage directory is only removed once the delayed analysis completes
saq.CONFIG['analysis_mode_test_groups']['cleanup'] = 'yes'
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_groups')
root.initialize_storage()
observable = root.add_observable(F_TEST, '00:01|00:05')
root.save()
root.schedule()
engine = TestEngine(analysis_pools={'test_groups': 1})
engine.enable_module('analysis_module_test_delayed_analysis')
engine.controlled_stop()
engine.start()
engine.wait()
self.assertFalse(os.path.isdir(root.storage_dir))
self.assertEquals(log_count('not cleaning up RootAnalysis({}) (found outstanding work)'.format(root.uuid)), 1)
def test_local_analysis_mode_single(self):
root = create_root_analysis(uuid=str(uuid.uuid4()))
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
observable = root.add_observable(F_TEST, 'test_1')
root.save()
root.schedule()
engine = TestEngine(local_analysis_modes=['test_groups'], pool_size_limit=1)
engine.enable_module('analysis_module_basic_test')
engine.controlled_stop()
engine.start()
engine.wait()
root.load()
observable = root.get_observable(observable.id)
self.assertIsNotNone(observable)
from saq.modules.test import BasicTestAnalysis
analysis = observable.get_analysis(BasicTestAnalysis)
self.assertIsNotNone(analysis)
def test_local_analysis_mode_missing_default(self):
saq.CONFIG['engine']['default_analysis_mode'] = 'test_single'
# when we specify a default analysis mode that is not in the locally supported modes of the engine
# it should automatically get added to the list of locally supported modes
# we specify test_empty as the supported local analysis mode, but the default is test_single
root = create_root_analysis(uuid=str(uuid.uuid4()))
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
observable = root.add_observable(F_TEST, 'test_1')
root.analysis_mode = 'test_single'
root.save()
root.schedule()
engine = TestEngine(local_analysis_modes=['test_empty'],
pool_size_limit=1)
engine.enable_module('analysis_module_basic_test')
engine.controlled_stop()
engine.start()
engine.wait()
root.load()
observable = root.get_observable(observable.id)
self.assertIsNotNone(observable)
from saq.modules.test import BasicTestAnalysis
analysis = observable.get_analysis(BasicTestAnalysis)
#self.assertIsNotNone(analysis)
# both test_empty and test_single should be in this list
self.assertEquals(len(engine.local_analysis_modes), 2)
self.assertTrue('test_single' in engine.local_analysis_modes)
self.assertTrue('test_empty' in engine.local_analysis_modes)
def test_local_analysis_mode_missing_pool(self):
saq.CONFIG['engine']['default_analysis_mode'] = 'test_empty'
# test_empty is specified as the only supported mode
# but we specify a pool for test_single
# this is a configuration error
engine = TestEngine(local_analysis_modes=['test_empty'],
analysis_pools={'test_single': 1})
wait_for_log_count('attempted to add analysis pool for mode test_single which is not supported by this engine', 1, 5)
def test_local_analysis_mode_not_local(self):
root = create_root_analysis(uuid=str(uuid.uuid4()))
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
observable = root.add_observable(F_TEST, 'test_1')
# but we target test_single for this analysis
root.analysis_mode = 'test_single'
root.save()
root.schedule()
# we say we only support test_empty analysis modes
engine = TestEngine(local_analysis_modes=['test_empty'])
engine.enable_module('analysis_module_basic_test', 'test_empty')
engine.controlled_stop()
engine.start()
engine.wait()
# this should exit out since the workload entry is for test_single analysis mode
# but we don't support that with this engine so it shouldn't see it
def test_local_analysis_mode_remote_pickup(self):
root = create_root_analysis(uuid=str(uuid.uuid4()))
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
observable = root.add_observable(F_TEST, 'test_1')
# but we target test_single for this analysis
root.analysis_mode = 'test_single'
root.save()
root.schedule()
# remember the old storage dir
old_storage_dir = root.storage_dir
# we say we only support test_empty analysis modes
engine = TestEngine(local_analysis_modes=['test_empty'],
analysis_pools={'test_empty': 1})
engine.enable_module('analysis_module_basic_test')
engine.controlled_stop()
engine.start()
# this should exit out since we don't support this analysis mode with this engine instance
engine.wait()
# make sure our stuff is still there
self.assertTrue(os.path.exists(old_storage_dir))
# start an api server for this node
self.start_api_server()
self.reset_config()
# now start another engine on a different "node"
saq.CONFIG['global']['node'] = 'second_host'
saq.set_node('second_host')
saq.CONFIG['analysis_mode_test_single']['cleanup'] = 'no'
# and this node handles the test_single mode
saq.CONFIG['engine']['local_analysis_modes'] = 'test_single'
saq.CONFIG['engine']['analysis_pool_size_test_single'] = '1'
engine = TestEngine()
engine.enable_module('analysis_module_basic_test')
engine.start()
# since this is remote we can't call controlled_stop and wait for the queues to
# empty, because only the local queue is checked (and it is currently empty)
# instead, watch the logs for the work target being moved and completed
wait_for_log_count('downloading work target {} from '.format(root.uuid), 1, 5)
wait_for_log_count('completed analysis RootAnalysis({})'.format(root.uuid), 1, 5)
engine.controlled_stop()
engine.wait()
# now the old storage directory should be gone
self.assertFalse(os.path.exists(old_storage_dir))
# but there should be a new one in the new "node"
root = RootAnalysis(storage_dir=storage_dir_from_uuid(root.uuid))
root.load()
observable = root.get_observable(observable.id)
self.assertIsNotNone(observable)
from saq.modules.test import BasicTestAnalysis
analysis = observable.get_analysis(BasicTestAnalysis)
self.assertIsNotNone(analysis)
@use_db
def test_local_analysis_mode_remote_pickup_invalid_company_id(self, db, c):
# TestCase: we've got nothing to do locally but there is work on a remote
# server; that work is assigned to a different company, so we do NOT grab it
# first we add a new company
c.execute("INSERT INTO company ( name ) VALUES ( 'unittest' )")
db.commit()
# get the new company_id
c.execute("SELECT id FROM company WHERE name = 'unittest'")
row = c.fetchone()
self.assertIsNotNone(row)
other_company_id = row[0]
root = create_root_analysis(uuid=str(uuid.uuid4()))
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
observable = root.add_observable(F_TEST, 'test_1')
# but we target test_single for this analysis
root.analysis_mode = 'test_single'
root.company_id = other_company_id
root.save()
root.schedule()
# remember the old storage dir
old_storage_dir = root.storage_dir
# we say we only support test_empty analysis modes
engine = TestEngine(local_analysis_modes=['test_empty'],
analysis_pools={'test_empty': 1})
engine.enable_module('analysis_module_basic_test')
engine.controlled_stop()
engine.start()
# this should exit out since we do not support this analysis mode with this engine
engine.wait()
# make sure our stuff is still there
self.assertTrue(os.path.exists(old_storage_dir))
# start an api server for this node
self.start_api_server()
self.reset_config()
# now start another engine on a different "node"
saq.CONFIG['global']['node'] = 'second_host'
saq.set_node('second_host')
saq.CONFIG['analysis_mode_test_single']['cleanup'] = 'no'
# and this node handles the test_single mode
saq.CONFIG['engine']['local_analysis_modes'] = 'test_single'
saq.CONFIG['engine']['analysis_pool_size_test_single'] = '1'
engine = TestEngine(local_analysis_modes=['test_single'],
analysis_pools={'test_single': 1})
engine.enable_module('analysis_module_basic_test')
engine.controlled_stop()
engine.start()
# we should see the same thing happen since the remote work is assigned to the other company
engine.wait()
# make sure our stuff is still there
self.assertTrue(os.path.exists(old_storage_dir))
@use_db
def test_status_update(self, db, c):
# start an empty engine and wait for the node update
engine = TestEngine()
engine.start()
wait_for_log_count('updated node', 1, 5)
# do we have an entry in the nodes database table?
c.execute("SELECT name, location, company_id, last_update FROM nodes WHERE id = %s", (saq.SAQ_NODE_ID,))
row = c.fetchone()
self.assertIsNotNone(row)
self.assertEquals(row[0], saq.SAQ_NODE)
self.assertEquals(row[1], saq.API_PREFIX)
self.assertEquals(row[2], saq.COMPANY_ID)
engine.stop()
engine.wait()
@use_db
def test_node_modes_update(self, db, c):
# when an Engine starts up it updates the node_modes database with the list of analysis modes it locally supports
# configure to support two modes
engine = TestEngine(local_analysis_modes=['test_empty', 'test_single'])
engine.controlled_stop()
engine.start()
engine.wait()
# we should have two entries in the node_modes database for the current node_id
c.execute("SELECT analysis_mode FROM node_modes WHERE node_id = %s ORDER BY analysis_mode ASC", (saq.SAQ_NODE_ID,))
self.assertEquals(c.fetchone(), ('test_empty',))
self.assertEquals(c.fetchone(), ('test_single',))
# and the any_mode column should be 0 for this node
c.execute("SELECT any_mode FROM nodes WHERE id = %s", (saq.SAQ_NODE_ID,))
self.assertEquals(c.fetchone(), (0,))
@use_db
def test_node_modes_update_any(self, db, c):
# when an Engine starts up it updates the node_modes database with the list of analysis modes it locally supports
# configure to support two modes
engine = TestEngine(local_analysis_modes=[])
engine.controlled_stop()
engine.start()
engine.wait()
# we should have NO entries in the node_modes database for the current node_id
c.execute("SELECT analysis_mode FROM node_modes WHERE node_id = %s ORDER BY analysis_mode ASC", (saq.SAQ_NODE_ID,))
self.assertIsNone(c.fetchone())
# and the any_mode column should be 1 for this node
c.execute("SELECT any_mode FROM nodes WHERE id = %s", (saq.SAQ_NODE_ID,))
self.assertEquals(c.fetchone(), (1,))
@use_db
def test_primary_node(self, db, c):
# test having a node become the primary node
saq.CONFIG['engine']['node_status_update_frequency'] = '1'
engine = TestEngine()
engine.start()
wait_for_log_count('this node {} has become the primary node'.format(saq.SAQ_NODE), 1, 5)
c.execute("SELECT name FROM nodes WHERE id = %s AND is_primary = 1", (saq.SAQ_NODE_ID,))
self.assertIsNotNone(c.fetchone())
engine.stop()
engine.wait()
@use_db
def test_primary_node_contest(self, db, c):
# test having a node become the primary node
# and then another node NOT becoming a primary node because there already is one
engine = TestEngine()
engine.start()
wait_for_log_count('this node {} has become the primary node'.format(saq.SAQ_NODE), 1, 5)
c.execute("SELECT name FROM nodes WHERE id = %s AND is_primary = 1", (saq.SAQ_NODE_ID,))
self.assertIsNotNone(c.fetchone())
engine.stop()
engine.wait()
saq.set_node('another_node')
engine = TestEngine()
engine.start()
wait_for_log_count('node {} is not primary'.format(saq.SAQ_NODE), 1, 5)
engine.stop()
engine.wait()
@use_db
def test_primary_node_contest_winning(self, db, c):
# test having a node become the primary node
# after another node times out
engine = TestEngine()
engine.start()
wait_for_log_count('this node {} has become the primary node'.format(saq.SAQ_NODE), 1, 5)
c.execute("SELECT name FROM nodes WHERE id = %s AND is_primary = 1", (saq.SAQ_NODE_ID,))
self.assertIsNotNone(c.fetchone())
engine.stop()
engine.wait()
# update the node to make it look like it last updated a while ago
c.execute("UPDATE nodes SET last_update = ADDTIME(last_update, '-1:00:00') WHERE id = %s", (saq.SAQ_NODE_ID,))
db.commit()
c.execute("SELECT last_update FROM nodes WHERE id = %s", (saq.SAQ_NODE_ID,))
saq.set_node('another_node')
engine = TestEngine()
engine.start()
wait_for_log_count('this node {} has become the primary node'.format(saq.SAQ_NODE), 1, 5)
engine.stop()
engine.wait()
@use_db
def test_primary_node_clear_locks(self, db, c):
target = str(uuid.uuid4())
lock_uuid = str(uuid.uuid4())
self.assertTrue(acquire_lock(target, lock_uuid))
saq.LOCK_TIMEOUT_SECONDS = 0
# test having a node become the primary node
# and then clearing out an expired lock
engine = TestEngine()
engine.start()
wait_for_log_count('this node {} has become the primary node'.format(saq.SAQ_NODE), 1, 5)
wait_for_log_count('removed 1 expired locks', 1, 5)
engine.stop()
engine.wait()
# make sure the lock is gone
c.execute("SELECT uuid FROM locks WHERE uuid = %s", (target,))
self.assertIsNone(c.fetchone())
@use_db
def test_primary_node_clear_expired_local_nodes(self, db, c):
# create a local node and have it expire
engine = TestEngine()
engine.set_local()
engine.controlled_stop()
engine.start()
engine.stop()
c.execute("UPDATE nodes SET last_update = ADDTIME(last_update, '-1:00:00') WHERE id = %s", (saq.SAQ_NODE_ID,))
db.commit()
saq.set_node('another_node')
engine = TestEngine()
engine.start()
wait_for_log_count('removed 1 expired local nodes', 1, 5)
engine.stop()
engine.wait()
def test_threaded_analysis_module(self):
root = create_root_analysis(uuid=str(uuid.uuid4()))
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
observable = root.add_observable(F_TEST, 'test_1')
root.save()
root.schedule()
engine = TestEngine(pool_size_limit=1)
engine.enable_module('analysis_module_threaded_test')
engine.controlled_stop()
engine.start()
# we should see this execute at least once
wait_for_log_count('threaded execution called', 1, 5)
engine.wait()
def test_threaded_analysis_module_broken(self):
root = create_root_analysis(uuid=str(uuid.uuid4()))
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
observable = root.add_observable(F_TEST, 'test_1')
root.save()
root.schedule()
# have this fail after 1 second of waiting
saq.EXECUTION_THREAD_LONG_TIMEOUT = 1
engine = TestEngine(pool_size_limit=1)
engine.enable_module('analysis_module_threaded_test_broken')
engine.start()
wait_for_log_count('is not stopping', 1, 6)
wait_for_log_count('failing to stop - process dying', 1, 10)
engine.stop()
engine.wait()
def test_engine_worker_recovery(self):
# make sure the engine detects dead workers and replaces them
root = create_root_analysis(uuid=str(uuid.uuid4()))
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
observable = root.add_observable(F_TEST, 'test_worker_death')
root.save()
root.schedule()
engine = TestEngine(pool_size_limit=1)
engine.enable_module('analysis_module_basic_test')
engine.start()
# we should see it die
wait_for_log_count('detected death of', 1, 5)
# and then we should have seen two workers start
wait_for_log_count('started worker loop', 2, 5)
engine.stop()
engine.wait()
@use_db
def test_engine_exclusive_uuid(self, db, c):
exclusive_uuid = str(uuid.uuid4())
root = create_root_analysis(uuid=str(uuid.uuid4()))
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
root.save()
root.schedule(exclusive_uuid)
c.execute("SELECT exclusive_uuid FROM workload WHERE uuid = %s", (root.uuid,))
row = c.fetchone()
self.assertIsNotNone(row)
self.assertEquals(row[0], exclusive_uuid)
# this engine should NOT process the work item
# since the exclusive_uuid is NOT set
engine = TestEngine(pool_size_limit=1)
engine.enable_module('analysis_module_basic_test')
engine.start()
# we should see this a bunch of times
wait_for_log_count('workload.exclusive_uuid IS NULL', 3, 5)
self.assertEquals(log_count('queue sizes workload 1 delayed 0'), 0)
engine.stop()
engine.wait()
# this engine should process the work item
engine = TestEngine(pool_size_limit=1)
engine.exclusive_uuid = exclusive_uuid
engine.enable_module('analysis_module_basic_test')
engine.controlled_stop()
engine.start()
engine.wait()
@use_db
def test_clear_outstanding_locks(self, db, c):
root = create_root_analysis(uuid=str(uuid.uuid4()))
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
root.add_observable(F_TEST, 'test_never_return')
root.save()
root.schedule()
engine = TestEngine(pool_size_limit=1)
engine.initialize() # get the node created
# create an arbitrary lock
from saq.database import acquire_lock
self.assertTrue(acquire_lock(str(uuid.uuid4()), str(uuid.uuid4()), f'{saq.SAQ_NODE}-unittest-12345'))
self.assertTrue(acquire_lock(str(uuid.uuid4()), str(uuid.uuid4()), f'some_other_node.local-unittest-12345'))
# should have two locks now
c.execute("SELECT COUNT(*) FROM locks")
self.assertEquals(c.fetchone()[0], 2)
db.commit()
# initialize the engine again
engine = TestEngine(pool_size_limit=1)
engine.initialize()
# should see a logging message about locks being deleted
wait_for_log_count('clearing 1 locks from previous execution', 1, 5)
# we should have one lock left, belonging to the "other node"
c.execute("SELECT lock_owner FROM locks")
self.assertEquals(c.fetchone()[0], 'some_other_node.local-unittest-12345')
def test_action_counters(self):
root = create_root_analysis(uuid=str(uuid.uuid4()))
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
t1 = root.add_observable(F_TEST, 'test_action_counter_1')
t2 = root.add_observable(F_TEST, 'test_action_counter_2')
t3 = root.add_observable(F_TEST, 'test_action_counter_3')
root.save()
root.schedule()
engine = TestEngine(pool_size_limit=1)
engine.enable_module('analysis_module_basic_test')
engine.controlled_stop()
engine.start()
engine.wait()
# we have an action count limit of 2, so 2 of these should have analysis and 1 should not
root = RootAnalysis(storage_dir=root.storage_dir)
root.load()
t1 = root.get_observable(t1.id)
t2 = root.get_observable(t2.id)
t3 = root.get_observable(t3.id)
self.assertIsNotNone(t1)
self.assertIsNotNone(t2)
self.assertIsNotNone(t3)
from saq.modules.test import BasicTestAnalysis
analysis_count = 0
for t in [ t1, t2, t3 ]:
if t.get_analysis(BasicTestAnalysis):
analysis_count += 1
self.assertEquals(analysis_count, 2)
def test_module_priority(self):
root = create_root_analysis()
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
t1 = root.add_observable(F_TEST, 'test')
root.save()
root.schedule()
engine = TestEngine(pool_size_limit=1)
engine.enable_module('analysis_module_high_priority')
engine.enable_module('analysis_module_low_priority')
engine.controlled_stop()
engine.start()
engine.wait()
# we should see the high priority execute before the low priority
hp_log_entry = search_log('analyzing test(test) with HighPriorityAnalyzer')
self.assertEquals(len(hp_log_entry), 1)
hp_log_entry = hp_log_entry[0]
lp_log_entry = search_log('analyzing test(test) with LowPriorityAnalyzer')
self.assertEquals(len(lp_log_entry), 1)
lp_log_entry = lp_log_entry[0]
self.assertLess(hp_log_entry.created, lp_log_entry.created)
# swap the priorities
saq.CONFIG['analysis_module_high_priority']['priority'] = '1'
saq.CONFIG['analysis_module_low_priority']['priority'] = '0'
root = create_root_analysis(uuid=str(uuid.uuid4()))
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
t1 = root.add_observable(F_TEST, 'test')
root.save()
root.schedule()
engine = TestEngine(pool_size_limit=1)
engine.enable_module('analysis_module_high_priority')
engine.enable_module('analysis_module_low_priority')
engine.controlled_stop()
engine.start()
engine.wait()
# we should see the high priority execute before the low priority
hp_log_entry = search_log('analyzing test(test) with HighPriorityAnalyzer')
self.assertEquals(len(hp_log_entry), 2)
hp_log_entry = hp_log_entry[1]
lp_log_entry = search_log('analyzing test(test) with LowPriorityAnalyzer')
self.assertEquals(len(lp_log_entry), 2)
lp_log_entry = lp_log_entry[1]
self.assertLess(lp_log_entry.created, hp_log_entry.created)
# test a high priority analysis against an analysis without a priority
saq.CONFIG['analysis_module_high_priority']['priority'] = '0'
del saq.CONFIG['analysis_module_low_priority']['priority']
root = create_root_analysis(uuid=str(uuid.uuid4()))
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
t1 = root.add_observable(F_TEST, 'test')
root.save()
root.schedule()
saq.CONFIG['analysis_module_high_priority']['priority'] = '-1'
saq.CONFIG['analysis_module_low_priority']['priority'] = '1'
engine = TestEngine(pool_size_limit=1)
engine.enable_module('analysis_module_high_priority')
engine.enable_module('analysis_module_low_priority')
engine.enable_module('analysis_module_no_priority')
engine.controlled_stop()
engine.start()
engine.wait()
# we should see the high priority execute before the low priority
hp_log_entry = search_log('analyzing test(test) with HighPriorityAnalyzer')
self.assertEquals(len(hp_log_entry), 3)
hp_log_entry = hp_log_entry[2]
lp_log_entry = search_log('analyzing test(test) with LowPriorityAnalyzer')
self.assertEquals(len(lp_log_entry), 3)
lp_log_entry = lp_log_entry[2]
np_log_entry = search_log('analyzing test(test) with NoPriorityAnalyzer')
self.assertEquals(len(np_log_entry), 1)
np_log_entry = np_log_entry[0]
self.assertLess(hp_log_entry.created, lp_log_entry.created)
self.assertLess(lp_log_entry.created, np_log_entry.created)
def test_post_analysis_multi_mode(self):
root = create_root_analysis(analysis_mode='test_groups')
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
t1 = root.add_observable(F_TEST, 'test')
root.save()
root.schedule()
engine = TestEngine(pool_size_limit=1, local_analysis_modes=['test_groups', 'test_single', 'test_empty'])
engine.enable_module('analysis_module_post_analysis_multi_mode', ['test_groups', 'test_single', 'test_empty'])
engine.controlled_stop()
engine.start()
engine.wait()
# at the end of analysis in test_groups mode post_analysis will execute and change the mode to test_single
# it will happen again and change the mode to test_empty but will return True indicating post analysis has completed
# so we should see the "execute_post_analysis called" message twice but not three times
self.assertEquals(log_count('execute_post_analysis called'), 2)
self.assertEquals(log_count('executing post analysis routines for'), 3)
def test_post_analysis_delayed_analysis(self):
root = create_root_analysis()
root.storage_dir = storage_dir_from_uuid(root.uuid)
root.initialize_storage()
t1 = root.add_observable(F_TEST, 'test_delayed')
root.save()
root.schedule()
engine = TestEngine(pool_size_limit=1)
engine.enable_module('analysis_module_test_post_analysis')
engine.controlled_stop()
engine.start()
engine.wait()
self.assertEquals(log_count('execute_post_analysis called'), 1)
self.assertEquals(log_count('executing post analysis routines for'), 1)
def test_alt_workload_move(self):
# when an analysis moves into alert (correlation) mode and we are using an alt workload dir
# then that analysis should move into the saq.DATA_DIR directory
root = create_root_analysis()
root.storage_dir = workload_storage_dir(root.uuid)
root.initialize_storage()
t1 = root.add_observable(F_TEST, 'test')
root.save()
root.schedule()
engine = TestEngine(pool_size_limit=1)
engine.enable_alerting()
engine.enable_module('analysis_module_forced_detection', 'test_groups')
engine.controlled_stop()
engine.start()
engine.wait()
def test_analysis_reset(self):
root = create_root_analysis()
root.initialize_storage()
o1 = root.add_observable(F_TEST, 'test_add_file')
o2 = root.add_observable(F_TEST, 'test_action_counter')
root.save()
root.schedule()
engine = TestEngine(pool_size_limit=1)
engine.enable_module('analysis_module_basic_test')
engine.controlled_stop()
engine.start()
engine.wait()
root = RootAnalysis(storage_dir=root.storage_dir)
root.load()
o1 = root.get_observable(o1.id)
self.assertIsNotNone(o1)
from saq.modules.test import BasicTestAnalysis
analysis = o1.get_analysis(BasicTestAnalysis)
self.assertIsNotNone(analysis)
# this analysis should have two file observables
file_observables = analysis.find_observables(lambda o: o.type == F_FILE)
self.assertEquals(len(file_observables), 2)
# make sure the files are actually there
for _file in file_observables:
self.assertTrue(_file.exists)
# we should also have a non-empty state
self.assertTrue(bool(root.state))
# and we should have some action counters
self.assertTrue(bool(root.action_counters))
# reset the analysis
root.reset()
# the original observable should still be there
o1 = root.get_observable(o1.id)
self.assertIsNotNone(o1)
analysis = o1.get_analysis(BasicTestAnalysis)
# but it should NOT have analysis
self.assertIsNone(analysis)
# and only the two original observables should remain
self.assertEquals(len(root.all_observables), 2)
# and those two files should not exist anymore
for _file in file_observables:
self.assertFalse(os.path.exists(abs_path(_file.value)))
def test_analysis_reset_locked(self):
from saq.database import acquire_lock, release_lock, LockedException
root = create_root_analysis()
root.initialize_storage()
o1 = root.add_observable(F_TEST, 'test_add_file')
o2 = root.add_observable(F_TEST, 'test_action_counter')
root.save()
root.schedule()
# lock the analysis we created
lock_uuid = acquire_lock(root.uuid)
# now try to reset it
with self.assertRaises(LockedException):
root = RootAnalysis(storage_dir=root.storage_dir)
root.load()
root.reset()
# unlock the analysis we created
release_lock(root.uuid, lock_uuid)
# the reset should work this time
root = RootAnalysis(storage_dir=root.storage_dir)
root.load()
root.reset()
def test_watched_files(self):
# make sure we check every time
saq.CONFIG['global']['check_watched_files_frequency'] = '0'
engine = TestEngine(pool_size_limit=1)
engine.enable_module('analysis_module_basic_test')
engine.start()
# the module creates the file we're going to watch, so wait for that to appear
watched_file_path = os.path.join(saq.TEMP_DIR, 'watched_file')
self.wait_for_condition(lambda : os.path.exists(watched_file_path))
# and then wait for it to start watching it
wait_for_log_count(f"watching file {watched_file_path}", 1)
# go ahead and modify it
with open(watched_file_path, 'w') as fp:
fp.write("data has changed")
root = create_root_analysis()
root.initialize_storage()
o1 = root.add_observable(F_TEST, 'test_watched_file')
root.save()
root.schedule()
wait_for_log_count(f"detected change to {watched_file_path}", 1)
wait_for_log_count(f"watched_file_modified: {watched_file_path}", 1)
engine.controlled_stop()
engine.wait()
def test_archive(self):
from saq.database import Alert
root = create_root_analysis(analysis_mode='test_single')
root.initialize_storage()
test_observable = root.add_observable(F_TEST, 'test_detection')
file_path = self.create_test_file(root_analysis=root)
root_file_observable = root.add_observable(F_FILE, file_path)
test_file_observable = root.add_observable(F_TEST, 'test_add_file')
root.save()
root.schedule()
engine = TestEngine()
engine.enable_alerting()
engine.enable_module('analysis_module_basic_test', 'test_single')
engine.controlled_stop()
engine.start()
engine.wait()
alert = saq.db.query(Alert).filter(Alert.uuid==root.uuid).one()
saq.db.commit()
alert.load()
test_observable = alert.get_observable(test_observable.id)
self.assertIsNotNone(test_observable)
basic_analysis = test_observable.get_analysis('BasicTestAnalysis')
self.assertIsNotNone(basic_analysis)
self.assertIsNotNone(basic_analysis.details)
test_file_observable = alert.get_observable(test_file_observable.id)
self.assertIsNotNone(test_file_observable)
basic_analysis = test_file_observable.get_analysis('BasicTestAnalysis')
self.assertIsNotNone(basic_analysis)
self.assertIsNotNone(basic_analysis.details)
additional_file_observable = basic_analysis.find_observable(F_FILE)
self.assertIsNotNone(additional_file_observable)
alert.archive()
alert.sync()
# need to clear the sqlalchemy identity cache
saq.db.close()
alert = saq.db.query(Alert).filter(Alert.uuid==root.uuid).one()
self.assertTrue(alert.archived)
alert.load()
test_observable = alert.get_observable(test_observable.id)
self.assertIsNotNone(test_observable)
basic_analysis = test_observable.get_analysis('BasicTestAnalysis')
self.assertIsNotNone(basic_analysis)
# the analysis details should be empty
self.assertIsNone(basic_analysis.details)
# but the summary should be OK
self.assertTrue(bool(basic_analysis.summary))
root_file_observable = alert.get_observable(root_file_observable.id)
self.assertIsNotNone(root_file_observable)
# the file that came with the alert should still be there
self.assertTrue(root_file_observable.exists)
additional_file_observable = alert.get_observable(additional_file_observable.id)
# but the one that was added during analysis should NOT be there
self.assertFalse(additional_file_observable.exists)
def test_cleanup(self):
from saq.constants import DISPOSITION_FALSE_POSITIVE
from saq.database import Alert
from saq.util.maintenance import cleanup_alerts
fp_root = create_root_analysis(analysis_mode='test_single', uuid=str(uuid.uuid4()))
fp_root.initialize_storage()
test_observable = fp_root.add_observable(F_TEST, 'test_detection')
fp_root.save()
fp_root.schedule()
ignore_root = create_root_analysis(analysis_mode='test_single', uuid=str(uuid.uuid4()))
ignore_root.initialize_storage()
test_observable = ignore_root.add_observable(F_TEST, 'test_detection')
ignore_root.save()
ignore_root.schedule()
engine = TestEngine()
engine.enable_alerting()
engine.enable_module('analysis_module_basic_test', 'test_single')
engine.controlled_stop()
engine.start()
engine.wait()
alert = saq.db.query(Alert).filter(Alert.uuid==fp_root.uuid).one()
alert.load()
# we'll set the time of the disposition to one day past the configured limit
alert.disposition = DISPOSITION_FALSE_POSITIVE
alert.disposition_time = datetime.datetime.now() - datetime.timedelta(days=saq.CONFIG['global'].getint('fp_days') + 1)
alert.sync()
saq.db.remove()
alert = saq.db.query(Alert).filter(Alert.uuid==ignore_root.uuid).one()
alert.load()
# we'll set the time of the disposition to one day past the configured limit
alert.disposition = DISPOSITION_IGNORE
alert.disposition_time = datetime.datetime.now() - datetime.timedelta(days=saq.CONFIG['global'].getint('ignore_days') + 1)
alert.sync()
saq.db.remove()
# calling cleanup will cause the alert to get archived
cleanup_alerts()
saq.db.remove()
# now this alert should be archived
alert = saq.db.query(Alert).filter(Alert.uuid == fp_root.uuid).one()
self.assertTrue(alert.archived)
# and this alert should be gone
self.assertIsNone(saq.db.query(Alert).filter(Alert.uuid == ignore_root.uuid).first())
self.assertFalse(os.path.exists(ignore_root.storage_dir))
def test_analysis_mode_dispositioned(self):
from saq.database import Alert, User, Workload, add_workload, set_dispositions
root = create_root_analysis(analysis_mode='test_single')
root.initialize_storage()
observable = root.add_observable(F_TEST, 'test_detection')
root.save()
root.schedule()
engine = TestEngine(pool_size_limit=1, local_analysis_modes=['test_single', ANALYSIS_MODE_CORRELATION])
engine.enable_alerting()
engine.enable_module('analysis_module_basic_test', 'test_single')
engine.controlled_stop()
engine.start()
engine.wait()
# we should have a single alert
self.assertEquals(saq.db.query(Alert.id).count(), 1)
# and an empty workload
self.assertEquals(saq.db.query(Workload.id).count(), 0)
# set the disposition of this alert
set_dispositions([root.uuid],
DISPOSITION_FALSE_POSITIVE,
saq.db.query(User).first().id)
# check the disposition
saq.db.close()
self.assertEquals(saq.db.query(Alert).first().disposition, DISPOSITION_FALSE_POSITIVE)
# we should have an entry in the workload for this now
self.assertEquals(saq.db.query(Workload.id).count(), 1)
workload_entry = saq.db.query(Workload).first()
self.assertIsNotNone(workload_entry)
self.assertEquals(workload_entry.uuid, root.uuid)
self.assertEquals(workload_entry.analysis_mode, ANALYSIS_MODE_DISPOSITIONED)
# start the engine back up with this mode enabled
engine = TestEngine(pool_size_limit=1, local_analysis_modes=[ANALYSIS_MODE_DISPOSITIONED])
engine.controlled_stop()
engine.start()
engine.wait()
# workload should be clear again
saq.db.close()
self.assertEquals(saq.db.query(Workload.id).count(), 0)
# analysis mode should have changed
alert = saq.db.query(Alert).filter(Alert.uuid == root.uuid).first()
alert.load()
self.assertEquals(alert.analysis_mode, ANALYSIS_MODE_DISPOSITIONED)
# add another observable and add the alert back to the workload under test_single mode
observable_2 = alert.add_observable(F_TEST, 'test_1')
alert.analysis_mode = 'test_single'
alert.sync()
add_workload(alert)
engine = TestEngine(pool_size_limit=1, local_analysis_modes=['test_single', ANALYSIS_MODE_CORRELATION])
engine.enable_module('analysis_module_basic_test', 'test_single')
engine.controlled_stop()
engine.start()
engine.wait()
# make sure observable_2 got analyzed
saq.db.close()
alert = saq.db.query(Alert).filter(Alert.uuid == root.uuid).first()
alert.load()
observable_2 = alert.get_observable(observable_2.id)
self.assertIsNotNone(observable_2)
analysis = observable_2.get_analysis('BasicTestAnalysis')
self.assertIsNotNone(analysis)
def test_observable_whitelisting(self):
from saq.database import add_observable_tag_mapping, remove_observable_tag_mapping
# add a user-defined whitelisting
add_observable_tag_mapping(F_TEST, 'test_1', None, 'whitelisted')
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_single')
root.initialize_storage()
test_observable = root.add_observable(F_TEST, 'test_1')
self.assertTrue(test_observable.has_tag('whitelisted'))
root.save()
root.schedule()
engine = TestEngine(analysis_pools={'test_groups': 1})
engine.enable_module('analysis_module_basic_test')
engine.controlled_stop()
engine.start()
engine.wait()
# we should NOT see any analysis for this observable
root = RootAnalysis(storage_dir=root.storage_dir)
root.load()
test_observable = root.get_observable(test_observable.id)
self.assertIsNotNone(test_observable)
self.assertEquals(len(test_observable.analysis), 0)
# remove the whitelisting
remove_observable_tag_mapping(F_TEST, 'test_1', None, 'whitelisted')
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_single')
root.initialize_storage()
test_observable = root.add_observable(F_TEST, 'test_1')
self.assertFalse(test_observable.has_tag('whitelisted'))
root.save()
root.schedule()
engine = TestEngine(analysis_pools={'test_groups': 1})
engine.enable_module('analysis_module_basic_test')
engine.controlled_stop()
engine.start()
engine.wait()
# we should see exactly one analysis for this observable
root = RootAnalysis(storage_dir=root.storage_dir)
root.load()
test_observable = root.get_observable(test_observable.id)
self.assertIsNotNone(test_observable)
self.assertEquals(len(test_observable.analysis), 1)
def test_file_observable_whitelisting(self):
from saq.database import add_observable_tag_mapping, remove_observable_tag_mapping
# add a user-defined whitelisting
add_observable_tag_mapping(F_SHA256, '315f5bdb76d078c43b8ac0064e4a0164612b1fce77c869345bfc94c75894edd3', None, 'whitelisted')
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_single')
root.initialize_storage()
test_file = self.create_test_file(file_content='Hello, world!', root_analysis=root)
file_observable = root.add_observable(F_FILE, test_file)
self.assertTrue(file_observable.has_tag('whitelisted'))
root.save()
root.schedule()
engine = TestEngine(analysis_pools={'test_single': 1})
engine.enable_module('analysis_module_generic_test', 'test_single')
engine.controlled_stop()
engine.start()
engine.wait()
# we should NOT see any analysis for this observable
root = RootAnalysis(storage_dir=root.storage_dir)
root.load()
file_observable = root.get_observable(file_observable.id)
self.assertIsNotNone(file_observable)
self.assertEquals(len(file_observable.analysis), 0)
# remove the whitelisting
remove_observable_tag_mapping(F_SHA256, '315f5bdb76d078c43b8ac0064e4a0164612b1fce77c869345bfc94c75894edd3', None, 'whitelisted')
root = create_root_analysis(uuid=str(uuid.uuid4()), analysis_mode='test_single')
root.initialize_storage()
test_file = self.create_test_file(file_content='Hello, world!', root_analysis=root)
file_observable = root.add_observable(F_FILE, test_file)
self.assertFalse(file_observable.has_tag('whitelisted'))
root.save()
root.schedule()
engine = TestEngine(analysis_pools={'test_single': 1})
engine.enable_module('analysis_module_generic_test', 'test_single')
engine.controlled_stop()
engine.start()
engine.wait()
# this time we SHOULD see the analysis for this observable
root = RootAnalysis(storage_dir=root.storage_dir)
root.load()
file_observable = root.get_observable(file_observable.id)
self.assertIsNotNone(file_observable)
from saq.modules.test import GenericTestAnalysis
analysis = file_observable.get_analysis(GenericTestAnalysis)
self.assertIsNotNone(analysis)
|
winfstest.py
|
# winfstest.py
#
# Copyright (c) 2015, Bill Zissimopoulos. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import inspect, os, random, re, subprocess, sys, threading, types
__all__ = [
"testline", "testeval", "testdone", "uniqname",
"fstest", "fstest_task", "expect", "expect_task"]
_ntests = 0
def testline(ok, diag = ""):
global _ntests
_ntests += 1
print "%sok %s%s%s" % ("" if ok else "not ", _ntests, " - " if diag else "", diag)
def testeval(ok):
diag = inspect.stack()[1]
if diag and diag[4] is not None and diag[5] is not None:
diag = diag[4][diag[5]]
diag = diag.strip()
else:
diag = ""
testline(ok, diag)
def testdone():
global _ntests
print "1..%s" % _ntests
_ntests = 0
def uniqname():
return "%08x" % random.randint(1, 2 ** 32)
_fstest_exe = os.path.splitext(os.path.realpath(__file__))[0] + ".exe"
_field_re = re.compile(r'(?:[^\s"]|"[^"]*")+')
class _fstest_task(object):
def __init__(self, tsk, cmd, exp):
self.tsk = tsk
self.cmd = cmd
self.exp = exp
self.out = None
self.err = None
self.res = None
arg = cmd.split() if hasattr(cmd, "split") else list(cmd)
if self.tsk:
arg.insert(0, "-w")
self.prc = subprocess.Popen([_fstest_exe] + arg,
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
self.thr = threading.Thread(target=self._readthread)
self.thr.start()
def __enter__(self):
self.thr.join()
self.err, self.res = self._fstest_res()
if self.exp is not None:
self._expect(self.cmd, self.exp, self.err, self.res)
return self
def __exit__(self, type, value, traceback):
if self.tsk:
try:
self.prc.stdin.write("\n")
except IOError:
pass
self.prc.stdin.close()
self.prc.wait()
ret = self.prc.poll()
if ret:
raise subprocess.CalledProcessError(ret, self.cmd)
def _readthread(self):
self.out = self.prc.stdout.read()
self.out = self.out.replace("\r\n", "\n").replace("\r", "\n")
def _fstest_res(self):
out = self.out.splitlines()
res = []
for l in out[1:]:
if not l:
continue
d = {}
res.append(d)
for p in _field_re.findall(l):
k, v = p.split("=", 2)
if v.startswith('"') and v.endswith('"') and len(v) >= 2:
v = v[1:-1]
else:
try:
v = int(v, 0)
except:
pass
d[k] = v
return out[0], res
def _expect(self, cmd, exp, err, res):
s = "expect" if not self.tsk else "expect_task"
if isinstance(exp, types.FunctionType): # function, lambda
if "0" == err:
testline(exp(res), "%s \"%s\" %s - result %s" % (s, cmd, exp.__name__, res))
else:
testline(0, "%s \"%s\" %s - got %s" % (s, cmd, 0, err))
else:
if err is None:
testline(1, "%s \"%s\" %s" % (s, cmd, exp))
elif str(exp) == err:
testline(1, "%s \"%s\" %s" % (s, cmd, exp))
else:
testline(0, "%s \"%s\" %s - got %s" % (s, cmd, exp, err))
def fstest(cmd):
with _fstest_task(False, cmd, None) as task:
pass
return task.err, task.res
def expect(cmd, exp):
with _fstest_task(False, cmd, exp) as task:
pass
return task.err, task.res
def fstest_task(cmd):
return _fstest_task(True, cmd, None)
def expect_task(cmd, exp):
if isinstance(exp, types.FunctionType): # function, lambda
print "# expect_task \"%s\" %s" % (cmd, exp.__name__)
else:
print "# expect_task \"%s\" %s" % (cmd, exp)
return _fstest_task(True, cmd, exp)
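# Hedged usage sketch (never called): the command strings are placeholders,
# not real fstest.exe syntax. fstest()/expect() return (err, res) where err
# is the first line of fstest.exe output (a status/error code as a string)
# and res is a list of dicts parsed from the remaining "key=value" lines.
def _usage_sketch():
    err, res = fstest("placeholder-command " + uniqname())
    testeval(err == "0")
    expect("placeholder-command " + uniqname(), 0)
    testdone()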
|
tunnel-manager.py
|
#!/usr/bin/env python3
import os, yaml, time
from subprocess import Popen, PIPE
from multiprocessing import Process
import sys
def log(prefix, msg):
for l in msg.splitlines():
print("{}: {}".format(prefix, l))
def run(cmd, splitlines=False):
# you had better escape cmd because it's going to the shell as-is
proc = Popen([cmd], stdout=PIPE, stderr=PIPE, universal_newlines=True, shell=True)
out, err = proc.communicate()
if splitlines:
out_split = []
for line in out.split("\n"):
line = line.strip()
if line != '':
out_split.append(line)
out = out_split
exitcode = int(proc.returncode)
return (out, err, exitcode)
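# A minimal usage sketch for run(); this helper is illustrative only and is
# never called anywhere in this script.
def _run_example():
    # splitlines=True returns stdout as a list of non-empty, stripped lines.
    out, err, exitcode = run("echo hello", splitlines=True)
    assert exitcode == 0 and out == ["hello"]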
class Unbuffered(object):
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
def writelines(self, datas):
self.stream.writelines(datas)
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
sys.stdout = Unbuffered(sys.stdout)
def run_with_agent(cmd: str):
"""
Run the given command in a shell session with a running ssh-agent
:param cmd: The command to run
:return: The (out, err, exitcode) tuple returned by run()
"""
return run("eval `ssh-agent` && " + cmd)
def tunnelDaemon(host, dynamic_ports=[], local_ports=[], remote_ports=[], keepalive=60):
cmd = ["ssh"]
cmd.append("-N")
cmd.append("-o ExitOnForwardFailure=yes")
#cmd.append("-4")
try:
if keepalive > 0:
cmd.append("-o ServerAliveInterval={}".format(keepalive))
except:
pass
port_count = 0
for p in dynamic_ports:
cmd.append("-D")
cmd.append("{}".format(p))
port_count += 1
for p in local_ports:
cmd.append("-L")
p1,p2 = p.split(":")
cmd.append("{}:127.0.0.1:{}".format(p1,p2))
port_count += 1
for p in remote_ports:
cmd.append("-R")
cmd.append("{}".format(p))
port_count += 1
cmd.append(host)
if port_count == 0:
log(host, "No ports configured, nothing to do")
return False
while True:
log(host, "Starting tunnel...")
(out, err, exitcode) = tunnelProcess(cmd)
log (host, "Tunnel exited with error code {}".format(exitcode))
log (host, err)
log (host, out)
log (host, "Retrying in 10 seconds...")
time.sleep(10)
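# For illustration: with the hypothetical settings
#   dynamic_ports=[1080], local_ports=["8080:80"],
#   remote_ports=["2222:127.0.0.1:22"], keepalive=60, host="user@jumpbox"
# tunnelDaemon() assembles (and tunnelProcess() joins and runs):
#   ssh -N -o ExitOnForwardFailure=yes -o ServerAliveInterval=60 \
#       -D 1080 -L 8080:127.0.0.1:80 -R 2222:127.0.0.1:22 user@jumpbox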
def tunnelProcess(cmd=[]):
#print (" ".join(cmd))
(out, err, exitcode) = run_with_agent(" ".join(cmd))
return (out, err, exitcode)
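# A hypothetical ~/.ssh/tunnels.yml illustrating the structure parsed in
# __main__ below (the key names match the lookups; hosts and ports are
# placeholders):
#
#   defaults:
#     keepalive: 60
#   tunnels:
#     jumpbox:
#       host: user@jumpbox.example.com
#       dynamic: [1080]                 # becomes "-D 1080"
#       local: ["8080:80"]              # becomes "-L 8080:127.0.0.1:80"
#       remote: ["2222:127.0.0.1:22"]   # passed to "-R" verbatim
#
# A tunnel entry with no dynamic/local/remote ports logs
# "No ports configured, nothing to do" and exits.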
if __name__ == '__main__':
conf_file = os.path.expanduser('~')+"/.ssh/tunnels.yml"
with open(conf_file) as f:
conf = yaml.load(f, Loader=yaml.FullLoader)
# print (conf)
for t in conf["tunnels"].keys():
print (t)
try:
host = conf["tunnels"][t]["host"]
except KeyError:
host = t
try:
dynamic_ports = conf["tunnels"][t]["dynamic"]
except KeyError:
dynamic_ports = []
try:
local_ports = conf["tunnels"][t]["local"]
except KeyError:
local_ports = []
try:
remote_ports = conf["tunnels"][t]["remote"]
except KeyError:
remote_ports = []
try:
keepalive = int(conf["tunnels"][t]["keepalive"])
except:
try:
keepalive = int(conf["defaults"]["keepalive"])
except:
keepalive = 60
p = Process(target=tunnelDaemon, args=(host,dynamic_ports,local_ports,remote_ports,keepalive ))
p.start()
# p.join()
|
test_concurrency.py
|
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Tests for concurrency libraries."""
import glob
import os
import random
import sys
import threading
import time
from flaky import flaky
import coverage
from coverage import env
from coverage.backward import import_local_file
from coverage.data import line_counts
from coverage.files import abs_file
from tests.coveragetest import CoverageTest
from tests.helpers import remove_files
# These libraries aren't always available, we'll skip tests if they aren't.
try:
import multiprocessing
except ImportError: # pragma: only jython
multiprocessing = None
try:
import eventlet
except ImportError:
eventlet = None
try:
import gevent
except ImportError:
gevent = None
try:
import greenlet
except ImportError: # pragma: only jython
greenlet = None
def measurable_line(l):
"""Is this a line of code coverage will measure?
Not blank, not a comment, and not "else"
"""
l = l.strip()
if not l:
return False
if l.startswith('#'):
return False
if l.startswith('else:'):
return False
if env.JYTHON and l.startswith(('try:', 'except:', 'except ', 'break', 'with ')):
# Jython doesn't measure these statements.
return False # pragma: only jython
return True
def line_count(s):
"""How many measurable lines are in `s`?"""
return len(list(filter(measurable_line, s.splitlines())))
def print_simple_annotation(code, linenos):
"""Print the lines in `code` with X for each line number in `linenos`."""
for lineno, line in enumerate(code.splitlines(), start=1):
print(" {} {}".format("X" if lineno in linenos else " ", line))
class LineCountTest(CoverageTest):
"""Test the helpers here."""
run_in_temp_dir = False
def test_line_count(self):
CODE = """
# Hey there!
x = 1
if x:
print("hello")
else:
print("bye")
print("done")
"""
self.assertEqual(line_count(CODE), 5)
# The code common to all the concurrency models.
SUM_RANGE_Q = """
# Above this will be imports defining queue and threading.
class Producer(threading.Thread):
def __init__(self, limit, q):
threading.Thread.__init__(self)
self.limit = limit
self.q = q
def run(self):
for i in range(self.limit):
self.q.put(i)
self.q.put(None)
class Consumer(threading.Thread):
def __init__(self, q, qresult):
threading.Thread.__init__(self)
self.q = q
self.qresult = qresult
def run(self):
sum = 0
while "no peephole".upper():
i = self.q.get()
if i is None:
break
sum += i
self.qresult.put(sum)
def sum_range(limit):
q = queue.Queue()
qresult = queue.Queue()
c = Consumer(q, qresult)
p = Producer(limit, q)
c.start()
p.start()
p.join()
c.join()
return qresult.get()
# Below this will be something using sum_range.
"""
PRINT_SUM_RANGE = """
print(sum_range({QLIMIT}))
"""
# Import the things to use threads.
if env.PY2:
THREAD = """
import threading
import Queue as queue
"""
else:
THREAD = """
import threading
import queue
"""
# Import the things to use eventlet.
EVENTLET = """
import eventlet.green.threading as threading
import eventlet.queue as queue
"""
# Import the things to use gevent.
GEVENT = """
from gevent import monkey
monkey.patch_thread()
import threading
import gevent.queue as queue
"""
# Uncomplicated code that doesn't use any of the concurrency stuff, to test
# the simple case under each of the regimes.
SIMPLE = """
total = 0
for i in range({QLIMIT}):
total += i
print(total)
"""
def cant_trace_msg(concurrency, the_module):
"""What might coverage.py say about a concurrency setting and imported module?"""
# In the concurrency choices, "multiprocessing" doesn't count, so remove it.
if "multiprocessing" in concurrency:
parts = concurrency.split(",")
parts.remove("multiprocessing")
concurrency = ",".join(parts)
if the_module is None:
# We don't even have the underlying module installed, we expect
# coverage to alert us to this fact.
expected_out = (
"Couldn't trace with concurrency=%s, "
"the module isn't installed.\n" % concurrency
)
elif env.C_TRACER or concurrency == "thread" or concurrency == "":
expected_out = None
else:
expected_out = (
"Can't support concurrency=%s with PyTracer, "
"only threads are supported\n" % concurrency
)
return expected_out
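# Illustrative sanity checks for cant_trace_msg() (a sketch; not part of the
# test suite and never called):
def _cant_trace_msg_examples():
    # Plain threads are always traceable, so no warning message is expected.
    assert cant_trace_msg("thread", threading) is None
    # A missing backing module always yields the "isn't installed" message;
    # note that "multiprocessing" is stripped from the concurrency list first.
    assert cant_trace_msg("multiprocessing,eventlet", None) == (
        "Couldn't trace with concurrency=eventlet, the module isn't installed.\n"
    )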
class ConcurrencyTest(CoverageTest):
"""Tests of the concurrency support in coverage.py."""
QLIMIT = 1000
def try_some_code(self, code, concurrency, the_module, expected_out=None):
"""Run some concurrency testing code and see that it was all covered.
`code` is the Python code to execute. `concurrency` is the name of
the concurrency regime to test it under. `the_module` is the imported
module that must be available for this to work at all. `expected_out`
is the text we expect the code to produce.
"""
self.make_file("try_it.py", code)
cmd = "coverage run --concurrency=%s try_it.py" % concurrency
out = self.run_command(cmd)
expected_cant_trace = cant_trace_msg(concurrency, the_module)
if expected_cant_trace is not None:
self.assertEqual(out, expected_cant_trace)
else:
# We can fully measure the code if we are using the C tracer, which
# can support all the concurrency, or if we are using threads.
if expected_out is None:
expected_out = "%d\n" % (sum(range(self.QLIMIT)))
print(code)
self.assertEqual(out, expected_out)
# Read the coverage file and see that try_it.py has all its lines
# executed.
data = coverage.CoverageData(".coverage")
data.read()
# If the test fails, it's helpful to see this info:
fname = abs_file("try_it.py")
linenos = data.lines(fname)
print("{}: {}".format(len(linenos), linenos))
print_simple_annotation(code, linenos)
lines = line_count(code)
self.assertEqual(line_counts(data)['try_it.py'], lines)
def test_threads(self):
code = (THREAD + SUM_RANGE_Q + PRINT_SUM_RANGE).format(QLIMIT=self.QLIMIT)
self.try_some_code(code, "thread", threading)
def test_threads_simple_code(self):
code = SIMPLE.format(QLIMIT=self.QLIMIT)
self.try_some_code(code, "thread", threading)
def test_eventlet(self):
code = (EVENTLET + SUM_RANGE_Q + PRINT_SUM_RANGE).format(QLIMIT=self.QLIMIT)
self.try_some_code(code, "eventlet", eventlet)
def test_eventlet_simple_code(self):
code = SIMPLE.format(QLIMIT=self.QLIMIT)
self.try_some_code(code, "eventlet", eventlet)
def test_gevent(self):
code = (GEVENT + SUM_RANGE_Q + PRINT_SUM_RANGE).format(QLIMIT=self.QLIMIT)
self.try_some_code(code, "gevent", gevent)
def test_gevent_simple_code(self):
code = SIMPLE.format(QLIMIT=self.QLIMIT)
self.try_some_code(code, "gevent", gevent)
def test_greenlet(self):
GREENLET = """\
from greenlet import greenlet
def test1(x, y):
z = gr2.switch(x+y)
print(z)
def test2(u):
print(u)
gr1.switch(42)
gr1 = greenlet(test1)
gr2 = greenlet(test2)
gr1.switch("hello", " world")
"""
self.try_some_code(GREENLET, "greenlet", greenlet, "hello world\n42\n")
def test_greenlet_simple_code(self):
code = SIMPLE.format(QLIMIT=self.QLIMIT)
self.try_some_code(code, "greenlet", greenlet)
def test_bug_330(self):
BUG_330 = """\
from weakref import WeakKeyDictionary
import eventlet
def do():
eventlet.sleep(.01)
gts = WeakKeyDictionary()
for _ in range(100):
gts[eventlet.spawn(do)] = True
eventlet.sleep(.005)
eventlet.sleep(.1)
print(len(gts))
"""
self.try_some_code(BUG_330, "eventlet", eventlet, "0\n")
SQUARE_OR_CUBE_WORK = """
def work(x):
# Use different lines in different subprocesses.
if x % 2:
y = x*x
else:
y = x*x*x
return y
"""
SUM_RANGE_WORK = """
def work(x):
return sum_range((x+1)*100)
"""
MULTI_CODE = """
# Above this will be a definition of work().
import multiprocessing
import os
import time
import sys
def process_worker_main(args):
# Need to pause, or the tasks go too quickly, and some processes
# in the pool don't get any work, and then don't record data.
time.sleep(0.02)
ret = work(*args)
return os.getpid(), ret
if __name__ == "__main__": # pragma: no branch
# This if is on a single line so we can get 100% coverage
# even if we have no arguments.
if len(sys.argv) > 1: multiprocessing.set_start_method(sys.argv[1])
pool = multiprocessing.Pool({NPROCS})
inputs = [(x,) for x in range({UPTO})]
outputs = pool.imap_unordered(process_worker_main, inputs)
pids = set()
total = 0
for pid, sq in outputs:
pids.add(pid)
total += sq
print("%d pids, total = %d" % (len(pids), total))
pool.close()
pool.join()
"""
@flaky(max_runs=30) # Sometimes a test fails due to inherent randomness. Try more times.
class MultiprocessingTest(CoverageTest):
"""Test support of the multiprocessing module."""
def setUp(self):
if not multiprocessing:
self.skipTest("No multiprocessing in this Python") # pragma: only jython
super(MultiprocessingTest, self).setUp()
def try_multiprocessing_code(
self, code, expected_out, the_module, nprocs, concurrency="multiprocessing", args=""
):
"""Run code using multiprocessing, it should produce `expected_out`."""
self.make_file("multi.py", code)
self.make_file(".coveragerc", """\
[run]
concurrency = %s
source = .
""" % concurrency)
if env.PYVERSION >= (3, 4):
start_methods = ['fork', 'spawn']
else:
start_methods = ['']
for start_method in start_methods:
if start_method and start_method not in multiprocessing.get_all_start_methods():
continue
remove_files(".coverage", ".coverage.*")
cmd = "coverage run {args} multi.py {start_method}".format(
args=args, start_method=start_method,
)
out = self.run_command(cmd)
expected_cant_trace = cant_trace_msg(concurrency, the_module)
if expected_cant_trace is not None:
self.assertEqual(out, expected_cant_trace)
else:
self.assertEqual(out.rstrip(), expected_out)
self.assertEqual(len(glob.glob(".coverage.*")), nprocs + 1)
out = self.run_command("coverage combine")
self.assertEqual(out, "")
out = self.run_command("coverage report -m")
last_line = self.squeezed_lines(out)[-1]
self.assertRegex(last_line, r"multi.py \d+ 0 100%")
def test_multiprocessing_simple(self):
nprocs = 3
upto = 30
code = (SQUARE_OR_CUBE_WORK + MULTI_CODE).format(NPROCS=nprocs, UPTO=upto)
total = sum(x*x if x%2 else x*x*x for x in range(upto))
expected_out = "{nprocs} pids, total = {total}".format(nprocs=nprocs, total=total)
self.try_multiprocessing_code(code, expected_out, threading, nprocs)
def test_multiprocessing_append(self):
nprocs = 3
upto = 30
code = (SQUARE_OR_CUBE_WORK + MULTI_CODE).format(NPROCS=nprocs, UPTO=upto)
total = sum(x*x if x%2 else x*x*x for x in range(upto))
expected_out = "{nprocs} pids, total = {total}".format(nprocs=nprocs, total=total)
self.try_multiprocessing_code(code, expected_out, threading, nprocs, args="--append")
def test_multiprocessing_and_gevent(self):
nprocs = 3
upto = 30
code = (
SUM_RANGE_WORK + EVENTLET + SUM_RANGE_Q + MULTI_CODE
).format(NPROCS=nprocs, UPTO=upto)
total = sum(sum(range((x + 1) * 100)) for x in range(upto))
expected_out = "{nprocs} pids, total = {total}".format(nprocs=nprocs, total=total)
self.try_multiprocessing_code(
code, expected_out, eventlet, nprocs, concurrency="multiprocessing,eventlet"
)
def try_multiprocessing_code_with_branching(self, code, expected_out):
"""Run code using multiprocessing, it should produce `expected_out`."""
self.make_file("multi.py", code)
self.make_file("multi.rc", """\
[run]
concurrency = multiprocessing
branch = True
omit = */site-packages/*
""")
if env.PYVERSION >= (3, 4):
start_methods = ['fork', 'spawn']
else:
start_methods = ['']
for start_method in start_methods:
if start_method and start_method not in multiprocessing.get_all_start_methods():
continue
out = self.run_command("coverage run --rcfile=multi.rc multi.py %s" % (start_method,))
self.assertEqual(out.rstrip(), expected_out)
out = self.run_command("coverage combine")
self.assertEqual(out, "")
out = self.run_command("coverage report -m")
last_line = self.squeezed_lines(out)[-1]
self.assertRegex(last_line, r"multi.py \d+ 0 \d+ 0 100%")
def test_multiprocessing_with_branching(self):
nprocs = 3
upto = 30
code = (SQUARE_OR_CUBE_WORK + MULTI_CODE).format(NPROCS=nprocs, UPTO=upto)
total = sum(x*x if x%2 else x*x*x for x in range(upto))
expected_out = "{nprocs} pids, total = {total}".format(nprocs=nprocs, total=total)
self.try_multiprocessing_code_with_branching(code, expected_out)
def test_coverage_stop_in_threads():
has_started_coverage = []
has_stopped_coverage = []
def run_thread(): # pragma: nested
"""Check that coverage is stopping properly in threads."""
deadline = time.time() + 5
ident = threading.currentThread().ident
if sys.gettrace() is not None:
has_started_coverage.append(ident)
while sys.gettrace() is not None:
# Wait for coverage to stop
time.sleep(0.01)
if time.time() > deadline:
return
has_stopped_coverage.append(ident)
cov = coverage.Coverage()
cov.start()
t = threading.Thread(target=run_thread) # pragma: nested
t.start() # pragma: nested
time.sleep(0.1) # pragma: nested
cov.stop() # pragma: nested
t.join()
assert has_started_coverage == [t.ident]
assert has_stopped_coverage == [t.ident]
def test_thread_safe_save_data(tmpdir):
# Non-regression test for:
# https://bitbucket.org/ned/coveragepy/issues/581
# Create some Python modules and put them in the path
modules_dir = tmpdir.mkdir('test_modules')
module_names = ["m{:03d}".format(i) for i in range(1000)]
for module_name in module_names:
modules_dir.join(module_name + ".py").write("def f(): pass\n")
# Shared variables for threads
should_run = [True]
imported = []
old_dir = os.getcwd()
os.chdir(modules_dir.strpath)
try:
# Make sure that all dummy modules can be imported.
for module_name in module_names:
import_local_file(module_name)
def random_load(): # pragma: nested
"""Import modules randomly to stress coverage."""
while should_run[0]:
module_name = random.choice(module_names)
mod = import_local_file(module_name)
mod.f()
imported.append(mod)
# Spawn some threads with coverage enabled and attempt to read the
# results right after stopping coverage collection with the threads
# still running.
duration = 0.01
for _ in range(3):
cov = coverage.Coverage()
cov.start()
threads = [threading.Thread(target=random_load) for _ in range(10)] # pragma: nested
should_run[0] = True # pragma: nested
for t in threads: # pragma: nested
t.start()
time.sleep(duration) # pragma: nested
cov.stop() # pragma: nested
# The following call used to crash with running background threads.
cov.get_data()
# Stop the threads
should_run[0] = False
for t in threads:
t.join()
if (not imported) and duration < 10: # pragma: only failure
duration *= 2
finally:
os.chdir(old_dir)
should_run[0] = False
|